source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
heat.c | /*********************************************************************************/
/* */
/* Animation of heat equation in a planar domain */
/* */
/* N. Berglund, May 2021 */
/* */
/* Feel free to reuse, but if doing so it would be nice to drop a */
/* line to nils.berglund@univ-orleans.fr - Thanks! */
/* */
/* compile with */
/* gcc -o heat heat.c */
/* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */
/* */
/* To make a video, set MOVIE to 1 and create subfolder tif_heat */
/* It may be possible to increase parameter PAUSE */
/* */
/* create movie using */
/* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */
/* */
/*********************************************************************************/
/*********************************************************************************/
/* */
/* NB: The algorithm used to simulate the heat equation is highly parallelizable */
/* One could make it much faster by using a GPU */
/* */
/*********************************************************************************/
#include <math.h>
#include <string.h>
#include <GL/glut.h>
#include <GL/glu.h>
#include <unistd.h>
#include <sys/types.h>
#include <tiffio.h> /* Sam Leffler's libtiff library. */
#include <omp.h>
#define MOVIE 0 /* set to 1 to generate movie */
/* General geometrical parameters */
#define WINWIDTH 1280 /* window width */
#define WINHEIGHT 720 /* window height */
#define NX 1280 /* number of grid points on x axis */
#define NY 720 /* number of grid points on y axis */
// #define NX 640 /* number of grid points on x axis */
// #define NY 360 /* number of grid points on y axis */
/* setting NX to WINWIDTH and NY to WINHEIGHT increases resolution */
/* but will multiply run time by 4 */
// #define XMIN -2.0
// #define XMAX 2.0 /* x interval */
#define XMIN -2.5
#define XMAX 1.5 /* x interval */
#define YMIN -1.125
#define YMAX 1.125 /* y interval for 9/16 aspect ratio */
#define JULIA_SCALE 0.5 /* scaling for Julia sets */
/* Choice of the billiard table */
#define B_DOMAIN 26 /* choice of domain shape, see list in global_pdes.c */
#define CIRCLE_PATTERN 0 /* pattern of circles, see list in global_pdes.c */
#define P_PERCOL 0.25 /* probability of having a circle in C_RAND_PERCOL arrangement */
#define NPOISSON 300 /* number of points for Poisson C_RAND_POISSON arrangement */
#define RANDOM_POLY_ANGLE 0 /* set to 1 to randomize angle of polygons */
#define LAMBDA -1.0 /* parameter controlling the dimensions of domain */
#define MU 0.1 /* parameter controlling the dimensions of domain */
#define NPOLY 6 /* number of sides of polygon */
#define APOLY 1.0 /* angle by which to turn polygon, in units of Pi/2 */
#define MDEPTH 5 /* depth of computation of Menger gasket */
#define MRATIO 5 /* ratio defining Menger gasket */
#define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */
#define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */
#define FOCI 1 /* set to 1 to draw focal points of ellipse */
#define NGRIDX 15 /* number of grid point for grid of disks */
#define NGRIDY 20 /* number of grid point for grid of disks */
#define X_SHOOTER -0.2
#define Y_SHOOTER -0.6
#define X_TARGET 0.4
#define Y_TARGET 0.7 /* shooter and target positions in laser fight */
#define ISO_XSHIFT_LEFT -1.65
#define ISO_XSHIFT_RIGHT 0.4
#define ISO_YSHIFT_LEFT -0.05
#define ISO_YSHIFT_RIGHT -0.05
#define ISO_SCALE 0.85 /* coordinates for isospectral billiards */
/* You can add more billiard tables by adapting the functions */
/* xy_in_billiard and draw_billiard in sub_wave.c */
/* Physical parameters of the heat equation */
// #define DT 0.00001
#define DT 0.000004
// #define DT 0.000002
// #define DT 0.00000002
// #define DT 0.000000005
#define VISCOSITY 10.0
#define T_OUT 2.0 /* outside temperature */
#define T_IN 0.0 /* inside temperature */
// #define T_OUT 0.0 /* outside temperature */
// #define T_IN 2.0 /* inside temperature */
#define SPEED 0.0 /* speed of drift to the right */
/* Boundary conditions, see list in global_pdes.c */
#define B_COND 1
/* Parameters for length and speed of simulation */
#define NSTEPS 1000 /* number of frames of movie */
#define NVID 50 /* number of iterations between images displayed on screen */
// #define NVID 100 /* number of iterations between images displayed on screen */
#define NSEG 100 /* number of segments of boundary */
#define BOUNDARY_WIDTH 1 /* width of billiard boundary */
#define PAUSE 100 /* number of frames after which to pause */
#define PSLEEP 1 /* sleep time during pause */
#define SLEEP1 2 /* initial sleeping time */
#define SLEEP2 1 /* final sleeping time */
/* For debugging purposes only */
#define FLOOR 0 /* set to 1 to limit wave amplitude to VMAX */
#define VMAX 10.0 /* max value of wave amplitude */
/* Field representation */
#define FIELD_REP 1
#define F_INTENSITY 0 /* color represents intensity */
#define F_GRADIENT 1 /* color represents norm of gradient */
#define DRAW_FIELD_LINES 1 /* set to 1 to draw field lines */
#define FIELD_LINE_WIDTH 1 /* width of field lines */
#define N_FIELD_LINES 120 /* number of field lines */
#define FIELD_LINE_FACTOR 120 /* factor controlling precision when computing origin of field lines */
/* Color schemes, see list in global_pdes.c */
#define COLOR_PALETTE 10 /* Color palette, see list in global_pdes.c */
#define BLACK 1 /* black background */
#define COLOR_SCHEME 1 /* choice of color scheme */
#define SCALE 0 /* set to 1 to adjust color scheme to variance of field */
// #define SLOPE 0.1 /* sensitivity of color on wave amplitude */
#define SLOPE 0.2 /* sensitivity of color on wave amplitude */
#define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */
#define E_SCALE 100.0 /* scaling factor for energy representation */
#define LOG_SCALE 1.0 /* scaling factor for energy log representation */
#define COLORHUE 260 /* initial hue of water color for scheme C_LUM */
#define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */
#define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */
#define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */
// #define HUEMEAN 180.0 /* mean value of hue for color scheme C_HUE */
// #define HUEAMP -180.0 /* amplitude of variation of hue for color scheme C_HUE */
#define HUEMEAN 359.0 /* mean value of hue for color scheme C_HUE */
#define HUEAMP -359.0 /* amplitude of variation of hue for color scheme C_HUE */
// #define HUEMEAN 270.0 /* mean value of hue for color scheme C_HUE */
// #define HUEAMP -130.0 /* amplitude of variation of hue for color scheme C_HUE */
#define DRAW_COLOR_SCHEME 0 /* set to 1 to plot the color scheme */
#define COLORBAR_RANGE 2.0 /* scale of color scheme bar */
#define COLORBAR_RANGE_B 12.0 /* scale of color scheme bar for 2nd part */
#define ROTATE_COLOR_SCHEME 0 /* set to 1 to draw color scheme horizontally */
#include "global_pdes.c"
#include "sub_wave.c"
double courant2; /* Courant parameter squared */
double dx2; /* spatial step size squared */
double intstep; /* integration step */
double intstep1; /* integration step used in absorbing boundary conditions */
void init_gaussian(double x, double y, double mean, double amplitude, double scalex,
                   double *phi[NX], short int * xy_in[NX])
/* initialise field with gaussian at position (x,y) */
/* mean: baseline value added to the bump; amplitude/scalex control its height and width */
/* also fills xy_in with the domain flags returned by xy_in_billiard:      */
/* 1 = interior, >= 2 = boundary obstacle (depth-coded), otherwise exterior */
{
    int i, j, in;
    double xy[2], dist2, module, scale2;    /* removed unused local 'phase' */

    scale2 = scalex*scalex;
    printf("Initialising field\n");

    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            ij_to_xy(i, j, xy);
            xy_in[i][j] = xy_in_billiard(xy[0],xy[1]);
            in = xy_in[i][j];
            if (in == 1)    /* interior point: gaussian bump around (x,y) */
            {
                dist2 = (xy[0]-x)*(xy[0]-x) + (xy[1]-y)*(xy[1]-y);
                module = amplitude*exp(-dist2/scale2);
                if (module < 1.0e-15) module = 1.0e-15;     /* floor to avoid denormals */
                phi[i][j] = mean + module/scalex;
            }
            /* boundary obstacle: temperature decays geometrically with obstacle depth */
            else if (in >= 2) phi[i][j] = T_IN*pow(0.75, (double)(in-2));
            /* exterior: fixed outside temperature */
            else phi[i][j] = T_OUT;
        }
}
void init_julia_set(double *phi[NX], short int * xy_in[NX])
/* change Julia set boundary condition */
/* recompute the domain flags for every cell and re-impose the inside */
/* temperature T_IN on all obstacle cells (flag value >= 2)           */
{
    int p, q;
    double coord[2];

    for (p=0; p<NX; p++)
        for (q=0; q<NY; q++)
        {
            ij_to_xy(p, q, coord);
            xy_in[p][q] = xy_in_billiard(coord[0], coord[1]);
            if (xy_in[p][q] >= 2) phi[p][q] = T_IN;
        }
}
/*********************/
/* animation part */
/*********************/
void compute_gradient(double *phi[NX], double *nablax[NX], double *nablay[NX])
/* compute the gradient of the field */
/* nablax/nablay must be pre-allocated NX x NY arrays; one-sided clamping at the borders */
{
    int i, j, iplus, iminus, jplus, jminus;
    double dx;

    dx = (XMAX-XMIN)/((double)NX);

    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            /* clamp neighbour indices to the grid */
            iplus = i+1;  if (iplus == NX) iplus = NX-1;
            iminus = i-1; if (iminus == -1) iminus = 0;
            /* bug fix: j must be compared against NY, not NX — with NY < NX  */
            /* the original let jplus reach NY and read phi[i][NY] out of      */
            /* bounds on every column                                          */
            jplus = j+1;  if (jplus == NY) jplus = NY-1;
            jminus = j-1; if (jminus == -1) jminus = 0;

            /* NOTE(review): a centred difference would divide by 2*dx; the   */
            /* missing factor only rescales the gradient used for colouring   */
            /* and field lines, so it is kept as in the original              */
            nablax[i][j] = (phi[iplus][j] - phi[iminus][j])/dx;
            nablay[i][j] = (phi[i][jplus] - phi[i][jminus])/dx;
        }
}
void draw_field_line(double x, double y, short int *xy_in[NX], double *nablax[NX],
                     double *nablay[NX], double delta, int nsteps)
/* draw a field line of the gradient, starting in (x,y) */
/* delta: step length along the line; nsteps: maximum number of steps; */
/* the line stops when the gradient vanishes or the domain is left     */
{
    double x1, y1, x2, y2, pos[2], nabx, naby, norm2, norm;
    int i = 0, ij[2], cont = 1;

    glColor3f(1.0, 1.0, 1.0);
//     glColor3f(0.0, 0.0, 0.0);
    glLineWidth(FIELD_LINE_WIDTH);

    x1 = x;
    y1 = y;

//     printf("Drawing field line \n");

    /* emit the starting vertex of the line strip */
    glEnable(GL_LINE_SMOOTH);
    glBegin(GL_LINE_STRIP);
    xy_to_pos(x1, y1, pos);
    glVertex2d(pos[0], pos[1]);

    i = 0;
    while ((cont)&&(i < nsteps))
    {
        /* lattice coordinates of the current point, clamped to the grid */
        xy_to_ij(x1, y1, ij);
        if (ij[0] < 0) ij[0] = 0;
        if (ij[0] > NX-1) ij[0] = NX-1;
        if (ij[1] < 0) ij[1] = 0;
        if (ij[1] > NY-1) ij[1] = NY-1;

        nabx = nablax[ij[0]][ij[1]];
        naby = nablay[ij[0]][ij[1]];

        norm2 = nabx*nabx + naby*naby;

        if (norm2 > 1.0e-14)
        {
            /* avoid too large step size */
            if (norm2 < 1.0e-9) norm2 = 1.0e-9;
            norm = sqrt(norm2);
            /* advance one step of length delta along the normalised gradient */
            x1 = x1 + delta*nabx/norm;
            y1 = y1 + delta*naby/norm;
        }
        else cont = 0;      /* gradient (numerically) vanishes: stop */

        /* stop if the boundary is hit */
        if (!xy_in[ij[0]][ij[1]]) cont = 0;

//         if (xy_in[ij[0]][ij[1]] != 1) cont = 0;

//         printf("x1 = %.3lg \t y1 = %.3lg \n", x1, y1);

        xy_to_pos(x1, y1, pos);
        glVertex2d(pos[0], pos[1]);

        i++;
    }
    glEnd();
}
void draw_wave(double *phi[NX], short int *xy_in[NX], double scale, int time)
/* draw the field as coloured quads, then optionally the gradient field lines */
/* scale: colour normalisation factor; time: frame index passed to color_scheme */
{
    int i, j, ij[2], counter = 0;
    static int first = 1;
    double rgb[3], x1, y1, x2, y2, value, angle, dangle, intens, deltaintens;
    double *nablax[NX], *nablay[NX];
    static double linex[N_FIELD_LINES*FIELD_LINE_FACTOR], liney[N_FIELD_LINES*FIELD_LINE_FACTOR], distance[N_FIELD_LINES*FIELD_LINE_FACTOR], integral[N_FIELD_LINES*FIELD_LINE_FACTOR + 1];

    for (i=0; i<NX; i++)
    {
        nablax[i] = (double *)malloc(NY*sizeof(double));
        nablay[i] = (double *)malloc(NY*sizeof(double));
    }

    /* compute the gradient */
    compute_gradient(phi, nablax, nablay);

    /* compute the position of origins of field lines (once, on the first call) */
    if ((first)&&(DRAW_FIELD_LINES))
    {
        first = 0;
        printf("computing linex\n");

        /* sample a circle of radius MU*1.01 and record arc-length elements */
        x1 = LAMBDA + MU*1.01;
        y1 = 1.0;
        linex[0] = x1;
        liney[0] = y1;

        dangle = DPI/((double)(N_FIELD_LINES*FIELD_LINE_FACTOR));

        for (i = 1; i < N_FIELD_LINES*FIELD_LINE_FACTOR; i++)
        {
            angle = (double)i*dangle;
            x2 = LAMBDA + MU*1.01*cos(angle);
            y2 = 0.5 + MU*1.01*sin(angle);
            linex[i] = x2;
            liney[i] = y2;
            distance[i-1] = module2(x2-x1,y2-y1);
            x1 = x2;
            y1 = y2;
        }
        distance[N_FIELD_LINES*FIELD_LINE_FACTOR - 1] = module2(x2- 0.99*LAMBDA,y2);
    }

    /* draw the field, one quad per grid cell */
    glBegin(GL_QUADS);
    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            /* bug fix: 'value' stayed uninitialized when FIELD_REP matched  */
            /* neither constant; the else branch now covers F_INTENSITY and  */
            /* any other value                                               */
            if (FIELD_REP == F_GRADIENT) value = module2(nablax[i][j], nablay[i][j]);
            else value = phi[i][j];

            if (xy_in[i][j] == 1)
            {
                color_scheme(COLOR_SCHEME, value, scale, time, rgb);
                glColor3f(rgb[0], rgb[1], rgb[2]);
            }
            else glColor3f(0.0, 0.0, 0.0);      /* outside the domain: black */

            glVertex2i(i, j);
            glVertex2i(i+1, j);
            glVertex2i(i+1, j+1);
            glVertex2i(i, j+1);
        }
    glEnd ();

    /* draw the field lines */
    if (DRAW_FIELD_LINES)
    {
        /* compute gradient norm along boundary and its integral */
        for (i = 0; i < N_FIELD_LINES*FIELD_LINE_FACTOR; i++)
        {
            xy_to_ij(linex[i], liney[i], ij);
            intens = module2(nablax[ij[0]][ij[1]], nablay[ij[0]][ij[1]])*distance[i];
            if (i > 0) integral[i] = integral[i-1] + intens;
            else integral[i] = intens;
        }
        /* launch N_FIELD_LINES lines, equidistributed in integrated intensity */
        deltaintens = integral[N_FIELD_LINES*FIELD_LINE_FACTOR-1]/((double)N_FIELD_LINES);

        i = 0;
        draw_field_line(linex[0], liney[0], xy_in, nablax, nablay, 0.00002, 100000);
        for (j = 1; j < N_FIELD_LINES+1; j++)
        {
            /* bug fix: test the bound before reading integral[i], and keep i  */
            /* strictly below the array size — the original could step i up to */
            /* N_FIELD_LINES*FIELD_LINE_FACTOR and then read linex/liney one   */
            /* element past the end                                            */
            while ((i < N_FIELD_LINES*FIELD_LINE_FACTOR - 1)&&(integral[i] <= j*deltaintens)) i++;
            draw_field_line(linex[i], liney[i], xy_in, nablax, nablay, 0.00002, 100000);
            counter++;
        }
        printf("%i lines\n", counter);
    }

    for (i=0; i<NX; i++)
    {
        free(nablax[i]);
        free(nablay[i]);
    }
}
void evolve_wave_half(double *phi_in[NX], double *phi_out[NX], short int *xy_in[NX])
/* one half time step of the heat/advection evolution, phi_in -> phi_out */
/* only interior cells (xy_in == 1) are updated; uses globals intstep, intstep1 */
{
    int i, j, iplus, iminus, jplus, jminus;
    double delta1, x;

    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta1,x)
    for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
            if (xy_in[i][j] == 1){
                /* neighbour indices for the discretized Laplacian */
                if (B_COND == BC_PERIODIC)
                {
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    jplus = (j+1) % NY;
                    jminus = (j-1) % NY;
                    if (jminus < 0) jminus += NY;
                }
                else    /* BC_DIRICHLET, BC_ABSORBING: clamp at the borders;   */
                {       /* also the safe fallback for any other B_COND value   */
                        /* (the original left the indices uninitialized there) */
                    iplus = (i+1);  if (iplus == NX) iplus = NX-1;
                    iminus = (i-1); if (iminus == -1) iminus = 0;
                    jplus = (j+1);  if (jplus == NY) jplus = NY-1;
                    jminus = (j-1); if (jminus == -1) jminus = 0;
                }

                /* 5-point discretized Laplacian */
                delta1 = phi_in[iplus][j] + phi_in[iminus][j] + phi_in[i][jplus] + phi_in[i][jminus] - 4.0*phi_in[i][j];
                x = phi_in[i][j];

                /* evolve phi */
                if (B_COND != BC_ABSORBING)
                {
                    /* diffusion plus drift of speed SPEED towards the right */
                    phi_out[i][j] = x + intstep*(delta1 - SPEED*(phi_in[iplus][j] - phi_in[i][j]));
                }
                else /* case of absorbing b.c. - this is only an approximation of correct way of implementing */
                {
                    /* in the bulk */
                    if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1))
                    {
                        /* bug fix: the original used 'delta2' here, which was  */
                        /* never assigned (undefined behaviour); the bulk       */
                        /* evolves by plain diffusion on the Laplacian delta1   */
                        phi_out[i][j] = x + intstep*delta1;
                    }
                    /* right border */
                    else if (i==NX-1)
                    {
                        phi_out[i][j] = x - intstep1*(x - phi_in[i-1][j]);
                    }
                    /* upper border */
                    else if (j==NY-1)
                    {
                        phi_out[i][j] = x - intstep1*(x - phi_in[i][j-1]);
                    }
                    /* left border */
                    else if (i==0)
                    {
                        phi_out[i][j] = x - intstep1*(x - phi_in[1][j]);
                    }
                    /* lower border */
                    else if (j==0)
                    {
                        phi_out[i][j] = x - intstep1*(x - phi_in[i][1]);
                    }
                }

                /* optional clipping, for debugging purposes */
                if (FLOOR)
                {
                    if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
                    if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
                }
            }
        }
    }
}
void evolve_wave(double *phi[NX], double *phi_tmp[NX], short int *xy_in[NX])
/* time step of field evolution */
/* one full step = two half steps, ping-ponging between phi and phi_tmp */
/* so that the final result lands back in phi */
{
    evolve_wave_half(phi, phi_tmp, xy_in);
    evolve_wave_half(phi_tmp, phi, xy_in);
}
double compute_variance(double *phi[NX], short int * xy_in[NX])
/* compute the variance (total probability) of the field */
/* returns the mean of phi^2 over all counted in-domain cells */
{
    int p, q, count = 0;
    double sum = 0.0;

    /* NOTE(review): the loops start at 1, so row 0 and column 0 are never */
    /* counted — presumably a deliberate boundary skip, worth confirming   */
    for (p=1; p<NX; p++)
        for (q=1; q<NY; q++)
            if (xy_in[p][q])
            {
                count++;
                sum += phi[p][q]*phi[p][q];
            }

    /* guard against an empty domain */
    if (count == 0) count = 1;
    return(sum/(double)count);
}
void renormalise_field(double *phi[NX], short int * xy_in[NX], double variance)
/* renormalise variance of field */
/* divides every in-domain value by the standard deviation sqrt(variance) */
{
    int p, q;
    double stdv;

    stdv = sqrt(variance);

    /* same index-1 start as compute_variance, for consistency */
    for (p=1; p<NX; p++)
        for (q=1; q<NY; q++)
            if (xy_in[p][q]) phi[p][q] = phi[p][q]/stdv;
}
void print_level(int level)
/* write the current recursion level in white near the top-left corner */
{
    char text[50];
    double position[2];

    glColor3f(1.0, 1.0, 1.0);
    sprintf(text, "Level %i", level);
    xy_to_pos(XMIN + 0.1, YMAX - 0.2, position);
    write_text(position[0], position[1], text);
}
void print_Julia_parameters()
/* write the current Julia parameter c = julia_x + i*julia_y on screen */
{
    char text[50];
    double position[2];

    glColor3f(1.0, 1.0, 1.0);
    /* for a negative imaginary part the minus sign comes from %f itself */
    if (julia_y < 0.0) sprintf(text, "c = %.5f %.5f i", julia_x, julia_y);
    else sprintf(text, "c = %.5f + %.5f i", julia_x, julia_y);
    xy_to_pos(XMIN + 0.1, YMAX - 0.2, position);
    write_text(position[0], position[1], text);
}
void set_Julia_parameters(int time, double *phi[NX], short int *xy_in[NX])
/* move the Julia parameter c along a circle of radius 0.15 centred at -0.9 */
/* (one full turn over the NSTEPS frames), then refresh the boundary data   */
{
    double angle;
    double radius = 0.15;

    angle = (double)time*DPI/(double)NSTEPS;
    julia_x = -0.9 + radius*cos(angle);
    julia_y = radius*sin(angle);

    init_julia_set(phi, xy_in);

    printf("Julia set parameters : i = %i, angle = %.5lg, cx = %.5lg, cy = %.5lg \n", time, angle, julia_x, julia_y);
}
void set_Julia_parameters_cardioid(int time, double *phi[NX], short int *xy_in[NX])
/* move the Julia parameter c along a (vertically perturbed) cardioid, */
/* then refresh the boundary data                                      */
{
    double t, c, s, yshift;

    t = pow(1.05 + (double)time*0.00003, 0.333);
    yshift = 0.02*sin((double)time*PID*0.002);

    c = cos(t);
    s = sin(t);

    /* cardioid parametrisation of the main Mandelbrot boundary */
    julia_x = 0.5*(c*(1.0 - 0.5*c) + 0.5*s*s);
    julia_y = 0.5*s*(1.0 - c) + yshift;

    init_julia_set(phi, xy_in);

    printf("Julia set parameters : i = %i, angle = %.5lg, cx = %.5lg, cy = %.5lg \n", time, t, julia_x, julia_y);
}
void animation()
/* main loop: allocate the grids, initialise the field, then alternate */
/* drawing a frame and advancing the heat equation NVID steps          */
{
    double scale, dx, var;
    double *phi[NX], *phi_tmp[NX];
    short int *xy_in[NX];
    int i, j, s;        /* removed unused locals time, jangle, cosj, sinj */

    /* Since NX and NY are big, it seemed wiser to use some memory allocation here */
    for (i=0; i<NX; i++)
    {
        phi[i] = (double *)malloc(NY*sizeof(double));
        phi_tmp[i] = (double *)malloc(NY*sizeof(double));
        xy_in[i] = (short int *)malloc(NY*sizeof(short int));
    }

    npolyline = init_polyline(MDEPTH, polyline);
    for (i=0; i<npolyline; i++) printf("vertex %i: (%.3f, %.3f)\n", i, polyline[i].x, polyline[i].y);

    dx = (XMAX-XMIN)/((double)NX);
    intstep = DT/(dx*dx*VISCOSITY);     /* bulk integration step */
    intstep1 = DT/(dx*VISCOSITY);       /* step used by absorbing boundary conditions */
    printf("Integration step %.3lg\n", intstep);

    /* initialize the temperature field */
    /* init_gaussian(x, y, mean, amplitude, scalex, phi, xy_in) */
    init_gaussian(-1.0, 0.0, 0.1, 0.0, 0.01, phi, xy_in);

    if (SCALE)
    {
        var = compute_variance(phi, xy_in);
        scale = sqrt(1.0 + var);
        renormalise_field(phi, xy_in, var);
    }

    blank();
    glColor3f(0.0, 0.0, 0.0);
    glutSwapBuffers();

    /* first frame, shown for SLEEP1 seconds */
    draw_wave(phi, xy_in, 1.0, 0);
    draw_billiard();
    glutSwapBuffers();

    sleep(SLEEP1);
    if (MOVIE) for (i=0; i<SLEEP1*25; i++) save_frame();

    for (i=0; i<=NSTEPS; i++)
    {
        /* compute the variance of the field to adjust color scheme */
        /* the color depends on the field divided by sqrt(1 + variance) */
        if (SCALE)
        {
            var = compute_variance(phi, xy_in);
            scale = sqrt(1.0 + var);
            renormalise_field(phi, xy_in, var);
        }
        else scale = 1.0;

        draw_wave(phi, xy_in, scale, i);
        for (j=0; j<NVID; j++) evolve_wave(phi, phi_tmp, xy_in);
        draw_billiard();
        glutSwapBuffers();

        if (MOVIE)
        {
            save_frame();

            /* it seems that saving too many files too fast can cause trouble with the file system */
            /* so this is to make a pause from time to time - parameter PAUSE may need adjusting */
            if (i % PAUSE == PAUSE - 1)
            {
                printf("Making a short pause\n");
                sleep(PSLEEP);
                s = system("mv wave*.tif tif_heat/");
            }
        }
    }

    if (MOVIE)
    {
        for (i=0; i<20; i++) save_frame();
        s = system("mv wave*.tif tif_heat/");
    }

    /* bug fix: the original freed only phi and phi_tmp, leaking xy_in */
    for (i=0; i<NX; i++)
    {
        free(phi[i]);
        free(phi_tmp[i]);
        free(xy_in[i]);
    }
}
void display(void)
/* GLUT display callback: run the full animation once, then close the window */
{
    glPushMatrix();

    /* clear both halves of the double buffer before starting */
    blank();
    glutSwapBuffers();
    blank();
    glutSwapBuffers();

    animation();
    sleep(SLEEP2);      /* keep the final frame on screen for SLEEP2 seconds */

    glPopMatrix();
    glutDestroyWindow(glutGetWindow());
}
int main(int argc, char** argv)
/* set up GLUT, open the window and hand control to the event loop */
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);   /* double-buffered RGB */
    glutInitWindowSize(WINWIDTH,WINHEIGHT);
    glutCreateWindow("Heat equation in a planar domain");

    init();     /* view/projection setup, defined in the included project files */
    glutDisplayFunc(display);
    glutMainLoop();     /* does not return; window closed from display() */

    return 0;
}
|
convolution_7x7_pack1to8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv7x7s2_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* r3 = img0.row<const __fp16>(3);
const __fp16* r4 = img0.row<const __fp16>(4);
const __fp16* r5 = img0.row<const __fp16>(5);
const __fp16* r6 = img0.row<const __fp16>(6);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" // sum0
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r0
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"fmla v24.8h, v16.8h, v0.h[0] \n"
"fmla v25.8h, v16.8h, v0.h[2] \n"
"fmla v26.8h, v16.8h, v0.h[4] \n"
"fmla v27.8h, v16.8h, v0.h[6] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[2] \n"
"fmla v30.8h, v16.8h, v1.h[4] \n"
"fmla v31.8h, v16.8h, v1.h[6] \n"
"sub %0, %0, #64 \n"
"fmla v24.8h, v17.8h, v0.h[1] \n"
"fmla v25.8h, v17.8h, v0.h[3] \n"
"fmla v26.8h, v17.8h, v0.h[5] \n"
"fmla v27.8h, v17.8h, v0.h[7] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[3] \n"
"fmla v30.8h, v17.8h, v1.h[5] \n"
"fmla v31.8h, v17.8h, v1.h[7] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v24.8h, v18.8h, v0.h[2] \n"
"fmla v25.8h, v18.8h, v0.h[4] \n"
"fmla v26.8h, v18.8h, v0.h[6] \n"
"fmla v27.8h, v18.8h, v1.h[0] \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[4] \n"
"fmla v30.8h, v18.8h, v1.h[6] \n"
"fmla v31.8h, v18.8h, v2.h[0] \n"
"fmla v24.8h, v19.8h, v0.h[3] \n"
"fmla v25.8h, v19.8h, v0.h[5] \n"
"fmla v26.8h, v19.8h, v0.h[7] \n"
"fmla v27.8h, v19.8h, v1.h[1] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[5] \n"
"fmla v30.8h, v19.8h, v1.h[7] \n"
"fmla v31.8h, v19.8h, v2.h[1] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v4.8h, v5.8h, v6.8h}, [%2] \n" // r1
"fmla v24.8h, v20.8h, v0.h[4] \n"
"fmla v25.8h, v20.8h, v0.h[6] \n"
"fmla v26.8h, v20.8h, v1.h[0] \n"
"fmla v27.8h, v20.8h, v1.h[2] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[6] \n"
"fmla v30.8h, v20.8h, v2.h[0] \n"
"fmla v31.8h, v20.8h, v2.h[2] \n"
"fmla v24.8h, v21.8h, v0.h[5] \n"
"fmla v25.8h, v21.8h, v0.h[7] \n"
"fmla v26.8h, v21.8h, v1.h[1] \n"
"fmla v27.8h, v21.8h, v1.h[3] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[7] \n"
"fmla v30.8h, v21.8h, v2.h[1] \n"
"fmla v31.8h, v21.8h, v2.h[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v24.8h, v22.8h, v0.h[6] \n"
"fmla v25.8h, v22.8h, v1.h[0] \n"
"fmla v26.8h, v22.8h, v1.h[2] \n"
"fmla v27.8h, v22.8h, v1.h[4] \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[0] \n"
"fmla v30.8h, v22.8h, v2.h[2] \n"
"fmla v31.8h, v22.8h, v2.h[4] \n"
"fmla v24.8h, v23.8h, v4.h[0] \n"
"fmla v25.8h, v23.8h, v4.h[2] \n"
"fmla v26.8h, v23.8h, v4.h[4] \n"
"fmla v27.8h, v23.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v5.h[0] \n"
"fmla v29.8h, v23.8h, v5.h[2] \n"
"fmla v30.8h, v23.8h, v5.h[4] \n"
"fmla v31.8h, v23.8h, v5.h[6] \n"
"fmla v24.8h, v16.8h, v4.h[1] \n"
"fmla v25.8h, v16.8h, v4.h[3] \n"
"fmla v26.8h, v16.8h, v4.h[5] \n"
"fmla v27.8h, v16.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[1] \n"
"fmla v29.8h, v16.8h, v5.h[3] \n"
"fmla v30.8h, v16.8h, v5.h[5] \n"
"fmla v31.8h, v16.8h, v5.h[7] \n"
"fmla v24.8h, v17.8h, v4.h[2] \n"
"fmla v25.8h, v17.8h, v4.h[4] \n"
"fmla v26.8h, v17.8h, v4.h[6] \n"
"fmla v27.8h, v17.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v5.h[2] \n"
"fmla v29.8h, v17.8h, v5.h[4] \n"
"fmla v30.8h, v17.8h, v5.h[6] \n"
"fmla v31.8h, v17.8h, v6.h[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v24.8h, v18.8h, v4.h[3] \n"
"fmla v25.8h, v18.8h, v4.h[5] \n"
"fmla v26.8h, v18.8h, v4.h[7] \n"
"fmla v27.8h, v18.8h, v5.h[1] \n"
"fmla v28.8h, v18.8h, v5.h[3] \n"
"fmla v29.8h, v18.8h, v5.h[5] \n"
"fmla v30.8h, v18.8h, v5.h[7] \n"
"fmla v31.8h, v18.8h, v6.h[1] \n"
"fmla v24.8h, v19.8h, v4.h[4] \n"
"fmla v25.8h, v19.8h, v4.h[6] \n"
"fmla v26.8h, v19.8h, v5.h[0] \n"
"fmla v27.8h, v19.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v5.h[4] \n"
"fmla v29.8h, v19.8h, v5.h[6] \n"
"fmla v30.8h, v19.8h, v6.h[0] \n"
"fmla v31.8h, v19.8h, v6.h[2] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r2
"fmla v24.8h, v20.8h, v4.h[5] \n"
"fmla v25.8h, v20.8h, v4.h[7] \n"
"fmla v26.8h, v20.8h, v5.h[1] \n"
"fmla v27.8h, v20.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v5.h[5] \n"
"fmla v29.8h, v20.8h, v5.h[7] \n"
"fmla v30.8h, v20.8h, v6.h[1] \n"
"fmla v31.8h, v20.8h, v6.h[3] \n"
"fmla v24.8h, v21.8h, v4.h[6] \n"
"fmla v25.8h, v21.8h, v5.h[0] \n"
"fmla v26.8h, v21.8h, v5.h[2] \n"
"fmla v27.8h, v21.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v5.h[6] \n"
"fmla v29.8h, v21.8h, v6.h[0] \n"
"fmla v30.8h, v21.8h, v6.h[2] \n"
"fmla v31.8h, v21.8h, v6.h[4] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v24.8h, v22.8h, v0.h[0] \n"
"fmla v25.8h, v22.8h, v0.h[2] \n"
"fmla v26.8h, v22.8h, v0.h[4] \n"
"fmla v27.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v22.8h, v1.h[0] \n"
"fmla v29.8h, v22.8h, v1.h[2] \n"
"fmla v30.8h, v22.8h, v1.h[4] \n"
"fmla v31.8h, v22.8h, v1.h[6] \n"
"fmla v24.8h, v23.8h, v0.h[1] \n"
"fmla v25.8h, v23.8h, v0.h[3] \n"
"fmla v26.8h, v23.8h, v0.h[5] \n"
"fmla v27.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v23.8h, v1.h[1] \n"
"fmla v29.8h, v23.8h, v1.h[3] \n"
"fmla v30.8h, v23.8h, v1.h[5] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v24.8h, v16.8h, v0.h[2] \n"
"fmla v25.8h, v16.8h, v0.h[4] \n"
"fmla v26.8h, v16.8h, v0.h[6] \n"
"fmla v27.8h, v16.8h, v1.h[0] \n"
"fmla v28.8h, v16.8h, v1.h[2] \n"
"fmla v29.8h, v16.8h, v1.h[4] \n"
"fmla v30.8h, v16.8h, v1.h[6] \n"
"fmla v31.8h, v16.8h, v2.h[0] \n"
"fmla v24.8h, v17.8h, v0.h[3] \n"
"fmla v25.8h, v17.8h, v0.h[5] \n"
"fmla v26.8h, v17.8h, v0.h[7] \n"
"fmla v27.8h, v17.8h, v1.h[1] \n"
"fmla v28.8h, v17.8h, v1.h[3] \n"
"fmla v29.8h, v17.8h, v1.h[5] \n"
"fmla v30.8h, v17.8h, v1.h[7] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v24.8h, v18.8h, v0.h[4] \n"
"fmla v25.8h, v18.8h, v0.h[6] \n"
"fmla v26.8h, v18.8h, v1.h[0] \n"
"fmla v27.8h, v18.8h, v1.h[2] \n"
"fmla v28.8h, v18.8h, v1.h[4] \n"
"fmla v29.8h, v18.8h, v1.h[6] \n"
"fmla v30.8h, v18.8h, v2.h[0] \n"
"fmla v31.8h, v18.8h, v2.h[2] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v4.8h, v5.8h, v6.8h}, [%4] \n" // r3
"fmla v24.8h, v19.8h, v0.h[5] \n"
"fmla v25.8h, v19.8h, v0.h[7] \n"
"fmla v26.8h, v19.8h, v1.h[1] \n"
"fmla v27.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v19.8h, v1.h[5] \n"
"fmla v29.8h, v19.8h, v1.h[7] \n"
"fmla v30.8h, v19.8h, v2.h[1] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v24.8h, v20.8h, v0.h[6] \n"
"fmla v25.8h, v20.8h, v1.h[0] \n"
"fmla v26.8h, v20.8h, v1.h[2] \n"
"fmla v27.8h, v20.8h, v1.h[4] \n"
"fmla v28.8h, v20.8h, v1.h[6] \n"
"fmla v29.8h, v20.8h, v2.h[0] \n"
"fmla v30.8h, v20.8h, v2.h[2] \n"
"fmla v31.8h, v20.8h, v2.h[4] \n"
"fmla v24.8h, v21.8h, v4.h[0] \n"
"fmla v25.8h, v21.8h, v4.h[2] \n"
"fmla v26.8h, v21.8h, v4.h[4] \n"
"fmla v27.8h, v21.8h, v4.h[6] \n"
"fmla v28.8h, v21.8h, v5.h[0] \n"
"fmla v29.8h, v21.8h, v5.h[2] \n"
"fmla v30.8h, v21.8h, v5.h[4] \n"
"fmla v31.8h, v21.8h, v5.h[6] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v24.8h, v22.8h, v4.h[1] \n"
"fmla v25.8h, v22.8h, v4.h[3] \n"
"fmla v26.8h, v22.8h, v4.h[5] \n"
"fmla v27.8h, v22.8h, v4.h[7] \n"
"fmla v28.8h, v22.8h, v5.h[1] \n"
"fmla v29.8h, v22.8h, v5.h[3] \n"
"fmla v30.8h, v22.8h, v5.h[5] \n"
"fmla v31.8h, v22.8h, v5.h[7] \n"
"fmla v24.8h, v23.8h, v4.h[2] \n"
"fmla v25.8h, v23.8h, v4.h[4] \n"
"fmla v26.8h, v23.8h, v4.h[6] \n"
"fmla v27.8h, v23.8h, v5.h[0] \n"
"fmla v28.8h, v23.8h, v5.h[2] \n"
"fmla v29.8h, v23.8h, v5.h[4] \n"
"fmla v30.8h, v23.8h, v5.h[6] \n"
"fmla v31.8h, v23.8h, v6.h[0] \n"
"fmla v24.8h, v16.8h, v4.h[3] \n"
"fmla v25.8h, v16.8h, v4.h[5] \n"
"fmla v26.8h, v16.8h, v4.h[7] \n"
"fmla v27.8h, v16.8h, v5.h[1] \n"
"fmla v28.8h, v16.8h, v5.h[3] \n"
"fmla v29.8h, v16.8h, v5.h[5] \n"
"fmla v30.8h, v16.8h, v5.h[7] \n"
"fmla v31.8h, v16.8h, v6.h[1] \n"
"fmla v24.8h, v17.8h, v4.h[4] \n"
"fmla v25.8h, v17.8h, v4.h[6] \n"
"fmla v26.8h, v17.8h, v5.h[0] \n"
"fmla v27.8h, v17.8h, v5.h[2] \n"
"fmla v28.8h, v17.8h, v5.h[4] \n"
"fmla v29.8h, v17.8h, v5.h[6] \n"
"fmla v30.8h, v17.8h, v6.h[0] \n"
"fmla v31.8h, v17.8h, v6.h[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v24.8h, v18.8h, v4.h[5] \n"
"fmla v25.8h, v18.8h, v4.h[7] \n"
"fmla v26.8h, v18.8h, v5.h[1] \n"
"fmla v27.8h, v18.8h, v5.h[3] \n"
"fmla v28.8h, v18.8h, v5.h[5] \n"
"fmla v29.8h, v18.8h, v5.h[7] \n"
"fmla v30.8h, v18.8h, v6.h[1] \n"
"fmla v31.8h, v18.8h, v6.h[3] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%5] \n" // r4
"fmla v24.8h, v19.8h, v4.h[6] \n"
"fmla v25.8h, v19.8h, v5.h[0] \n"
"fmla v26.8h, v19.8h, v5.h[2] \n"
"fmla v27.8h, v19.8h, v5.h[4] \n"
"fmla v28.8h, v19.8h, v5.h[6] \n"
"fmla v29.8h, v19.8h, v6.h[0] \n"
"fmla v30.8h, v19.8h, v6.h[2] \n"
"fmla v31.8h, v19.8h, v6.h[4] \n"
"fmla v24.8h, v20.8h, v0.h[0] \n"
"fmla v25.8h, v20.8h, v0.h[2] \n"
"fmla v26.8h, v20.8h, v0.h[4] \n"
"fmla v27.8h, v20.8h, v0.h[6] \n"
"fmla v28.8h, v20.8h, v1.h[0] \n"
"fmla v29.8h, v20.8h, v1.h[2] \n"
"fmla v30.8h, v20.8h, v1.h[4] \n"
"fmla v31.8h, v20.8h, v1.h[6] \n"
"fmla v24.8h, v21.8h, v0.h[1] \n"
"fmla v25.8h, v21.8h, v0.h[3] \n"
"fmla v26.8h, v21.8h, v0.h[5] \n"
"fmla v27.8h, v21.8h, v0.h[7] \n"
"fmla v28.8h, v21.8h, v1.h[1] \n"
"fmla v29.8h, v21.8h, v1.h[3] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[7] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v24.8h, v22.8h, v0.h[2] \n"
"fmla v25.8h, v22.8h, v0.h[4] \n"
"fmla v26.8h, v22.8h, v0.h[6] \n"
"fmla v27.8h, v22.8h, v1.h[0] \n"
"fmla v28.8h, v22.8h, v1.h[2] \n"
"fmla v29.8h, v22.8h, v1.h[4] \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v22.8h, v2.h[0] \n"
"fmla v24.8h, v23.8h, v0.h[3] \n"
"fmla v25.8h, v23.8h, v0.h[5] \n"
"fmla v26.8h, v23.8h, v0.h[7] \n"
"fmla v27.8h, v23.8h, v1.h[1] \n"
"fmla v28.8h, v23.8h, v1.h[3] \n"
"fmla v29.8h, v23.8h, v1.h[5] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[1] \n"
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v4.8h, v5.8h, v6.8h}, [%6] \n" // r5
"fmla v24.8h, v16.8h, v0.h[4] \n"
"fmla v25.8h, v16.8h, v0.h[6] \n"
"fmla v26.8h, v16.8h, v1.h[0] \n"
"fmla v27.8h, v16.8h, v1.h[2] \n"
"fmla v28.8h, v16.8h, v1.h[4] \n"
"fmla v29.8h, v16.8h, v1.h[6] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v2.h[2] \n"
"fmla v24.8h, v17.8h, v0.h[5] \n"
"fmla v25.8h, v17.8h, v0.h[7] \n"
"fmla v26.8h, v17.8h, v1.h[1] \n"
"fmla v27.8h, v17.8h, v1.h[3] \n"
"fmla v28.8h, v17.8h, v1.h[5] \n"
"fmla v29.8h, v17.8h, v1.h[7] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v24.8h, v18.8h, v0.h[6] \n"
"fmla v25.8h, v18.8h, v1.h[0] \n"
"fmla v26.8h, v18.8h, v1.h[2] \n"
"fmla v27.8h, v18.8h, v1.h[4] \n"
"fmla v28.8h, v18.8h, v1.h[6] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[0] \n"
"fmla v31.8h, v18.8h, v2.h[4] \n"
"fmla v24.8h, v19.8h, v4.h[0] \n"
"fmla v25.8h, v19.8h, v4.h[2] \n"
"fmla v26.8h, v19.8h, v4.h[4] \n"
"fmla v27.8h, v19.8h, v4.h[6] \n"
"fmla v28.8h, v19.8h, v5.h[0] \n"
"fmla v29.8h, v19.8h, v5.h[2] \n"
"fmla v30.8h, v19.8h, v5.h[4] \n"
"fmla v31.8h, v19.8h, v5.h[6] \n"
"fmla v24.8h, v20.8h, v4.h[1] \n"
"fmla v25.8h, v20.8h, v4.h[3] \n"
"fmla v26.8h, v20.8h, v4.h[5] \n"
"fmla v27.8h, v20.8h, v4.h[7] \n"
"fmla v28.8h, v20.8h, v5.h[1] \n"
"fmla v29.8h, v20.8h, v5.h[3] \n"
"fmla v30.8h, v20.8h, v5.h[5] \n"
"fmla v31.8h, v20.8h, v5.h[7] \n"
"fmla v24.8h, v21.8h, v4.h[2] \n"
"fmla v25.8h, v21.8h, v4.h[4] \n"
"fmla v26.8h, v21.8h, v4.h[6] \n"
"fmla v27.8h, v21.8h, v5.h[0] \n"
"fmla v28.8h, v21.8h, v5.h[2] \n"
"fmla v29.8h, v21.8h, v5.h[4] \n"
"fmla v30.8h, v21.8h, v5.h[6] \n"
"fmla v31.8h, v21.8h, v6.h[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v24.8h, v22.8h, v4.h[3] \n"
"fmla v25.8h, v22.8h, v4.h[5] \n"
"fmla v26.8h, v22.8h, v4.h[7] \n"
"fmla v27.8h, v22.8h, v5.h[1] \n"
"fmla v28.8h, v22.8h, v5.h[3] \n"
"fmla v29.8h, v22.8h, v5.h[5] \n"
"fmla v30.8h, v22.8h, v5.h[7] \n"
"fmla v31.8h, v22.8h, v6.h[1] \n"
"fmla v24.8h, v23.8h, v4.h[4] \n"
"fmla v25.8h, v23.8h, v4.h[6] \n"
"fmla v26.8h, v23.8h, v5.h[0] \n"
"fmla v27.8h, v23.8h, v5.h[2] \n"
"fmla v28.8h, v23.8h, v5.h[4] \n"
"fmla v29.8h, v23.8h, v5.h[6] \n"
"fmla v30.8h, v23.8h, v6.h[0] \n"
"fmla v31.8h, v23.8h, v6.h[2] \n"
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%7] \n" // r6
"fmla v24.8h, v16.8h, v4.h[5] \n"
"fmla v25.8h, v16.8h, v4.h[7] \n"
"fmla v26.8h, v16.8h, v5.h[1] \n"
"fmla v27.8h, v16.8h, v5.h[3] \n"
"fmla v28.8h, v16.8h, v5.h[5] \n"
"fmla v29.8h, v16.8h, v5.h[7] \n"
"fmla v30.8h, v16.8h, v6.h[1] \n"
"fmla v31.8h, v16.8h, v6.h[3] \n"
"fmla v24.8h, v17.8h, v4.h[6] \n"
"fmla v25.8h, v17.8h, v5.h[0] \n"
"fmla v26.8h, v17.8h, v5.h[2] \n"
"fmla v27.8h, v17.8h, v5.h[4] \n"
"fmla v28.8h, v17.8h, v5.h[6] \n"
"fmla v29.8h, v17.8h, v6.h[0] \n"
"fmla v30.8h, v17.8h, v6.h[2] \n"
"fmla v31.8h, v17.8h, v6.h[4] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v24.8h, v18.8h, v0.h[0] \n"
"fmla v25.8h, v18.8h, v0.h[2] \n"
"fmla v26.8h, v18.8h, v0.h[4] \n"
"fmla v27.8h, v18.8h, v0.h[6] \n"
"fmla v28.8h, v18.8h, v1.h[0] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v1.h[4] \n"
"fmla v31.8h, v18.8h, v1.h[6] \n"
"fmla v24.8h, v19.8h, v0.h[1] \n"
"fmla v25.8h, v19.8h, v0.h[3] \n"
"fmla v26.8h, v19.8h, v0.h[5] \n"
"fmla v27.8h, v19.8h, v0.h[7] \n"
"fmla v28.8h, v19.8h, v1.h[1] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v1.h[5] \n"
"fmla v31.8h, v19.8h, v1.h[7] \n"
"fmla v24.8h, v20.8h, v0.h[2] \n"
"fmla v25.8h, v20.8h, v0.h[4] \n"
"fmla v26.8h, v20.8h, v0.h[6] \n"
"fmla v27.8h, v20.8h, v1.h[0] \n"
"fmla v28.8h, v20.8h, v1.h[2] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v1.h[6] \n"
"fmla v31.8h, v20.8h, v2.h[0] \n"
"add %1, %1, #32 \n"
"fmla v24.8h, v21.8h, v0.h[3] \n"
"fmla v25.8h, v21.8h, v0.h[5] \n"
"fmla v26.8h, v21.8h, v0.h[7] \n"
"fmla v27.8h, v21.8h, v1.h[1] \n"
"add %2, %2, #32 \n"
"fmla v28.8h, v21.8h, v1.h[3] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v1.h[7] \n"
"fmla v31.8h, v21.8h, v2.h[1] \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v16.8h}, [%8] \n"
"fmla v24.8h, v22.8h, v0.h[4] \n"
"fmla v25.8h, v22.8h, v0.h[6] \n"
"fmla v26.8h, v22.8h, v1.h[0] \n"
"fmla v27.8h, v22.8h, v1.h[2] \n"
"add %3, %3, #32 \n"
"fmla v28.8h, v22.8h, v1.h[4] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[0] \n"
"fmla v31.8h, v22.8h, v2.h[2] \n"
"add %4, %4, #32 \n"
"fmla v24.8h, v23.8h, v0.h[5] \n"
"fmla v25.8h, v23.8h, v0.h[7] \n"
"fmla v26.8h, v23.8h, v1.h[1] \n"
"fmla v27.8h, v23.8h, v1.h[3] \n"
"add %5, %5, #32 \n"
"fmla v28.8h, v23.8h, v1.h[5] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[1] \n"
"fmla v31.8h, v23.8h, v2.h[3] \n"
"add %6, %6, #32 \n"
"fmla v24.8h, v16.8h, v0.h[6] \n"
"fmla v25.8h, v16.8h, v1.h[0] \n"
"fmla v26.8h, v16.8h, v1.h[2] \n"
"fmla v27.8h, v16.8h, v1.h[4] \n"
"add %7, %7, #32 \n"
"fmla v28.8h, v16.8h, v1.h[6] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[2] \n"
"fmla v31.8h, v16.8h, v2.h[4] \n"
"sub %8, %8, #768 \n" // kptr -= 48 * 8;
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.8h, v1.8h}, [%1] \n" // r0
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v0.h[2] \n"
"fmla v30.8h, v16.8h, v0.h[4] \n"
"fmla v31.8h, v16.8h, v0.h[6] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v0.h[3] \n"
"fmla v30.8h, v17.8h, v0.h[5] \n"
"fmla v31.8h, v17.8h, v0.h[7] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v0.h[4] \n"
"fmla v30.8h, v18.8h, v0.h[6] \n"
"fmla v31.8h, v18.8h, v1.h[0] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v0.h[5] \n"
"fmla v30.8h, v19.8h, v0.h[7] \n"
"fmla v31.8h, v19.8h, v1.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v2.8h, v3.8h}, [%2] \n" // r1
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v0.h[6] \n"
"fmla v30.8h, v20.8h, v1.h[0] \n"
"fmla v31.8h, v20.8h, v1.h[2] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v0.h[7] \n"
"fmla v30.8h, v21.8h, v1.h[1] \n"
"fmla v31.8h, v21.8h, v1.h[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[0] \n"
"fmla v30.8h, v22.8h, v1.h[2] \n"
"fmla v31.8h, v22.8h, v1.h[4] \n"
"fmla v28.8h, v23.8h, v2.h[0] \n"
"fmla v29.8h, v23.8h, v2.h[2] \n"
"fmla v30.8h, v23.8h, v2.h[4] \n"
"fmla v31.8h, v23.8h, v2.h[6] \n"
"fmla v28.8h, v16.8h, v2.h[1] \n"
"fmla v29.8h, v16.8h, v2.h[3] \n"
"fmla v30.8h, v16.8h, v2.h[5] \n"
"fmla v31.8h, v16.8h, v2.h[7] \n"
"fmla v28.8h, v17.8h, v2.h[2] \n"
"fmla v29.8h, v17.8h, v2.h[4] \n"
"fmla v30.8h, v17.8h, v2.h[6] \n"
"fmla v31.8h, v17.8h, v3.h[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v28.8h, v18.8h, v2.h[3] \n"
"fmla v29.8h, v18.8h, v2.h[5] \n"
"fmla v30.8h, v18.8h, v2.h[7] \n"
"fmla v31.8h, v18.8h, v3.h[1] \n"
"fmla v28.8h, v19.8h, v2.h[4] \n"
"fmla v29.8h, v19.8h, v2.h[6] \n"
"fmla v30.8h, v19.8h, v3.h[0] \n"
"fmla v31.8h, v19.8h, v3.h[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.8h, v1.8h}, [%3] \n" // r2
"fmla v28.8h, v20.8h, v2.h[5] \n"
"fmla v29.8h, v20.8h, v2.h[7] \n"
"fmla v30.8h, v20.8h, v3.h[1] \n"
"fmla v31.8h, v20.8h, v3.h[3] \n"
"fmla v28.8h, v21.8h, v2.h[6] \n"
"fmla v29.8h, v21.8h, v3.h[0] \n"
"fmla v30.8h, v21.8h, v3.h[2] \n"
"fmla v31.8h, v21.8h, v3.h[4] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v28.8h, v22.8h, v0.h[0] \n"
"fmla v29.8h, v22.8h, v0.h[2] \n"
"fmla v30.8h, v22.8h, v0.h[4] \n"
"fmla v31.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[1] \n"
"fmla v29.8h, v23.8h, v0.h[3] \n"
"fmla v30.8h, v23.8h, v0.h[5] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[2] \n"
"fmla v29.8h, v16.8h, v0.h[4] \n"
"fmla v30.8h, v16.8h, v0.h[6] \n"
"fmla v31.8h, v16.8h, v1.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[3] \n"
"fmla v29.8h, v17.8h, v0.h[5] \n"
"fmla v30.8h, v17.8h, v0.h[7] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v28.8h, v18.8h, v0.h[4] \n"
"fmla v29.8h, v18.8h, v0.h[6] \n"
"fmla v30.8h, v18.8h, v1.h[0] \n"
"fmla v31.8h, v18.8h, v1.h[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.8h, v3.8h}, [%4] \n" // r3
"fmla v28.8h, v19.8h, v0.h[5] \n"
"fmla v29.8h, v19.8h, v0.h[7] \n"
"fmla v30.8h, v19.8h, v1.h[1] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[6] \n"
"fmla v29.8h, v20.8h, v1.h[0] \n"
"fmla v30.8h, v20.8h, v1.h[2] \n"
"fmla v31.8h, v20.8h, v1.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[0] \n"
"fmla v29.8h, v21.8h, v2.h[2] \n"
"fmla v30.8h, v21.8h, v2.h[4] \n"
"fmla v31.8h, v21.8h, v2.h[6] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v28.8h, v22.8h, v2.h[1] \n"
"fmla v29.8h, v22.8h, v2.h[3] \n"
"fmla v30.8h, v22.8h, v2.h[5] \n"
"fmla v31.8h, v22.8h, v2.h[7] \n"
"fmla v28.8h, v23.8h, v2.h[2] \n"
"fmla v29.8h, v23.8h, v2.h[4] \n"
"fmla v30.8h, v23.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[0] \n"
"fmla v28.8h, v16.8h, v2.h[3] \n"
"fmla v29.8h, v16.8h, v2.h[5] \n"
"fmla v30.8h, v16.8h, v2.h[7] \n"
"fmla v31.8h, v16.8h, v3.h[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.8h, v1.8h}, [%5] \n" // r4
"fmla v28.8h, v17.8h, v2.h[4] \n"
"fmla v29.8h, v17.8h, v2.h[6] \n"
"fmla v30.8h, v17.8h, v3.h[0] \n"
"fmla v31.8h, v17.8h, v3.h[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v28.8h, v18.8h, v2.h[5] \n"
"fmla v29.8h, v18.8h, v2.h[7] \n"
"fmla v30.8h, v18.8h, v3.h[1] \n"
"fmla v31.8h, v18.8h, v3.h[3] \n"
"fmla v28.8h, v19.8h, v2.h[6] \n"
"fmla v29.8h, v19.8h, v3.h[0] \n"
"fmla v30.8h, v19.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[4] \n"
"fmla v28.8h, v20.8h, v0.h[0] \n"
"fmla v29.8h, v20.8h, v0.h[2] \n"
"fmla v30.8h, v20.8h, v0.h[4] \n"
"fmla v31.8h, v20.8h, v0.h[6] \n"
"fmla v28.8h, v21.8h, v0.h[1] \n"
"fmla v29.8h, v21.8h, v0.h[3] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[7] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v28.8h, v22.8h, v0.h[2] \n"
"fmla v29.8h, v22.8h, v0.h[4] \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v22.8h, v1.h[0] \n"
"fmla v28.8h, v23.8h, v0.h[3] \n"
"fmla v29.8h, v23.8h, v0.h[5] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v2.8h, v3.8h}, [%6] \n" // r5
"fmla v28.8h, v16.8h, v0.h[4] \n"
"fmla v29.8h, v16.8h, v0.h[6] \n"
"fmla v30.8h, v16.8h, v1.h[0] \n"
"fmla v31.8h, v16.8h, v1.h[2] \n"
"fmla v28.8h, v17.8h, v0.h[5] \n"
"fmla v29.8h, v17.8h, v0.h[7] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v28.8h, v18.8h, v0.h[6] \n"
"fmla v29.8h, v18.8h, v1.h[0] \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v18.8h, v1.h[4] \n"
"fmla v28.8h, v19.8h, v2.h[0] \n"
"fmla v29.8h, v19.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[4] \n"
"fmla v31.8h, v19.8h, v2.h[6] \n"
"fmla v28.8h, v20.8h, v2.h[1] \n"
"fmla v29.8h, v20.8h, v2.h[3] \n"
"fmla v30.8h, v20.8h, v2.h[5] \n"
"fmla v31.8h, v20.8h, v2.h[7] \n"
"fmla v28.8h, v21.8h, v2.h[2] \n"
"fmla v29.8h, v21.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[6] \n"
"fmla v31.8h, v21.8h, v3.h[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v28.8h, v22.8h, v2.h[3] \n"
"fmla v29.8h, v22.8h, v2.h[5] \n"
"fmla v30.8h, v22.8h, v2.h[7] \n"
"fmla v31.8h, v22.8h, v3.h[1] \n"
"add %1, %1, #16 \n"
"fmla v28.8h, v23.8h, v2.h[4] \n"
"fmla v29.8h, v23.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v3.h[0] \n"
"fmla v31.8h, v23.8h, v3.h[2] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v0.8h, v1.8h}, [%7] \n" // r6
"fmla v28.8h, v16.8h, v2.h[5] \n"
"fmla v29.8h, v16.8h, v2.h[7] \n"
"fmla v30.8h, v16.8h, v3.h[1] \n"
"fmla v31.8h, v16.8h, v3.h[3] \n"
"add %2, %2, #16 \n"
"fmla v28.8h, v17.8h, v2.h[6] \n"
"fmla v29.8h, v17.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v3.h[2] \n"
"fmla v31.8h, v17.8h, v3.h[4] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v28.8h, v18.8h, v0.h[0] \n"
"fmla v29.8h, v18.8h, v0.h[2] \n"
"fmla v30.8h, v18.8h, v0.h[4] \n"
"fmla v31.8h, v18.8h, v0.h[6] \n"
"add %3, %3, #16 \n"
"fmla v28.8h, v19.8h, v0.h[1] \n"
"fmla v29.8h, v19.8h, v0.h[3] \n"
"fmla v30.8h, v19.8h, v0.h[5] \n"
"fmla v31.8h, v19.8h, v0.h[7] \n"
"add %4, %4, #16 \n"
"fmla v28.8h, v20.8h, v0.h[2] \n"
"fmla v29.8h, v20.8h, v0.h[4] \n"
"fmla v30.8h, v20.8h, v0.h[6] \n"
"fmla v31.8h, v20.8h, v1.h[0] \n"
"add %5, %5, #16 \n"
"fmla v28.8h, v21.8h, v0.h[3] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"fmla v30.8h, v21.8h, v0.h[7] \n"
"fmla v31.8h, v21.8h, v1.h[1] \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v16.8h}, [%8] \n"
"fmla v28.8h, v22.8h, v0.h[4] \n"
"fmla v29.8h, v22.8h, v0.h[6] \n"
"fmla v30.8h, v22.8h, v1.h[0] \n"
"fmla v31.8h, v22.8h, v1.h[2] \n"
"add %6, %6, #16 \n"
"fmla v28.8h, v23.8h, v0.h[5] \n"
"fmla v29.8h, v23.8h, v0.h[7] \n"
"fmla v30.8h, v23.8h, v1.h[1] \n"
"fmla v31.8h, v23.8h, v1.h[3] \n"
"add %7, %7, #16 \n"
"fmla v28.8h, v16.8h, v0.h[6] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v1.h[2] \n"
"fmla v31.8h, v16.8h, v1.h[4] \n"
"sub %8, %8, #768 \n" // kptr -= 48 * 8;
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"
);
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1] \n" // r0
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v1.8h}, [%2] \n" // r1
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[0] \n"
"fmla v28.8h, v16.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v30.8h, v18.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[4] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3] \n" // r2
"fmla v28.8h, v20.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[6] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v30.8h, v22.8h, v0.h[0] \n"
"fmla v31.8h, v23.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v1.8h}, [%4] \n" // r3
"fmla v28.8h, v16.8h, v0.h[2] \n"
"fmla v29.8h, v17.8h, v0.h[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v30.8h, v18.8h, v0.h[4] \n"
"fmla v31.8h, v19.8h, v0.h[5] \n"
"add %1, %1, #4 \n"
"fmla v28.8h, v20.8h, v0.h[6] \n"
"fmla v29.8h, v21.8h, v1.h[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v30.8h, v22.8h, v1.h[1] \n"
"fmla v31.8h, v23.8h, v1.h[2] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8h}, [%5] \n" // r4
"fmla v28.8h, v16.8h, v1.h[3] \n"
"fmla v29.8h, v17.8h, v1.h[4] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v30.8h, v18.8h, v1.h[5] \n"
"fmla v31.8h, v19.8h, v1.h[6] \n"
"add %2, %2, #4 \n"
"fmla v28.8h, v20.8h, v0.h[0] \n"
"fmla v29.8h, v21.8h, v0.h[1] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v30.8h, v22.8h, v0.h[2] \n"
"fmla v31.8h, v23.8h, v0.h[3] \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v1.8h}, [%6] \n" // r5
"fmla v28.8h, v16.8h, v0.h[4] \n"
"fmla v29.8h, v17.8h, v0.h[5] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v30.8h, v18.8h, v0.h[6] \n"
"fmla v31.8h, v19.8h, v1.h[0] \n"
"add %3, %3, #4 \n"
"fmla v28.8h, v20.8h, v1.h[1] \n"
"fmla v29.8h, v21.8h, v1.h[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n"
"fmla v30.8h, v22.8h, v1.h[3] \n"
"fmla v31.8h, v23.8h, v1.h[4] \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v0.8h}, [%7] \n" // r6
"fmla v28.8h, v16.8h, v1.h[5] \n"
"fmla v29.8h, v17.8h, v1.h[6] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n"
"fmla v30.8h, v18.8h, v0.h[0] \n"
"fmla v31.8h, v19.8h, v0.h[1] \n"
"add %4, %4, #4 \n"
"fmla v28.8h, v20.8h, v0.h[2] \n"
"fmla v29.8h, v21.8h, v0.h[3] \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v16.8h}, [%8] \n"
"fmla v30.8h, v22.8h, v0.h[4] \n"
"fmla v31.8h, v23.8h, v0.h[5] \n"
"add %5, %5, #4 \n"
"fmla v28.8h, v16.8h, v0.h[6] \n"
"add %6, %6, #4 \n"
"fadd v29.8h, v29.8h, v30.8h \n"
"fadd v31.8h, v31.8h, v28.8h \n"
"add %7, %7, #4 \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"sub %8, %8, #768 \n" // kptr -= 48 * 8;
"st1 {v29.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"
);
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Compute *result = *x - *y as a normalized timeval.
 * NOTE: *y is modified in place while normalizing the microsecond
 * fields so the final subtraction needs no borrow.
 * Returns 1 when the difference is negative, 0 otherwise. */
/* Borrow seconds into *y until x's microseconds are not smaller. */
if (x->tv_usec < y->tv_usec)
{
int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_sec += carry;
y->tv_usec -= carry * 1000000;
}
/* Push excess whole seconds of the microsecond gap back into *y. */
if (x->tv_usec - y->tv_usec > 1000000)
{
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_sec -= carry;
y->tv_usec += carry * 1000000;
}
/* After normalization, tv_usec of the result is certainly positive. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
/* Driver for the order-4 (25-point) axis-symmetric variable-coefficient
 * 3D stencil benchmark.  argv: [Nx Ny Nz [Nt]]; each axis gets 2*4 ghost
 * layers (+8).  Fixes vs. the previous revision:
 *   - Nx/Ny/Nz/Nt now have defaults instead of being read uninitialized
 *     when arguments are missing (undefined behavior before);
 *   - lowercase min() replaced by the MIN macro defined above (min() is
 *     not declared anywhere, so this did not compile);
 *   - grid/coefficient initialization starts at index 0, not 1: the
 *     stencil reads offsets i-4/j-4/k-4 down to 0, so layer 0 was read
 *     uninitialized; plane A[1] is also seeded so its ghost layers are
 *     defined when read at t = 1;
 *   - the top-level A, coef and tile_size allocations are freed. */
int t, i, j, k, m, test;
int Nx = 32 + 8, Ny = 32 + 8, Nz = 32 + 8, Nt = 10; /* defaults */
if (argc > 3) {
Nx = atoi(argv[1]) + 8;
Ny = atoi(argv[2]) + 8;
Nz = atoi(argv[3]) + 8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* allocate the two time planes: A[t%2][z][y][x] */
double ****A = (double ****) malloc(sizeof(double ***) * 2);
for (m = 0; m < 2; m++) {
A[m] = (double ***) malloc(sizeof(double **) * Nz);
for (i = 0; i < Nz; i++) {
A[m][i] = (double **) malloc(sizeof(double *) * Ny);
for (j = 0; j < Ny; j++) {
A[m][i][j] = (double *) malloc(sizeof(double) * Nx);
}
}
}
/* 13 coefficient fields: center + 4 rings per axis (axis-symmetric). */
double ****coef = (double ****) malloc(sizeof(double ***) * 13);
for (m = 0; m < 13; m++) {
coef[m] = (double ***) malloc(sizeof(double **) * Nz);
for (i = 0; i < Nz; i++) {
coef[m][i] = (double **) malloc(sizeof(double *) * Ny);
for (j = 0; j < Ny; j++) {
coef[m][i][j] = (double *) malloc(sizeof(double) * Nx);
}
}
}
/* tile size information, including extra element to decide the list length */
int *tile_size = (int *) malloc(sizeof(int));
tile_size[0] = -1;
/* The list is modified here before source-to-source transformations */
tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
/* for timekeeping */
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff = 1.e100;
const int BASE = 1024;
/* initialize variables: every point, including index 0, must be defined
 * because the stencil reads offsets down to 0 */
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
/* Seed plane 1 from plane 0: its ghost layers are never written by the
 * stencil but are read from t = 1 onwards. */
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[1][i][j][k] = A[0][i][j][k];
}
}
}
for (m = 0; m < 13; m++) {
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for (test = 0; test < TESTS; test++) {
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
/* MIN is the macro defined at the top of this file; lowercase min()
 * does not exist and failed to compile/link. */
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
/* Free allocated arrays (including the previously-leaked top levels). */
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
free(A);
for (m = 0; m < 13; m++) {
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
free(coef);
free(tile_size);
return 0;
}
|
relic_multi.h | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (c) 2020 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or modify it under the
* terms of the version 2.1 (or later) of the GNU Lesser General Public License
* as published by the Free Software Foundation; or version 2.0 of the Apache
* License as published by the Apache Software Foundation. See the LICENSE files
* for more details.
*
* RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the LICENSE files for more details.
*
* You should have received a copy of the GNU Lesser General Public or the
* Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
* or <https://www.apache.org/licenses/>.
*/
/**
* @defgroup relic Core functions
*/
/**
* @file
*
* Multithreading support.
*
* @ingroup relic
*/
#ifndef RLC_MULTI_H
#define RLC_MULTI_H
/* Pull in the headers of the threading backend selected at configure time
 * (MULTI is OPENMP, PTHREAD, or undefined for single-threaded builds). */
#if defined(MULTI)
#include <math.h>
#if MULTI == OPENMP
#include <omp.h>
#elif MULTI == PTHREAD
#include <pthread.h>
#endif /* OPENMP */
#endif /* MULTI */
/*============================================================================*/
/* Constant definitions */
/*============================================================================*/
/**
 * If multi-threading is enabled, assigns each thread a local copy of the data.
 */
#if MULTI == PTHREAD
/* Under pthreads, per-thread data uses compiler thread-local storage. */
#define rlc_thread __thread
#else
/* Otherwise rlc_thread expands to nothing; for OpenMP the context is made
 * per-thread via the threadprivate pragma below instead. */
#define rlc_thread /* */
#endif
/**
 * Make library context private to each thread.
 */
#if MULTI == OPENMP
/**
 * Active library context, only visible inside the library.
 */
extern ctx_t first_ctx;
/**
 * Pointer to active library context, only visible inside the library.
 */
extern ctx_t *core_ctx;
#pragma omp threadprivate(first_ctx, core_ctx)
#endif
#endif /* !RLC_MULTI_H */
|
GB_binop__pair_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_bool)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_bool)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: bool
// A type: bool
// A pattern? 1
// B type: bool
// B pattern? 1
// BinaryOp: cij = 1
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_BOOL || GxB_NO_PAIR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* Disabled stub: the PAIR operator has no dense C += A+B (accum) variant,
 * so the generator emitted this placeholder compiled out with #if 0. */
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B with all three matrices dense, for the bool PAIR operator
 * (GB_BINOP above yields the constant 1).  The loop body lives in the
 * included template; this auto-generated wrapper only binds the types. */
void GB (_Cdense_ewise3_noaccum__pair_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate sparse matrix B into dense matrix C (template body).
 * B_ek_slicing/B_ntasks/B_nthreads carry the parallel task partition —
 * presumably produced by GB_ek_slice.h; confirm against callers. */
GrB_Info GB (_Cdense_accumB__pair_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
/* GB_DISABLE (defined above) compiles this kernel out when the PAIR or
 * BOOL cases are switched off in GB_control.h. */
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar (passed type-erased via p_bwork) into a
 * dense matrix.  Auto-generated wrapper around the included template. */
GrB_Info GB (_Cdense_accumb__pair_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
/* NOTE(review): unreachable — the inner block above already returned.
 * Harmless artifact of the code generator; do not edit by hand. */
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* Disabled stub: no C = A*D (column scale) variant exists for PAIR/bool,
 * so the generator emitted this placeholder compiled out with #if 0. */
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* Disabled stub: no C = D*B (row scale) variant exists for PAIR/bool,
 * so the generator emitted this placeholder compiled out with #if 0. */
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B for the bool PAIR
 * operator.  The numeric work is in GB_add_template.c; this wrapper binds
 * the bool types and unpacks the optional eWiseUnion alpha/beta scalars. */
GrB_Info GB (_AaddB__pair_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
/* Workspace declared here is released by GB_FREE_WORKSPACE below
 * (macro presumably defined by the included template; do not edit). */
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
bool alpha_scalar ;
bool beta_scalar ;
/* alpha/beta are only meaningful (and only read) for eWiseUnion. */
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// NOTE(review): every eWiseMult variant in this section is compiled out via
// #if 0 (placeholder name "(none)") -- the generator produced no specialized
// emult kernels for this operator; presumably a generic path is used instead.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// NOTE(review): disabled via #if 0.  For the PAIR operator z = f(x,y) is the
// constant 1, which is why the loop bodies below ignore both operands (the
// bare ";" statements are the generator's empty operand loads) and just
// store 1 -- apparently the generic path is used and these stubs are unused.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// NOTE(review): both transpose-apply variants are disabled via #if 0.
// GB_CAST_OP is what the transpose template invokes per entry; for the PAIR
// operator it ignores the operands and stores the constant 1.
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to its default for this file
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
nndes.h | /*
Copyright (C) 2010,2011 Wei Dong <wdong.pku@gmail.com>. All Rights Reserved.
DISTRIBUTION OF THIS PROGRAM IN EITHER BINARY OR SOURCE CODE FORM MUST BE
PERMITTED BY THE AUTHOR.
*/
#ifndef WDONG_NNDESCENT
#define WDONG_NNDESCENT
#include "nndes-common.h"
namespace nndes {
using std::cerr;
using std::vector;
using std::swap;
using boost::progress_display;
#ifndef NNDES_SHOW_PROGRESS
#define NNDES_SHOW_PROGRESS 1
#endif
// Normally one would use GRAPH_BOTH,
// GRAPH_KNN & GRAPH_RNN are for experiments only.
// NOTE(review): GRAPH_BOTH is its own bit (4), not GRAPH_KNN|GRAPH_RNN (3);
// that is why the loops below test (option & (GRAPH_KNN | GRAPH_BOTH)) etc.
static const int GRAPH_NONE = 0, GRAPH_KNN = 1, GRAPH_RNN = 2, GRAPH_BOTH = 4;
typedef int GraphOption;
// The main NN-Descent class.
// Instead of the actual dataset, the class takes a distance oracle
// as input. Given two data item ids, the oracle returns the distance
// between the two.
template <typename ORACLE>
class NNDescent {
private:
const ORACLE &oracle;
int N; // # points
int K; // K-NN to find
int S; // # of NNs to use for exploration (computed as K * sample-rate)
GraphOption option;
vector<KNN> nn; // K-NN approximation
// We maintain old and newly added KNN/RNN items
// separately for incremental processing:
// we need to compare two new ones
// and a new one to an old one, but not two old ones as they
// must have been compared already.
vector<vector<int> > nn_old;
vector<vector<int> > nn_new;
vector<vector<int> > rnn_old;
vector<vector<int> > rnn_new;
// total number of comparisons done.
long long int cost;
// This function decides of it's necessary to compare two
// points. Obviously a point should not compare against itself.
// Another potential usage of this function is to record all
// pairs that have already be compared, so that when seen in the future,
// then same pair doesn't have be compared again.
bool mark (int p1, int p2) {
return p1 == p2;
}
// Compare two points and update their KNN list of necessary.
// Return the number of comparisons done (0 or 1).
int update (int p1, int p2) {
if (mark(p1, p2)) return 0;
// KNN::update is synchronized by a lock
// keep an order is necessary to avoid deadlock.
if (p1 > p2) swap(p1, p2);
float dist = oracle(p1, p2);
// both endpoints see the new candidate, flagged as "new"
nn[p1].update(KNN::Element(p2, dist, true));
nn[p2].update(KNN::Element(p1, dist, true));
return 1;
}
public:
// Read-only access to the current K-NN approximation.
const vector<KNN> &getNN() const {
return nn;
}
// Total number of distance evaluations performed so far.
long long int getCost () const {
return cost;
}
// N_: # points, K_: neighbors per point, S_: sampling rate (S = K * S_),
// oracle_: distance oracle, opt: which graph edges to explore.
// Seeds each point's candidate lists with S random ids (rand()-based,
// so results depend on the global RNG state).
NNDescent (int N_, int K_, float S_, const ORACLE &oracle_,
GraphOption opt = GRAPH_BOTH)
: oracle(oracle_), N(N_), K(K_), S(K * S_), option(opt), nn(N_),
nn_old(N_), nn_new(N_), rnn_old(N_), rnn_new(N_), cost(0)
{
for (int i = 0; i < N; ++i) {
nn[i].init(K);
// random initial edges
if ((option & GRAPH_KNN) || (option & GRAPH_BOTH)) {
nn_new[i].resize(S);
BOOST_FOREACH(int &u, nn_new[i]) {
u = rand() % N;
}
}
if ((option & GRAPH_RNN) || (option & GRAPH_BOTH)) {
rnn_new[i].resize(S);
BOOST_FOREACH(int &u, rnn_new[i]) {
u = rand() % N;
}
}
}
}
// An iteration contains two parts:
// local join
// identify the newly detected NNs.
// Returns the number of entries flagged "new" this round (before
// sampling) -- callers typically stop when this drops low enough.
int iterate () {
#if NNDES_SHOW_PROGRESS
progress_display progress(N, cerr);
#endif
long long int cc = 0;
// local joins: compare candidates within each point's neighborhood.
// nn[] is shared across threads; per the comment in update(), the
// KNN lists synchronize internally.
#pragma omp parallel for default(shared) reduction(+:cc)
for (int i = 0; i < N; ++i) {
// The following loops are bloated to deal with all
// the experimental setups. Otherwise they should
// be really simple.
if (option & (GRAPH_KNN | GRAPH_BOTH)) {
BOOST_FOREACH(int j, nn_new[i]) {
BOOST_FOREACH(int k, nn_new[i]) {
// j < k avoids comparing a pair twice
if (j >= k) continue;
cc += update(j, k);
}
BOOST_FOREACH(int k, nn_old[i]) {
cc += update(j, k);
}
}
}
if (option & (GRAPH_RNN | GRAPH_BOTH)) {
BOOST_FOREACH(int j, rnn_new[i]) {
BOOST_FOREACH(int k, rnn_new[i]) {
if (j >= k) continue;
cc += update(j, k);
}
BOOST_FOREACH(int k, rnn_old[i]) {
cc += update(j, k);
}
}
}
if (option & GRAPH_BOTH) {
// cross joins between the KNN and RNN candidate lists
BOOST_FOREACH(int j, nn_new[i]) {
BOOST_FOREACH(int k, rnn_old[i]) {
cc += update(j, k);
}
BOOST_FOREACH(int k, rnn_new[i]) {
cc += update(j, k);
}
}
BOOST_FOREACH(int j, nn_old[i]) {
BOOST_FOREACH(int k, rnn_new[i]) {
cc += update(j, k);
}
}
}
#if NNDES_SHOW_PROGRESS
#pragma omp critical
++progress;
#endif
}
cost += cc;
int t = 0;
//#pragma omp parallel for default(shared) reduction(+:t)
for (int i = 0; i < N; ++i) {
nn_old[i].clear();
nn_new[i].clear();
rnn_old[i].clear();
rnn_new[i].clear();
// find the new ones
for (int j = 0; j < K; ++j) {
KNN::Element &e = nn[i][j];
if (e.key == KNN::Element::BAD) continue;
if (e.flag){
// store the POSITION j for now; converted to the point id
// below after sampling, so the flag can be cleared in place
nn_new[i].push_back(j);
}
else {
nn_old[i].push_back(e.key);
}
}
t += nn_new[i].size();
// sample: keep at most S new candidates per point
if (nn_new[i].size() > unsigned(S)) {
random_shuffle(nn_new[i].begin(), nn_new[i].end());
nn_new[i].resize(S);
}
// clear the "new" flag of the sampled entries and replace the
// stored position with the actual point id
BOOST_FOREACH(int &v, nn_new[i]) {
nn[i][v].flag = false;
v = nn[i][v].key;
}
}
// symmetrize: build reverse-NN lists from the forward lists
if ((option & GRAPH_RNN) || (option & GRAPH_BOTH)) {
for (int i = 0; i < N; ++i) {
BOOST_FOREACH(int e, nn_old[i]) {
rnn_old[e].push_back(i);
}
BOOST_FOREACH(int e, nn_new[i]) {
rnn_new[e].push_back(i);
}
}
}
// sample the reverse lists down to S as well
//#pragma omp parallel for default(shared) reduction(+:t)
for (int i = 0; i < N; ++i) {
if (rnn_old[i].size() > unsigned(S)) {
random_shuffle(rnn_old[i].begin(), rnn_old[i].end());
rnn_old[i].resize(S);
}
if (rnn_new[i].size() > unsigned(S)) {
random_shuffle(rnn_new[i].begin(), rnn_new[i].end());
rnn_new[i].resize(S);
}
}
return t;
}
};
}
#endif
|
primos.c | #include<stdlib.h>
#include<stdio.h>
#include<omp.h>
/*
 * For every k in [1, intervalo], counts the primes <= k by trial division
 * and prints the count, distributing the k-iterations over OpenMP threads.
 *
 * Fixes:
 *  - 'total' and 'prime' were declared before the parallel region and only
 *    'prime' was listed in private(); 'total' was therefore SHARED and all
 *    threads raced on 'total = 0' / 'total += prime', so the printed counts
 *    were garbage with more than one thread.  All per-iteration state is
 *    now declared inside the loop body and is implicitly private.
 *  - trial division now stops at sqrt(i) (j*j <= i); this changes no
 *    result, only removes redundant work.
 *  - usage errors exit with a nonzero status and a corrected message
 *    (the old "Usage ./%s" doubled the "./" already present in argv[0]).
 *
 * Output lines may appear out of order (schedule(dynamic) + unsynchronized
 * printf), exactly as in the original program.
 */
int main (int argc, const char** argv) {
    if (argc < 2) {
        printf("Usage: %s <number_of_threads>\n", argv[0]);
        exit(1);
    }
    printf("Number of processors: %d\n", omp_get_num_procs());
    const int NUM_TH = atoi(argv[1]);
    const int intervalo = 5000;   /* upper end of the ranges [1..k] */
    double starttime, stoptime;
    starttime = omp_get_wtime();
    omp_set_num_threads(NUM_TH);
    #pragma omp parallel for schedule (dynamic)
    for (int k = 1; k <= intervalo; k++) {
        int total = 0;            /* private: # of primes in [1..k] */
        for (int i = 2; i <= k; i++) {
            int prime = 1;
            /* divisors come in pairs, so testing up to sqrt(i) suffices */
            for (int j = 2; j * j <= i; j++) {
                if (i % j == 0) {
                    prime = 0;
                    break;
                }
            }
            total = total + prime;
        }
        printf("O número de primos do intervalo [1-%d] é %d\n", k, total);
    }
    stoptime = omp_get_wtime();
    printf("Tempo de execução: %3.2f segundos\n", stoptime-starttime);
    return(0);
}
|
struct_matvec.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.25 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Structured matrix-vector multiply routine
*
*****************************************************************************/
#include "_hypre_struct_mv.h"
/* this currently cannot be greater than 7 */
#ifdef MAX_DEPTH
#undef MAX_DEPTH
#endif
#define MAX_DEPTH 7
/*--------------------------------------------------------------------------
* hypre_StructMatvecData data structure
*--------------------------------------------------------------------------*/
/* Saved state for repeated y = alpha*A*x + beta*y products: the matrix and
   vector the package was built for (reference-counted, see Setup), plus the
   ghost-exchange/computation schedule created in hypre_StructMatvecSetup. */
typedef struct
{
hypre_StructMatrix *A;           /* matrix operand (ref, not a copy) */
hypre_StructVector *x;           /* vector operand (ref, not a copy) */
hypre_ComputePkg *compute_pkg;   /* communication + compute schedule */
} hypre_StructMatvecData;
/*--------------------------------------------------------------------------
* hypre_StructMatvecCreate
*--------------------------------------------------------------------------*/
void *
hypre_StructMatvecCreate( )
{
   /* Hand back a zero-initialized matvec data object as an opaque handle;
      it is filled in by hypre_StructMatvecSetup and released by the
      corresponding destroy routine. */
   hypre_StructMatvecData *data = hypre_CTAlloc(hypre_StructMatvecData, 1);

   return (void *) data;
}
/*--------------------------------------------------------------------------
* hypre_StructMatvecSetup
*--------------------------------------------------------------------------*/
/* Builds the communication/computation package for y = A*x products and
   stores references to A and x in the matvec object created by
   hypre_StructMatvecCreate.  Must be called before hypre_StructMatvecCompute.
   Returns hypre_error_flag. */
HYPRE_Int
hypre_StructMatvecSetup( void *matvec_vdata,
hypre_StructMatrix *A,
hypre_StructVector *x )
{
hypre_StructMatvecData *matvec_data = matvec_vdata;
hypre_StructGrid *grid;
hypre_StructStencil *stencil;
hypre_ComputeInfo *compute_info;
hypre_ComputePkg *compute_pkg;
/*----------------------------------------------------------
* Set up the compute package
*----------------------------------------------------------*/
grid = hypre_StructMatrixGrid(A);
stencil = hypre_StructMatrixStencil(A);
/* derive independent/dependent compute regions from the stencil reach */
hypre_CreateComputeInfo(grid, stencil, &compute_info);
hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
grid, &compute_pkg);
/*----------------------------------------------------------
* Set up the matvec data structure
*----------------------------------------------------------*/
/* take references (not copies) so the operands outlive the package */
(matvec_data -> A) = hypre_StructMatrixRef(A);
(matvec_data -> x) = hypre_StructVectorRef(x);
(matvec_data -> compute_pkg) = compute_pkg;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatvecCompute
*--------------------------------------------------------------------------*/
/* Computes y = alpha*A*x + beta*y using the package built in Setup.
   Overlaps the ghost-zone exchange of x with the computation on points
   that do not need ghost data (compute_i == 0), then finishes the
   boundary-dependent points (compute_i == 1).  constant_coefficient
   selects the kernel: 0 = all stencil coefficients variable, 1 = all
   constant, 2 = mixed (presumably constant off-diagonal -- confirm in
   hypre_StructMatvecCC2).  Returns hypre_error_flag. */
HYPRE_Int
hypre_StructMatvecCompute( void *matvec_vdata,
double alpha,
hypre_StructMatrix *A,
hypre_StructVector *x,
double beta,
hypre_StructVector *y )
{
hypre_StructMatvecData *matvec_data = matvec_vdata;
hypre_ComputePkg *compute_pkg;
hypre_CommHandle *comm_handle;
hypre_BoxArrayArray *compute_box_aa;
hypre_Box *y_data_box;
HYPRE_Int yi;
double *xp;
double *yp;
hypre_BoxArray *boxes;
hypre_Box *box;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_IndexRef stride;
HYPRE_Int constant_coefficient;
double temp;
HYPRE_Int compute_i, i;
/*-----------------------------------------------------------------------
* Initialize some things
*-----------------------------------------------------------------------*/
constant_coefficient = hypre_StructMatrixConstantCoefficient(A);
if (constant_coefficient) hypre_StructVectorClearBoundGhostValues(x, 0);
compute_pkg = (matvec_data -> compute_pkg);
stride = hypre_ComputePkgStride(compute_pkg);
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation
*-----------------------------------------------------------------------*/
/* alpha == 0 degenerates to y *= beta; skip A*x entirely */
if (alpha == 0.0)
{
boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(A));
hypre_ForBoxI(i, boxes)
{
box = hypre_BoxArrayBox(boxes, i);
start = hypre_BoxIMin(box);
y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
yp = hypre_StructVectorBoxData(y, i);
hypre_BoxGetSize(box, loop_size);
hypre_BoxLoop1Begin(hypre_StructVectorDim(x), loop_size,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(yi)
{
yp[yi] *= beta;
}
hypre_BoxLoop1End(yi);
}
return hypre_error_flag;
}
/*-----------------------------------------------------------------------
* Do (alpha != 0.0) computation
*-----------------------------------------------------------------------*/
/* pass 0: start x ghost exchange, scale y, compute interior boxes;
   pass 1: finish the exchange, compute boundary-dependent boxes */
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
xp = hypre_StructVectorData(x);
hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
/*--------------------------------------------------------------
* initialize y= (beta/alpha)*y normally (where everything
* is multiplied by alpha at the end),
* beta*y for constant coefficient (where only Ax gets multiplied by alpha)
*--------------------------------------------------------------*/
if ( constant_coefficient==1 )
{
temp = beta;
}
else
{
temp = beta / alpha;
}
if (temp != 1.0)
{
boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(A));
hypre_ForBoxI(i, boxes)
{
box = hypre_BoxArrayBox(boxes, i);
start = hypre_BoxIMin(box);
y_data_box =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
yp = hypre_StructVectorBoxData(y, i);
if (temp == 0.0)
{
/* y = 0: plain store avoids reading possibly-uninitialized y */
hypre_BoxGetSize(box, loop_size);
hypre_BoxLoop1Begin(hypre_StructVectorDim(x), loop_size,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(yi)
{
yp[yi] = 0.0;
}
hypre_BoxLoop1End(yi);
}
else
{
hypre_BoxGetSize(box, loop_size);
hypre_BoxLoop1Begin(hypre_StructVectorDim(x), loop_size,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(yi)
{
yp[yi] *= temp;
}
hypre_BoxLoop1End(yi);
}
}
}
}
break;
case 1:
{
/* wait for the ghost values of x before touching boundary boxes */
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
}
break;
}
/*--------------------------------------------------------------------
* y += A*x
*--------------------------------------------------------------------*/
switch( constant_coefficient )
{
case 0:
{
hypre_StructMatvecCC0( alpha, A, x, y, compute_box_aa, stride );
break;
}
case 1:
{
hypre_StructMatvecCC1( alpha, A, x, y, compute_box_aa, stride );
break;
}
case 2:
{
hypre_StructMatvecCC2( alpha, A, x, y, compute_box_aa, stride );
break;
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatvecCC0
* core of struct matvec computation, for the case constant_coefficient==0
* (all coefficients are variable)
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_StructMatvecCC0
 * core of struct matvec computation, for the case constant_coefficient==0
 * (all coefficients are variable)
 *
 * Accumulates y += A*x over the boxes in compute_box_aa, then scales the
 * accumulated y by alpha (the caller pre-scaled y by beta/alpha, so the
 * net effect is y = alpha*A*x + beta*y).  The stencil is processed in
 * groups of up to MAX_DEPTH entries; each case of the switch is the same
 * fused multiply-add loop unrolled to that depth.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_StructMatvecCC0( double alpha,
hypre_StructMatrix *A,
hypre_StructVector *x,
hypre_StructVector *y,
hypre_BoxArrayArray *compute_box_aa,
hypre_IndexRef stride
)
{
HYPRE_Int i, j, si;
/* Ap0..Ap6: coefficient arrays for up to MAX_DEPTH stencil entries */
double *Ap0;
double *Ap1;
double *Ap2;
double *Ap3;
double *Ap4;
double *Ap5;
double *Ap6;
/* xoff0..xoff6: linear offsets into x for the matching stencil shifts */
HYPRE_Int xoff0;
HYPRE_Int xoff1;
HYPRE_Int xoff2;
HYPRE_Int xoff3;
HYPRE_Int xoff4;
HYPRE_Int xoff5;
HYPRE_Int xoff6;
HYPRE_Int Ai;
HYPRE_Int xi;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *A_data_box;
hypre_Box *x_data_box;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size;
hypre_Box *y_data_box;
double *xp;
double *yp;
HYPRE_Int depth;
hypre_Index loop_size;
hypre_IndexRef start;
HYPRE_Int yi;
HYPRE_Int ndim;
stencil = hypre_StructMatrixStencil(A);
stencil_shape = hypre_StructStencilShape(stencil);
stencil_size = hypre_StructStencilSize(stencil);
ndim = hypre_StructVectorDim(x);
/* outer: one grid box i; inner: each compute sub-box j within it */
hypre_ForBoxArrayI(i, compute_box_aa)
{
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);
A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
xp = hypre_StructVectorBoxData(x, i);
yp = hypre_StructVectorBoxData(y, i);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
hypre_BoxGetSize(compute_box, loop_size);
start = hypre_BoxIMin(compute_box);
/* unroll up to depth MAX_DEPTH */
for (si = 0; si < stencil_size; si+= MAX_DEPTH)
{
depth = hypre_min(MAX_DEPTH, (stencil_size -si));
/* cases 7..1 are identical except for the unroll depth */
switch(depth)
{
case 7:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
Ap5 = hypre_StructMatrixBoxData(A, i, si+5);
Ap6 = hypre_StructMatrixBoxData(A, i, si+6);
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
xoff4 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+4]);
xoff5 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+5]);
xoff6 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+6]);
hypre_BoxLoop3Begin(ndim, loop_size,
A_data_box, start, stride, Ai,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, yi)
{
yp[yi] +=
Ap0[Ai] * xp[xi + xoff0] +
Ap1[Ai] * xp[xi + xoff1] +
Ap2[Ai] * xp[xi + xoff2] +
Ap3[Ai] * xp[xi + xoff3] +
Ap4[Ai] * xp[xi + xoff4] +
Ap5[Ai] * xp[xi + xoff5] +
Ap6[Ai] * xp[xi + xoff6];
}
hypre_BoxLoop3End(Ai, xi, yi);
break;
case 6:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
Ap5 = hypre_StructMatrixBoxData(A, i, si+5);
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
xoff4 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+4]);
xoff5 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+5]);
hypre_BoxLoop3Begin(ndim, loop_size,
A_data_box, start, stride, Ai,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, yi)
{
yp[yi] +=
Ap0[Ai] * xp[xi + xoff0] +
Ap1[Ai] * xp[xi + xoff1] +
Ap2[Ai] * xp[xi + xoff2] +
Ap3[Ai] * xp[xi + xoff3] +
Ap4[Ai] * xp[xi + xoff4] +
Ap5[Ai] * xp[xi + xoff5];
}
hypre_BoxLoop3End(Ai, xi, yi);
break;
case 5:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
xoff4 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+4]);
hypre_BoxLoop3Begin(ndim, loop_size,
A_data_box, start, stride, Ai,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, yi)
{
yp[yi] +=
Ap0[Ai] * xp[xi + xoff0] +
Ap1[Ai] * xp[xi + xoff1] +
Ap2[Ai] * xp[xi + xoff2] +
Ap3[Ai] * xp[xi + xoff3] +
Ap4[Ai] * xp[xi + xoff4];
}
hypre_BoxLoop3End(Ai, xi, yi);
break;
case 4:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
hypre_BoxLoop3Begin(ndim, loop_size,
A_data_box, start, stride, Ai,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, yi)
{
yp[yi] +=
Ap0[Ai] * xp[xi + xoff0] +
Ap1[Ai] * xp[xi + xoff1] +
Ap2[Ai] * xp[xi + xoff2] +
Ap3[Ai] * xp[xi + xoff3];
}
hypre_BoxLoop3End(Ai, xi, yi);
break;
case 3:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
hypre_BoxLoop3Begin(ndim, loop_size,
A_data_box, start, stride, Ai,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, yi)
{
yp[yi] +=
Ap0[Ai] * xp[xi + xoff0] +
Ap1[Ai] * xp[xi + xoff1] +
Ap2[Ai] * xp[xi + xoff2];
}
hypre_BoxLoop3End(Ai, xi, yi);
break;
case 2:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
hypre_BoxLoop3Begin(ndim, loop_size,
A_data_box, start, stride, Ai,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, yi)
{
yp[yi] +=
Ap0[Ai] * xp[xi + xoff0] +
Ap1[Ai] * xp[xi + xoff1];
}
hypre_BoxLoop3End(Ai, xi, yi);
break;
case 1:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
hypre_BoxLoop3Begin(ndim, loop_size,
A_data_box, start, stride, Ai,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, yi)
{
yp[yi] +=
Ap0[Ai] * xp[xi + xoff0];
}
hypre_BoxLoop3End(Ai, xi, yi);
break;
}
}
/* final scaling: y (holding beta/alpha*y + A*x) times alpha */
if (alpha != 1.0)
{
hypre_BoxLoop1Begin(ndim, loop_size,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(yi)
{
yp[yi] *= alpha;
}
hypre_BoxLoop1End(yi);
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatvecCC1
* core of struct matvec computation, for the case constant_coefficient==1
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_StructMatvecCC1( double alpha,
hypre_StructMatrix *A,
hypre_StructVector *x,
hypre_StructVector *y,
hypre_BoxArrayArray *compute_box_aa,
hypre_IndexRef stride
)
{
HYPRE_Int i, j, si;
double *Ap0;
double *Ap1;
double *Ap2;
double *Ap3;
double *Ap4;
double *Ap5;
double *Ap6;
double AAp0;
double AAp1;
double AAp2;
double AAp3;
double AAp4;
double AAp5;
double AAp6;
HYPRE_Int xoff0;
HYPRE_Int xoff1;
HYPRE_Int xoff2;
HYPRE_Int xoff3;
HYPRE_Int xoff4;
HYPRE_Int xoff5;
HYPRE_Int xoff6;
HYPRE_Int Ai;
HYPRE_Int xi;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *A_data_box;
hypre_Box *x_data_box;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size;
hypre_Box *y_data_box;
double *xp;
double *yp;
HYPRE_Int depth;
hypre_Index loop_size;
hypre_IndexRef start;
HYPRE_Int yi;
HYPRE_Int ndim;
stencil = hypre_StructMatrixStencil(A);
stencil_shape = hypre_StructStencilShape(stencil);
stencil_size = hypre_StructStencilSize(stencil);
ndim = hypre_StructVectorDim(x);
hypre_ForBoxArrayI(i, compute_box_aa)
{
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);
A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
xp = hypre_StructVectorBoxData(x, i);
yp = hypre_StructVectorBoxData(y, i);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
hypre_BoxGetSize(compute_box, loop_size);
start = hypre_BoxIMin(compute_box);
Ai = hypre_CCBoxIndexRank( A_data_box, start );
/* unroll up to depth MAX_DEPTH */
for (si = 0; si < stencil_size; si+= MAX_DEPTH)
{
depth = hypre_min(MAX_DEPTH, (stencil_size -si));
switch(depth)
{
case 7:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
Ap5 = hypre_StructMatrixBoxData(A, i, si+5);
Ap6 = hypre_StructMatrixBoxData(A, i, si+6);
AAp0 = Ap0[Ai]*alpha;
AAp1 = Ap1[Ai]*alpha;
AAp2 = Ap2[Ai]*alpha;
AAp3 = Ap3[Ai]*alpha;
AAp4 = Ap4[Ai]*alpha;
AAp5 = Ap5[Ai]*alpha;
AAp6 = Ap6[Ai]*alpha;
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
xoff4 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+4]);
xoff5 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+5]);
xoff6 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+6]);
hypre_BoxLoop2Begin(ndim, loop_size,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, yi)
{
yp[yi] +=
AAp0 * xp[xi + xoff0] +
AAp1 * xp[xi + xoff1] +
AAp2 * xp[xi + xoff2] +
AAp3 * xp[xi + xoff3] +
AAp4 * xp[xi + xoff4] +
AAp5 * xp[xi + xoff5] +
AAp6 * xp[xi + xoff6];
}
hypre_BoxLoop2End(xi, yi);
break;
case 6:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
Ap5 = hypre_StructMatrixBoxData(A, i, si+5);
AAp0 = Ap0[Ai]*alpha;
AAp1 = Ap1[Ai]*alpha;
AAp2 = Ap2[Ai]*alpha;
AAp3 = Ap3[Ai]*alpha;
AAp4 = Ap4[Ai]*alpha;
AAp5 = Ap5[Ai]*alpha;
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
xoff4 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+4]);
xoff5 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+5]);
hypre_BoxLoop2Begin(ndim, loop_size,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, yi)
{
yp[yi] +=
AAp0 * xp[xi + xoff0] +
AAp1 * xp[xi + xoff1] +
AAp2 * xp[xi + xoff2] +
AAp3 * xp[xi + xoff3] +
AAp4 * xp[xi + xoff4] +
AAp5 * xp[xi + xoff5];
}
hypre_BoxLoop2End(xi, yi);
break;
case 5:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
AAp0 = Ap0[Ai]*alpha;
AAp1 = Ap1[Ai]*alpha;
AAp2 = Ap2[Ai]*alpha;
AAp3 = Ap3[Ai]*alpha;
AAp4 = Ap4[Ai]*alpha;
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
xoff4 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+4]);
hypre_BoxLoop2Begin(ndim, loop_size,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, yi)
{
yp[yi] +=
AAp0 * xp[xi + xoff0] +
AAp1 * xp[xi + xoff1] +
AAp2 * xp[xi + xoff2] +
AAp3 * xp[xi + xoff3] +
AAp4 * xp[xi + xoff4];
}
hypre_BoxLoop2End(xi, yi);
break;
case 4:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
AAp0 = Ap0[Ai]*alpha;
AAp1 = Ap1[Ai]*alpha;
AAp2 = Ap2[Ai]*alpha;
AAp3 = Ap3[Ai]*alpha;
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
xoff3 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+3]);
hypre_BoxLoop2Begin(ndim, loop_size,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, yi)
{
yp[yi] +=
AAp0 * xp[xi + xoff0] +
AAp1 * xp[xi + xoff1] +
AAp2 * xp[xi + xoff2] +
AAp3 * xp[xi + xoff3];
}
hypre_BoxLoop2End(xi, yi);
break;
case 3:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
AAp0 = Ap0[Ai]*alpha;
AAp1 = Ap1[Ai]*alpha;
AAp2 = Ap2[Ai]*alpha;
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
xoff2 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+2]);
hypre_BoxLoop2Begin(ndim, loop_size,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, yi)
{
yp[yi] +=
AAp0 * xp[xi + xoff0] +
AAp1 * xp[xi + xoff1] +
AAp2 * xp[xi + xoff2];
}
hypre_BoxLoop2End(xi, yi);
break;
case 2:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
AAp0 = Ap0[Ai]*alpha;
AAp1 = Ap1[Ai]*alpha;
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
xoff1 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+1]);
hypre_BoxLoop2Begin(ndim, loop_size,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, yi)
{
yp[yi] +=
AAp0 * xp[xi + xoff0] +
AAp1 * xp[xi + xoff1];
}
hypre_BoxLoop2End(xi, yi);
break;
case 1:
Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
AAp0 = Ap0[Ai]*alpha;
xoff0 = hypre_BoxOffsetDistance(x_data_box,
stencil_shape[si+0]);
hypre_BoxLoop2Begin(ndim, loop_size,
x_data_box, start, stride, xi,
y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, yi)
{
yp[yi] +=
AAp0 * xp[xi + xoff0];
}
hypre_BoxLoop2End(xi, yi);
}
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatvecCC2
* core of struct matvec computation, for the case constant_coefficient==2
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_StructMatvecCC2( double alpha,
                                 hypre_StructMatrix *A,
                                 hypre_StructVector *x,
                                 hypre_StructVector *y,
                                 hypre_BoxArrayArray *compute_box_aa,
                                 hypre_IndexRef stride
                               )
{
   /* Core of the struct matvec for constant_coefficient == 2: every
      off-diagonal stencil coefficient is constant over a box, while the
      diagonal (center) coefficient varies pointwise.
      The constant coefficients are accumulated into y first, with the
      center coefficient explicitly zeroed out of those unrolled loops;
      the variable center coefficient is applied in a final separate loop.
      NOTE(review): alpha is applied only in that final loop, as
      yp = alpha*(yp + Ap0*xp), so the constant-part contributions (and any
      prior content of y) get scaled there — the caller is expected to have
      accounted for this (e.g. pre-scaling y by beta/alpha); confirm against
      hypre_StructMatvecCompute.
      Fix vs. previous revision: a duplicated, redundant recomputation of
      xoff0 in the depth-7 case was removed (no behavior change). */
   HYPRE_Int i, j, si;
   double *Ap0;
   double *Ap1;
   double *Ap2;
   double *Ap3;
   double *Ap4;
   double *Ap5;
   double *Ap6;
   double AAp0;
   double AAp1;
   double AAp2;
   double AAp3;
   double AAp4;
   double AAp5;
   double AAp6;
   HYPRE_Int xoff0;
   HYPRE_Int xoff1;
   HYPRE_Int xoff2;
   HYPRE_Int xoff3;
   HYPRE_Int xoff4;
   HYPRE_Int xoff5;
   HYPRE_Int xoff6;
   HYPRE_Int si_center, center_rank;
   hypre_Index center_index;
   HYPRE_Int Ai, Ai_CC;
   HYPRE_Int xi;
   hypre_BoxArray *compute_box_a;
   hypre_Box *compute_box;
   hypre_Box *A_data_box;
   hypre_Box *x_data_box;
   hypre_StructStencil *stencil;
   hypre_Index *stencil_shape;
   HYPRE_Int stencil_size;
   hypre_Box *y_data_box;
   double *xp;
   double *yp;
   HYPRE_Int depth;
   hypre_Index loop_size;
   hypre_IndexRef start;
   HYPRE_Int yi;
   HYPRE_Int ndim;

   stencil = hypre_StructMatrixStencil(A);
   stencil_shape = hypre_StructStencilShape(stencil);
   stencil_size = hypre_StructStencilSize(stencil);
   ndim = hypre_StructVectorDim(x);

   hypre_ForBoxArrayI(i, compute_box_aa)
   {
      compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

      A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
      x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
      y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);

      xp = hypre_StructVectorBoxData(x, i);
      yp = hypre_StructVectorBoxData(y, i);

      hypre_ForBoxI(j, compute_box_a)
      {
         compute_box = hypre_BoxArrayBox(compute_box_a, j);

         hypre_BoxGetSize(compute_box, loop_size);
         start = hypre_BoxIMin(compute_box);

         /* Constant coefficients are indexed once per box */
         Ai_CC = hypre_CCBoxIndexRank( A_data_box, start );

         /* Find the stencil index for the center of the stencil, which
            makes the matrix diagonal.  This is the variable coefficient
            part of the matrix, so will get different treatment... */
         hypre_SetIndex(center_index, 0, 0, 0);
         center_rank = hypre_StructStencilElementRank( stencil, center_index );
         si_center = center_rank;

         /* unroll up to depth MAX_DEPTH.
            Only the constant coefficient part of the matrix is referenced
            here, the center (variable) coefficient part is deferred. */
         for (si = 0; si < stencil_size; si += MAX_DEPTH)
         {
            depth = hypre_min(MAX_DEPTH, (stencil_size - si));
            switch(depth)
            {
               case 7:
                  Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
                  Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
                  Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
                  Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
                  Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
                  Ap5 = hypre_StructMatrixBoxData(A, i, si+5);
                  Ap6 = hypre_StructMatrixBoxData(A, i, si+6);
                  AAp0 = Ap0[Ai_CC];
                  AAp1 = Ap1[Ai_CC];
                  AAp2 = Ap2[Ai_CC];
                  AAp3 = Ap3[Ai_CC];
                  AAp4 = Ap4[Ai_CC];
                  AAp5 = Ap5[Ai_CC];
                  AAp6 = Ap6[Ai_CC];
                  /* zero out the (variable) center coefficient; it is
                     applied in the separate loop after this switch */
                  if ( (0 <= si_center-si) && (si_center-si < 7) )
                  {
                     switch ( si_center-si )
                     {
                        case 0: AAp0 = 0; break;
                        case 1: AAp1 = 0; break;
                        case 2: AAp2 = 0; break;
                        case 3: AAp3 = 0; break;
                        case 4: AAp4 = 0; break;
                        case 5: AAp5 = 0; break;
                        case 6: AAp6 = 0; break;
                     }
                  }
                  xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+0]);
                  xoff1 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+1]);
                  xoff2 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+2]);
                  xoff3 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+3]);
                  xoff4 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+4]);
                  xoff5 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+5]);
                  xoff6 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+6]);

                  hypre_BoxLoop2Begin(ndim, loop_size,
                                      x_data_box, start, stride, xi,
                                      y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(xi, yi)
                  {
                     yp[yi] +=
                        AAp0 * xp[xi + xoff0] +
                        AAp1 * xp[xi + xoff1] +
                        AAp2 * xp[xi + xoff2] +
                        AAp3 * xp[xi + xoff3] +
                        AAp4 * xp[xi + xoff4] +
                        AAp5 * xp[xi + xoff5] +
                        AAp6 * xp[xi + xoff6];
                  }
                  hypre_BoxLoop2End(xi, yi);
                  break;

               case 6:
                  Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
                  Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
                  Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
                  Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
                  Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
                  Ap5 = hypre_StructMatrixBoxData(A, i, si+5);
                  AAp0 = Ap0[Ai_CC];
                  AAp1 = Ap1[Ai_CC];
                  AAp2 = Ap2[Ai_CC];
                  AAp3 = Ap3[Ai_CC];
                  AAp4 = Ap4[Ai_CC];
                  AAp5 = Ap5[Ai_CC];
                  if ( (0 <= si_center-si) && (si_center-si < 6) )
                  {
                     switch ( si_center-si )
                     {
                        case 0: AAp0 = 0; break;
                        case 1: AAp1 = 0; break;
                        case 2: AAp2 = 0; break;
                        case 3: AAp3 = 0; break;
                        case 4: AAp4 = 0; break;
                        case 5: AAp5 = 0; break;
                     }
                  }
                  xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+0]);
                  xoff1 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+1]);
                  xoff2 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+2]);
                  xoff3 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+3]);
                  xoff4 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+4]);
                  xoff5 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+5]);

                  hypre_BoxLoop2Begin(ndim, loop_size,
                                      x_data_box, start, stride, xi,
                                      y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(xi, yi)
                  {
                     yp[yi] +=
                        AAp0 * xp[xi + xoff0] +
                        AAp1 * xp[xi + xoff1] +
                        AAp2 * xp[xi + xoff2] +
                        AAp3 * xp[xi + xoff3] +
                        AAp4 * xp[xi + xoff4] +
                        AAp5 * xp[xi + xoff5];
                  }
                  hypre_BoxLoop2End(xi, yi);
                  break;

               case 5:
                  Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
                  Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
                  Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
                  Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
                  Ap4 = hypre_StructMatrixBoxData(A, i, si+4);
                  AAp0 = Ap0[Ai_CC];
                  AAp1 = Ap1[Ai_CC];
                  AAp2 = Ap2[Ai_CC];
                  AAp3 = Ap3[Ai_CC];
                  AAp4 = Ap4[Ai_CC];
                  if ( (0 <= si_center-si) && (si_center-si < 5) )
                  {
                     switch ( si_center-si )
                     {
                        case 0: AAp0 = 0; break;
                        case 1: AAp1 = 0; break;
                        case 2: AAp2 = 0; break;
                        case 3: AAp3 = 0; break;
                        case 4: AAp4 = 0; break;
                     }
                  }
                  xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+0]);
                  xoff1 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+1]);
                  xoff2 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+2]);
                  xoff3 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+3]);
                  xoff4 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+4]);

                  hypre_BoxLoop2Begin(ndim, loop_size,
                                      x_data_box, start, stride, xi,
                                      y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(xi, yi)
                  {
                     yp[yi] +=
                        AAp0 * xp[xi + xoff0] +
                        AAp1 * xp[xi + xoff1] +
                        AAp2 * xp[xi + xoff2] +
                        AAp3 * xp[xi + xoff3] +
                        AAp4 * xp[xi + xoff4];
                  }
                  hypre_BoxLoop2End(xi, yi);
                  break;

               case 4:
                  Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
                  Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
                  Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
                  Ap3 = hypre_StructMatrixBoxData(A, i, si+3);
                  AAp0 = Ap0[Ai_CC];
                  AAp1 = Ap1[Ai_CC];
                  AAp2 = Ap2[Ai_CC];
                  AAp3 = Ap3[Ai_CC];
                  if ( (0 <= si_center-si) && (si_center-si < 4) )
                  {
                     switch ( si_center-si )
                     {
                        case 0: AAp0 = 0; break;
                        case 1: AAp1 = 0; break;
                        case 2: AAp2 = 0; break;
                        case 3: AAp3 = 0; break;
                     }
                  }
                  xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+0]);
                  xoff1 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+1]);
                  xoff2 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+2]);
                  xoff3 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+3]);

                  hypre_BoxLoop2Begin(ndim, loop_size,
                                      x_data_box, start, stride, xi,
                                      y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(xi, yi)
                  {
                     yp[yi] +=
                        AAp0 * xp[xi + xoff0] +
                        AAp1 * xp[xi + xoff1] +
                        AAp2 * xp[xi + xoff2] +
                        AAp3 * xp[xi + xoff3];
                  }
                  hypre_BoxLoop2End(xi, yi);
                  break;

               case 3:
                  Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
                  Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
                  Ap2 = hypre_StructMatrixBoxData(A, i, si+2);
                  AAp0 = Ap0[Ai_CC];
                  AAp1 = Ap1[Ai_CC];
                  AAp2 = Ap2[Ai_CC];
                  if ( (0 <= si_center-si) && (si_center-si < 3) )
                  {
                     switch ( si_center-si )
                     {
                        case 0: AAp0 = 0; break;
                        case 1: AAp1 = 0; break;
                        case 2: AAp2 = 0; break;
                     }
                  }
                  xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+0]);
                  xoff1 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+1]);
                  xoff2 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+2]);

                  hypre_BoxLoop2Begin(ndim, loop_size,
                                      x_data_box, start, stride, xi,
                                      y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(xi, yi)
                  {
                     yp[yi] +=
                        AAp0 * xp[xi + xoff0] +
                        AAp1 * xp[xi + xoff1] +
                        AAp2 * xp[xi + xoff2];
                  }
                  hypre_BoxLoop2End(xi, yi);
                  break;

               case 2:
                  Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
                  Ap1 = hypre_StructMatrixBoxData(A, i, si+1);
                  AAp0 = Ap0[Ai_CC];
                  AAp1 = Ap1[Ai_CC];
                  if ( (0 <= si_center-si) && (si_center-si < 2) )
                  {
                     switch ( si_center-si )
                     {
                        case 0: AAp0 = 0; break;
                        case 1: AAp1 = 0; break;
                     }
                  }
                  xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+0]);
                  xoff1 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+1]);

                  hypre_BoxLoop2Begin(ndim, loop_size,
                                      x_data_box, start, stride, xi,
                                      y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(xi, yi)
                  {
                     yp[yi] +=
                        AAp0 * xp[xi + xoff0] +
                        AAp1 * xp[xi + xoff1];
                  }
                  hypre_BoxLoop2End(xi, yi);
                  break;

               case 1:
                  Ap0 = hypre_StructMatrixBoxData(A, i, si+0);
                  AAp0 = Ap0[Ai_CC];
                  if ( si_center-si == 0 )
                  {
                     AAp0 = 0;
                  }
                  xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                                  stencil_shape[si+0]);

                  hypre_BoxLoop2Begin(ndim, loop_size,
                                      x_data_box, start, stride, xi,
                                      y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi) HYPRE_SMP_SCHEDULE
#endif
                  hypre_BoxLoop2For(xi, yi)
                  {
                     yp[yi] +=
                        AAp0 * xp[xi + xoff0];
                  }
                  hypre_BoxLoop2End(xi, yi);
                  break;
            }
         }

         /* Now apply the variable (center/diagonal) coefficient, and fold
            in the alpha scaling of the accumulated sum. */
         Ap0 = hypre_StructMatrixBoxData(A, i, si_center);
         xoff0 = hypre_BoxOffsetDistance(x_data_box,
                                         stencil_shape[si_center]);
         if (alpha != 1.0 )
         {
            hypre_BoxLoop3Begin(ndim, loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, yi)
            {
               yp[yi] = alpha * ( yp[yi] +
                                  Ap0[Ai] * xp[xi + xoff0] );
            }
            hypre_BoxLoop3End(Ai, xi, yi);
         }
         else
         {
            hypre_BoxLoop3Begin(ndim, loop_size,
                                A_data_box, start, stride, Ai,
                                x_data_box, start, stride, xi,
                                y_data_box, start, stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,xi,Ai) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop3For(Ai, xi, yi)
            {
               yp[yi] +=
                  Ap0[Ai] * xp[xi + xoff0];
            }
            hypre_BoxLoop3End(Ai, xi, yi);
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatvecDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_StructMatvecDestroy( void *matvec_vdata )
{
hypre_StructMatvecData *matvec_data = matvec_vdata;
if (matvec_data)
{
hypre_StructMatrixDestroy(matvec_data -> A);
hypre_StructVectorDestroy(matvec_data -> x);
hypre_ComputePkgDestroy(matvec_data -> compute_pkg );
hypre_TFree(matvec_data);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatvec
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_StructMatvec( double alpha,
                    hypre_StructMatrix *A,
                    hypre_StructVector *x,
                    double beta,
                    hypre_StructVector *y )
{
   /* Convenience driver for y <- alpha*A*x + beta*y: builds a throw-away
      matvec object, runs the full create/setup/compute/destroy cycle, and
      returns the library error flag. */
   void *mv_data = hypre_StructMatvecCreate();

   hypre_StructMatvecSetup(mv_data, A, x);
   hypre_StructMatvecCompute(mv_data, alpha, A, x, beta, y);
   hypre_StructMatvecDestroy(mv_data);

   return hypre_error_flag;
}
|
GB_binop__ne_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fc32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__ne_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fc32)
// C=scalar+B GB (_bind1st__ne_fc32)
// C=scalar+B' GB (_bind1st_tran__ne_fc32)
// C=A+scalar GB (_bind2nd__ne_fc32)
// C=A'+scalar GB (_bind2nd_tran__ne_fc32)
// C type: bool
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_ne (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
/* fixed: removed a stray trailing line-continuation backslash after the 0,
   which spliced the following source line into this macro definition */
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
/* fixed: removed a stray trailing line-continuation backslash after the 0,
   which spliced the following source line into this macro definition */
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = (crealf (GBX (Ax, pA, A_iso)) != 0) || (cimagf (GBX (Ax, pA, A_iso)) != 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = (crealf (GBX (Bx, pB, B_iso)) != 0) || (cimagf (GBX (Bx, pB, B_iso)) != 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_ne (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_FC32 || GxB_NO_NE_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The entire loop lives in the
// included template, specialized by the GB_* macros defined above
// (GB_BINOP expands to GB_FC32_ne).  Auto-generated: code kept unchanged.
void GB (_Cdense_ewise3_noaccum__ne_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix.  For this
// operator the template is compiled out (#if 0 below), so the function is
// effectively a no-op that reports success.  Auto-generated: code unchanged.
GrB_Info GB (_Cdense_accumB__ne_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// disabled for the NE operator: the subassign template is not instantiated
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix.  For this operator the
// template is compiled out (#if 0 below), so the function is effectively a
// no-op that reports success.  Auto-generated: code kept unchanged.
GrB_Info GB (_Cdense_accumb__ne_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// disabled for the NE operator: the subassign template is not instantiated
#if 0
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with cij = GB_FC32_ne.
// When is_eWiseUnion is true, the alpha/beta scalars replace missing entries
// of A/B respectively; otherwise the scalar inputs are ignored.  The actual
// add loop is in GB_add_template.c.  Auto-generated: code kept unchanged.
GrB_Info GB (_AaddB__ne_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B into parallel tasks
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
if (is_eWiseUnion)
{
// only eWiseUnion reads the typed alpha/beta fill-in scalars
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is sparse or
// hypersparse, with cij = GB_FC32_ne.  The loop is in GB_emult_08_meta.c.
// Auto-generated: code kept unchanged.
GrB_Info GB (_AemultB_08__ne_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// Since GB_BINOP_FLIP is 0 for NE (the op is commutative), the flipxy flag
// needs no special handling.  Auto-generated: code kept unchanged.
GrB_Info GB (_AemultB_02__ne_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full.  The loop is in GB_emult_04_template.c.
// Auto-generated: code kept unchanged.
GrB_Info GB (_AemultB_04__ne_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
// The loop is in GB_bitmap_emult_template.c.  Auto-generated: code unchanged.
GrB_Info GB (_AemultB_bitmap__ne_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the NE binary operator with the scalar bound to the
// first argument, i.e. Cx[p] = (x != Bx[p]).  Entries not present in the
// bitmap Bb are skipped.  Auto-generated: code kept unchanged.
GrB_Info GB (_bind1st__ne_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from the bitmap; GBB is all-true when Bb is NULL
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC32_ne (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the NE binary operator with the scalar bound to the
// second argument, i.e. Cx[p] = (Ax[p] != y).  Entries not present in the
// bitmap Ab are skipped.  Auto-generated: code kept unchanged.
GrB_Info GB (_bind2nd__ne_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap; GBB is all-true when Ab is NULL
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC32_ne (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_ne (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the NE operator with the scalar as
// the first argument; the per-entry work is the GB_CAST_OP macro defined
// just above, expanded inside GB_unop_transpose.c.
// Auto-generated: code kept unchanged.
GrB_Info GB (_bind1st_tran__ne_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_ne (aij, y) ; \
}
// C = op (A', y): transpose A and apply the NE operator with the scalar as
// the second argument; the per-entry work is the GB_CAST_OP macro defined
// just above, expanded inside GB_unop_transpose.c.
// Auto-generated: code kept unchanged.
GrB_Info GB (_bind2nd_tran__ne_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Par-30-ParSectionsForLoop.c |
int main(int argc, char **argv) {
  /* Triple every element of two independent arrays.  Each array is handled
     by its own OpenMP section, so the two loops may run on different
     threads concurrently; without OpenMP they simply run in order. */
  int a[4] = {1, 2, 3, 4};
  int b[4] = {1, 2, 3, 4};
#pragma omp parallel
  {
#pragma omp sections
    {
#pragma omp section
      {
        /* first section: scale array a */
        for (int idx = 0; idx < 4; ++idx) {
          a[idx] *= 3;
        }
      }
#pragma omp section
      {
        /* second section: scale array b */
        for (int idx = 0; idx < 4; ++idx) {
          b[idx] *= 3;
        }
      }
    }
  }
  return 0;
}
|
ClosestCentroids.h | #pragma once
#include "headers/Matrix.h"
// Maps each data sample to the index of its closest centroid, using the L1
// (Manhattan) distance.  Inherits from the project's Matrix<int>; members
// such as _matrix, _rows, _cols and _n_threads come from that base class
// (assumed: _matrix is row-major contiguous storage — TODO confirm against
// headers/Matrix.h).
template<typename T>
class ClosestCentroids : public Matrix<int>{
public:
/**
* if buffered ClosestCentroids desired -> 2 rows else 1
*/
// With buffering on, two rows alternate between "current" and "previous"
// assignments so getModifRate() can compare consecutive iterations.
ClosestCentroids(int samples, int value = 0, bool buffered=true, int num_threads = 1) :
Matrix<int>((buffered?2:1), samples, value, num_threads),
_toggle{ (buffered?1:0) } {
initDistBuffer();
}
/**
* Gets closest cluster index w.r.t. each sample
*
* Dimensions:
* rows: 1 (2 if toggle feature activated)
* cols: n_samples
*
* data:    n_dims x n_samples; cluster: n_dims x n_clusters.
* Writes the winning cluster index for sample i into the toggled row and
* keeps the best distance so far in _distBuffer.
* NOTE(review): assumes cluster has at least one column; collapse(1) on
* the pragma below is a no-op hint.
*/
ClosestCentroids& getClosest(const Matrix<T>& data, const Matrix<T>& cluster){
int n_dims = data.getRows();
int n_clusters = cluster.getCols();
#pragma omp parallel for collapse(1) num_threads(_n_threads)
for(int i = 0; i < _cols; ++i){
// seed the search with cluster 0's distance
T abs_sum = 0;
for(int d = 0; d < n_dims; ++d){
abs_sum += std::abs(data(d, i) - cluster(d, 0));
}
_matrix[i+_toggled_row*_cols] = 0;
(*_distBuffer)(0, i) = abs_sum;
// scan the remaining clusters, keeping the argmin
for(int c = 1; c < n_clusters; ++c){
abs_sum = 0;
for(int d = 0; d < n_dims; ++d){
abs_sum += std::abs(data(d, i) - cluster(d, c));
}
if(abs_sum < (*_distBuffer)(0, i)){
_matrix[i+_toggled_row*_cols] = c;
(*_distBuffer)(0, i) = abs_sum;
}
}
}
// the row just written becomes "current"; flip the toggle for next call
// (when unbuffered, _toggle == 0 and both rows stay at index 0)
_current_row = _toggled_row;
_toggled_row ^= _toggle;
return *this;
}
/**
* Checks whether the stopping criterion is satisfied or not.
* If 2 consecutive closest centroids computation's modification
* rate that is below the given threshold, we consider that
* KMeans has converged
*
* Returns the fraction of samples whose assignment differs between the
* two buffered rows (1.0 when unbuffered, so convergence never triggers).
*/
float getModifRate(){
// stopping criterion never satisfied if we dont keep track of assigned centroids modifications
if(_rows < 2) return 1.0f;
int counter = 0;
#pragma omp parallel for simd reduction(+:counter) num_threads(_n_threads)
for(int i = 0; i < _cols; ++i){
const int& a = _matrix[i];
const int& b = _matrix[i+_cols];
// a ^ b == 0 iff the assignment is unchanged for sample i
if(!(a ^ b)) ++counter;
}
return 1.0f - static_cast<float>(counter) / _cols;
}
// Access the assignment of `col` in the most recently computed row.
inline int& operator()(const int& col) { return _matrix[col+_current_row*_cols]; }
inline const int& operator()(const int& col) const { return _matrix[col+_current_row*_cols]; }
private:
// One row of per-sample best distances, reused across getClosest() calls.
void initDistBuffer(){
_distBuffer = std::make_unique<Matrix<T>>(1, _cols, 0, _n_threads);
}
std::unique_ptr<Matrix<T>> _distBuffer;
// we want to store the previous state of mapped centroids, toggle: 1 to switch between rows
int _toggle = 0;
// we store the toggled row
int _toggled_row = 0;
int _current_row = 0;
};
kpoint.c | /* Copyright (C) 2008 Atsushi Togo */
/* All rights reserved. */
/* This file is part of spglib. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the spglib project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include "mathfunc.h"
#include "kpoint.h"
#include "kgrid.h"
#ifdef KPTWARNING
#include <stdio.h>
#define warning_print(...) fprintf(stderr,__VA_ARGS__)
#else
#define warning_print(...)
#endif
#define KPT_NUM_BZ_SEARCH_SPACE 125
/* Candidate translations (in units of the reciprocal primitive vectors) */
/* tried when moving a grid point into the first Brillouin zone: all 125 */
/* vectors of the 5x5x5 block with components in {-2,-1,0,1,2}.          */
static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
{ 0, 0, 0},
{ 0, 0, 1},
{ 0, 0, 2},
{ 0, 0, -2},
{ 0, 0, -1},
{ 0, 1, 0},
{ 0, 1, 1},
{ 0, 1, 2},
{ 0, 1, -2},
{ 0, 1, -1},
{ 0, 2, 0},
{ 0, 2, 1},
{ 0, 2, 2},
{ 0, 2, -2},
{ 0, 2, -1},
{ 0, -2, 0},
{ 0, -2, 1},
{ 0, -2, 2},
{ 0, -2, -2},
{ 0, -2, -1},
{ 0, -1, 0},
{ 0, -1, 1},
{ 0, -1, 2},
{ 0, -1, -2},
{ 0, -1, -1},
{ 1, 0, 0},
{ 1, 0, 1},
{ 1, 0, 2},
{ 1, 0, -2},
{ 1, 0, -1},
{ 1, 1, 0},
{ 1, 1, 1},
{ 1, 1, 2},
{ 1, 1, -2},
{ 1, 1, -1},
{ 1, 2, 0},
{ 1, 2, 1},
{ 1, 2, 2},
{ 1, 2, -2},
{ 1, 2, -1},
{ 1, -2, 0},
{ 1, -2, 1},
{ 1, -2, 2},
{ 1, -2, -2},
{ 1, -2, -1},
{ 1, -1, 0},
{ 1, -1, 1},
{ 1, -1, 2},
{ 1, -1, -2},
{ 1, -1, -1},
{ 2, 0, 0},
{ 2, 0, 1},
{ 2, 0, 2},
{ 2, 0, -2},
{ 2, 0, -1},
{ 2, 1, 0},
{ 2, 1, 1},
{ 2, 1, 2},
{ 2, 1, -2},
{ 2, 1, -1},
{ 2, 2, 0},
{ 2, 2, 1},
{ 2, 2, 2},
{ 2, 2, -2},
{ 2, 2, -1},
{ 2, -2, 0},
{ 2, -2, 1},
{ 2, -2, 2},
{ 2, -2, -2},
{ 2, -2, -1},
{ 2, -1, 0},
{ 2, -1, 1},
{ 2, -1, 2},
{ 2, -1, -2},
{ 2, -1, -1},
{-2, 0, 0},
{-2, 0, 1},
{-2, 0, 2},
{-2, 0, -2},
{-2, 0, -1},
{-2, 1, 0},
{-2, 1, 1},
{-2, 1, 2},
{-2, 1, -2},
{-2, 1, -1},
{-2, 2, 0},
{-2, 2, 1},
{-2, 2, 2},
{-2, 2, -2},
{-2, 2, -1},
{-2, -2, 0},
{-2, -2, 1},
{-2, -2, 2},
{-2, -2, -2},
{-2, -2, -1},
{-2, -1, 0},
{-2, -1, 1},
{-2, -1, 2},
{-2, -1, -2},
{-2, -1, -1},
{-1, 0, 0},
{-1, 0, 1},
{-1, 0, 2},
{-1, 0, -2},
{-1, 0, -1},
{-1, 1, 0},
{-1, 1, 1},
{-1, 1, 2},
{-1, 1, -2},
{-1, 1, -1},
{-1, 2, 0},
{-1, 2, 1},
{-1, 2, 2},
{-1, 2, -2},
{-1, 2, -1},
{-1, -2, 0},
{-1, -2, 1},
{-1, -2, 2},
{-1, -2, -2},
{-1, -2, -1},
{-1, -1, 0},
{-1, -1, 1},
{-1, -1, 2},
{-1, -1, -2},
{-1, -1, -1}
};
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal);
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const size_t num_q,
SPGCONST double qpoints[][3]);
static size_t get_dense_ir_reciprocal_mesh(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
static size_t get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3]);
static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3],
size_t bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3]);
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
const int mesh[3]);
static int check_mesh_symmetry(const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal);
/* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' corresponds to the index of grid_point. */
/* int-typed wrapper around the dense (size_t) irreducible-mesh search.  */
/* Returns the number of irreducible k-points, or 0 on allocation        */
/* failure.                                                              */
int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3],
                                        int ir_mapping_table[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const MatINT *rot_reciprocal)
{
  int num_ir;
  size_t i, num_grid;
  size_t *dense_ir_mapping_table;

  /* Compute the grid size in size_t: mesh[0]*mesh[1]*mesh[2] evaluated */
  /* in int overflows for dense meshes.                                 */
  num_grid = (size_t)mesh[0] * mesh[1] * mesh[2];
  if ((dense_ir_mapping_table =
       (size_t*)malloc(sizeof(size_t) * num_grid)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    return 0;
  }

  num_ir = kpt_get_dense_irreducible_reciprocal_mesh(grid_address,
                                                     dense_ir_mapping_table,
                                                     mesh,
                                                     is_shift,
                                                     rot_reciprocal);
  /* Narrow the size_t mapping back to the caller's int table. */
  for (i = 0; i < num_grid; i++) {
    ir_mapping_table[i] = (int)dense_ir_mapping_table[i];
  }
  free(dense_ir_mapping_table);
  dense_ir_mapping_table = NULL;

  return num_ir;
}
/* size_t flavour of the irreducible-mesh search; fills grid_address and */
/* ir_mapping_table and returns the number of irreducible points.        */
size_t kpt_get_dense_irreducible_reciprocal_mesh(int grid_address[][3],
                                                 size_t ir_mapping_table[],
                                                 const int mesh[3],
                                                 const int is_shift[3],
                                                 const MatINT *rot_reciprocal)
{
  /* Forward directly to the internal implementation. */
  return get_dense_ir_reciprocal_mesh(grid_address,
                                      ir_mapping_table,
                                      mesh,
                                      is_shift,
                                      rot_reciprocal);
}
/* int-typed wrapper around kpt_get_dense_stabilized_reciprocal_mesh.    */
/* Returns the number of irreducible k-points, or 0 on allocation        */
/* failure.                                                              */
int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3],
                                       int ir_mapping_table[],
                                       const int mesh[3],
                                       const int is_shift[3],
                                       const int is_time_reversal,
                                       const MatINT * rotations,
                                       const size_t num_q,
                                       SPGCONST double qpoints[][3])
{
  int num_ir;
  size_t i, num_grid;
  size_t *dense_ir_mapping_table;

  /* Compute the grid size in size_t: mesh[0]*mesh[1]*mesh[2] evaluated */
  /* in int overflows for dense meshes.                                 */
  num_grid = (size_t)mesh[0] * mesh[1] * mesh[2];
  if ((dense_ir_mapping_table =
       (size_t*)malloc(sizeof(size_t) * num_grid)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    return 0;
  }

  num_ir = kpt_get_dense_stabilized_reciprocal_mesh(grid_address,
                                                    dense_ir_mapping_table,
                                                    mesh,
                                                    is_shift,
                                                    is_time_reversal,
                                                    rotations,
                                                    num_q,
                                                    qpoints);
  /* Narrow the size_t mapping back to the caller's int table. */
  for (i = 0; i < num_grid; i++) {
    ir_mapping_table[i] = (int)dense_ir_mapping_table[i];
  }
  free(dense_ir_mapping_table);
  dense_ir_mapping_table = NULL;

  return num_ir;
}
/* Irreducible mesh under the reciprocal point group restricted to the   */
/* symmetry operations that leave the given q-point star invariant.      */
/* Returns the number of irreducible points, or 0 on failure (the helper */
/* constructors are documented to return NULL when allocation fails, and */
/* were previously dereferenced unchecked).                              */
size_t kpt_get_dense_stabilized_reciprocal_mesh(int grid_address[][3],
                                                size_t ir_mapping_table[],
                                                const int mesh[3],
                                                const int is_shift[3],
                                                const int is_time_reversal,
                                                const MatINT * rotations,
                                                const size_t num_q,
                                                SPGCONST double qpoints[][3])
{
  size_t num_ir;
  MatINT *rot_reciprocal, *rot_reciprocal_q;
  double tolerance;

  rot_reciprocal = NULL;
  rot_reciprocal_q = NULL;

  rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
  if (rot_reciprocal == NULL) {
    return 0;
  }

  tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
  rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
                                                       tolerance,
                                                       num_q,
                                                       qpoints);
  mat_free_MatINT(rot_reciprocal);
  rot_reciprocal = NULL;
  if (rot_reciprocal_q == NULL) {
    return 0;
  }

  num_ir = get_dense_ir_reciprocal_mesh(grid_address,
                                        ir_mapping_table,
                                        mesh,
                                        is_shift,
                                        rot_reciprocal_q);

  mat_free_MatINT(rot_reciprocal_q);
  rot_reciprocal_q = NULL;

  return num_ir;
}
/* Fill rot_grid_points[r] with the dense grid index of rotation r       */
/* applied to address_orig.  Addresses are handled in doubled            */
/* coordinates (2*address + shift) so half-grid shifts stay integral.    */
void
kpt_get_dense_grid_points_by_rotations(size_t rot_grid_points[],
                                       const int address_orig[3],
                                       SPGCONST int (*rot_reciprocal)[3][3],
                                       const int num_rot,
                                       const int mesh[3],
                                       const int is_shift[3])
{
  int k, r;
  int orig_double[3], rotated_double[3];

  for (k = 0; k < 3; k++) {
    orig_double[k] = address_orig[k] * 2 + is_shift[k];
  }
  for (r = 0; r < num_rot; r++) {
    mat_multiply_matrix_vector_i3(rotated_double,
                                  rot_reciprocal[r],
                                  orig_double);
    rot_grid_points[r] =
      kgd_get_dense_grid_point_double_mesh(rotated_double, mesh);
  }
}
/* Like kpt_get_dense_grid_points_by_rotations, but the rotated points   */
/* are resolved through bz_map on the doubled (Brillouin-zone) mesh.     */
void
kpt_get_dense_BZ_grid_points_by_rotations(size_t rot_grid_points[],
                                          const int address_orig[3],
                                          SPGCONST int (*rot_reciprocal)[3][3],
                                          const int num_rot,
                                          const int mesh[3],
                                          const int is_shift[3],
                                          const size_t bz_map[])
{
  int k, r;
  int orig_double[3], rotated_double[3], bzmesh[3];

  for (k = 0; k < 3; k++) {
    bzmesh[k] = mesh[k] * 2;
    orig_double[k] = address_orig[k] * 2 + is_shift[k];
  }
  for (r = 0; r < num_rot; r++) {
    mat_multiply_matrix_vector_i3(rotated_double,
                                  rot_reciprocal[r],
                                  orig_double);
    rot_grid_points[r] =
      bz_map[kgd_get_dense_grid_point_double_mesh(rotated_double, bzmesh)];
  }
}
/* int-typed wrapper around the dense BZ relocation.  Unused bz_map      */
/* entries become -1.  Returns the total number of BZ grid points, or 0  */
/* on allocation failure.                                                */
int kpt_relocate_BZ_grid_address(int bz_grid_address[][3],
                                 int bz_map[],
                                 SPGCONST int grid_address[][3],
                                 const int mesh[3],
                                 SPGCONST double rec_lattice[3][3],
                                 const int is_shift[3])
{
  int num_bzgp;
  size_t i, num_bz_map;
  size_t *dense_bz_map;

  /* size_t arithmetic: 8 * mesh[0]*mesh[1]*mesh[2] overflows int for    */
  /* dense meshes, and the sentinel compare below must be unsigned.      */
  num_bz_map = (size_t)mesh[0] * mesh[1] * mesh[2] * 8;
  if ((dense_bz_map =
       (size_t*)malloc(sizeof(size_t) * num_bz_map)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    return 0;
  }

  num_bzgp = kpt_relocate_dense_BZ_grid_address(bz_grid_address,
                                                dense_bz_map,
                                                grid_address,
                                                mesh,
                                                rec_lattice,
                                                is_shift);
  for (i = 0; i < num_bz_map; i++) {
    /* The dense code marks unused entries with num_bz_map itself. */
    if (dense_bz_map[i] == num_bz_map) {
      bz_map[i] = -1;
    } else {
      bz_map[i] = (int)dense_bz_map[i];
    }
  }
  free(dense_bz_map);
  dense_bz_map = NULL;

  return num_bzgp;
}
/* size_t flavour of the BZ relocation; see relocate_dense_BZ_grid_address. */
size_t kpt_relocate_dense_BZ_grid_address(int bz_grid_address[][3],
                                          size_t bz_map[],
                                          SPGCONST int grid_address[][3],
                                          const int mesh[3],
                                          SPGCONST double rec_lattice[3][3],
                                          const int is_shift[3])
{
  size_t num_total_gp;

  num_total_gp = relocate_dense_BZ_grid_address(bz_grid_address,
                                                bz_map,
                                                grid_address,
                                                mesh,
                                                rec_lattice,
                                                is_shift);
  return num_total_gp;
}
/* Public wrapper for the static reciprocal point-group constructor. */
MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations,
                                       const int is_time_reversal)
{
  MatINT *pointgroup;

  pointgroup = get_point_group_reciprocal(rotations, is_time_reversal);
  return pointgroup;
}
/* Public wrapper for the q-point-restricted point-group constructor. */
MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
                                              const double symprec,
                                              const size_t num_q,
                                              SPGCONST double qpoints[][3])
{
  MatINT *pointgroup_q;

  pointgroup_q = get_point_group_reciprocal_with_q(rot_reciprocal,
                                                   symprec,
                                                   num_q,
                                                   qpoints);
  return pointgroup_q;
}
/* Return NULL if failed */
/* Build the distinct reciprocal-space rotations: the transposes of the  */
/* input rotations, plus their products with inversion when time         */
/* reversal is assumed, with duplicates removed.                         */
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
                                          const int is_time_reversal)
{
  int i, j, num_rot, already_seen;
  MatINT *rot_reciprocal, *rot_return;
  int *unique_rot;
  SPGCONST int inversion[3][3] = {
    {-1, 0, 0 },
    { 0,-1, 0 },
    { 0, 0,-1 }
  };

  rot_reciprocal = NULL;
  rot_return = NULL;
  unique_rot = NULL;

  /* Twice the size when time reversal adds the inversion images. */
  rot_reciprocal = mat_alloc_MatINT(is_time_reversal ?
                                    rotations->size * 2 : rotations->size);
  if (rot_reciprocal == NULL) {
    return NULL;
  }

  if ((unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    mat_free_MatINT(rot_reciprocal);
    rot_reciprocal = NULL;
    return NULL;
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    unique_rot[i] = -1;
  }

  /* A reciprocal-space rotation is the transpose of the real-space one. */
  for (i = 0; i < rotations->size; i++) {
    mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);
    if (is_time_reversal) {
      mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size + i],
                             inversion,
                             rot_reciprocal->mat[i]);
    }
  }

  /* Keep only the first occurrence of each distinct matrix. */
  num_rot = 0;
  for (i = 0; i < rot_reciprocal->size; i++) {
    already_seen = 0;
    for (j = 0; j < num_rot; j++) {
      if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]],
                                       rot_reciprocal->mat[i])) {
        already_seen = 1;
        break;
      }
    }
    if (!already_seen) {
      unique_rot[num_rot] = i;
      num_rot++;
    }
  }

  if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) {
    for (i = 0; i < num_rot; i++) {
      mat_copy_matrix_i3(rot_return->mat[i],
                         rot_reciprocal->mat[unique_rot[i]]);
    }
  }

  free(unique_rot);
  unique_rot = NULL;
  mat_free_MatINT(rot_reciprocal);
  rot_reciprocal = NULL;

  /* NULL when the final allocation failed. */
  return rot_return;
}
/* Return NULL if failed */
/* Extract from rot_reciprocal the operations that leave the q-point     */
/* list invariant: every rotated q must coincide with some listed q      */
/* modulo a reciprocal lattice vector, within symprec.                   */
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
                                                 const double symprec,
                                                 const size_t num_q,
                                                 SPGCONST double qpoints[][3])
{
  int i, j, k, l, preserves_star, num_rot;
  int *kept;
  double q_rot[3], diff[3];
  MatINT * rot_reciprocal_q;

  kept = NULL;
  rot_reciprocal_q = NULL;
  preserves_star = 0;
  num_rot = 0;

  if ((kept = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) {
    warning_print("spglib: Memory of ir_rot could not be allocated.");
    return NULL;
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    kept[i] = -1;
  }
  for (i = 0; i < rot_reciprocal->size; i++) {
    for (j = 0; j < num_q; j++) {
      preserves_star = 0;
      mat_multiply_matrix_vector_id3(q_rot,
                                     rot_reciprocal->mat[i],
                                     qpoints[j]);
      /* Does the rotated q match any listed q up to a lattice vector? */
      for (k = 0; k < num_q; k++) {
        for (l = 0; l < 3; l++) {
          diff[l] = q_rot[l] - qpoints[k][l];
          diff[l] -= mat_Nint(diff[l]);
        }
        if (mat_Dabs(diff[0]) < symprec &&
            mat_Dabs(diff[1]) < symprec &&
            mat_Dabs(diff[2]) < symprec) {
          preserves_star = 1;
          break;
        }
      }
      if (!preserves_star) {
        break;  /* one unmatched q disqualifies this rotation */
      }
    }
    if (preserves_star) {
      kept[num_rot] = i;
      num_rot++;
    }
  }

  if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) {
    for (i = 0; i < num_rot; i++) {
      mat_copy_matrix_i3(rot_reciprocal_q->mat[i],
                         rot_reciprocal->mat[kept[i]]);
    }
  }

  free(kept);
  kept = NULL;

  /* NULL when the final allocation failed. */
  return rot_reciprocal_q;
}
/* Dispatch: use the fast path when the mesh respects the point-group    */
/* symmetry, otherwise the distortion-tolerant path.                     */
static size_t get_dense_ir_reciprocal_mesh(int grid_address[][3],
                                           size_t ir_mapping_table[],
                                           const int mesh[3],
                                           const int is_shift[3],
                                           const MatINT *rot_reciprocal)
{
  if (!check_mesh_symmetry(mesh, is_shift, rot_reciprocal)) {
    return get_dense_ir_reciprocal_mesh_distortion(grid_address,
                                                   ir_mapping_table,
                                                   mesh,
                                                   is_shift,
                                                   rot_reciprocal);
  }
  return get_dense_ir_reciprocal_mesh_normal(grid_address,
                                             ir_mapping_table,
                                             mesh,
                                             is_shift,
                                             rot_reciprocal);
}
static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal)
{
/* In the following loop, mesh is doubled. */
/* Even and odd mesh numbers correspond to */
/* is_shift[i] are 0 or 1, respectively. */
/* is_shift = [0,0,0] gives Gamma center mesh. */
/* grid: reducible grid points */
/* ir_mapping_table: the mapping from each point to ir-point. */
long i;
size_t grid_point_rot;
int j;
int address_double[3], address_double_rot[3];
kgd_get_all_grid_addresses(grid_address, mesh);
#pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot)
for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
kgd_get_grid_address_double_mesh(address_double,
grid_address[i],
mesh,
is_shift);
/* Map i to the smallest grid index in its orbit under the rotations. */
ir_mapping_table[i] = i;
for (j = 0; j < rot_reciprocal->size; j++) {
mat_multiply_matrix_vector_i3(address_double_rot,
rot_reciprocal->mat[j],
address_double);
grid_point_rot = kgd_get_dense_grid_point_double_mesh(address_double_rot, mesh);
if (grid_point_rot < ir_mapping_table[i]) {
#ifdef _OPENMP
/* Threaded: other table entries may still be changing, so store */
/* the raw minimum; get_dense_num_ir() compresses chains after.  */
ir_mapping_table[i] = grid_point_rot;
#else
/* Serial: entries below i are final, so follow the chain now    */
/* and stop scanning further rotations.                          */
ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
break;
#endif
}
}
}
return get_dense_num_ir(ir_mapping_table, mesh);
}
/* Irreducible mesh search tolerating meshes that break the point-group  */
/* symmetry (unequal divisions along symmetry-related axes): a rotated   */
/* point is only folded when it lands exactly on the sampling mesh.      */
static size_t
get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3],
size_t ir_mapping_table[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal)
{
long i;
size_t grid_point_rot;
int j, k, indivisible;
int address_double[3], address_double_rot[3];
long long_address_double[3], long_address_double_rot[3], divisor[3];
/* divisor, long_address_double, and long_address_double_rot have */
/* long integer type to treat dense mesh. */
kgd_get_all_grid_addresses(grid_address, mesh);
/* Scale axis j by the product of the other two mesh divisions so the */
/* rotated address can be tested for exact divisibility below.        */
for (j = 0; j < 3; j++) {
divisor[j] = mesh[(j + 1) % 3] * mesh[(j + 2) % 3];
}
#pragma omp parallel for private(j, k, grid_point_rot, address_double, address_double_rot, long_address_double, long_address_double_rot)
for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
kgd_get_grid_address_double_mesh(address_double,
grid_address[i],
mesh,
is_shift);
for (j = 0; j < 3; j++) {
long_address_double[j] = address_double[j] * divisor[j];
}
ir_mapping_table[i] = i;
for (j = 0; j < rot_reciprocal->size; j++) {
/* Equivalent to mat_multiply_matrix_vector_i3 except for data type */
for (k = 0; k < 3; k++) {
long_address_double_rot[k] =
rot_reciprocal->mat[j][k][0] * long_address_double[0] +
rot_reciprocal->mat[j][k][1] * long_address_double[1] +
rot_reciprocal->mat[j][k][2] * long_address_double[2];
}
/* The rotated point belongs to this mesh only if every scaled      */
/* component divides evenly AND keeps the parity demanded by        */
/* is_shift; otherwise skip this rotation for this point.           */
for (k = 0; k < 3; k++) {
indivisible = long_address_double_rot[k] % divisor[k];
if (indivisible) {break;}
address_double_rot[k] = long_address_double_rot[k] / divisor[k];
if ((address_double_rot[k] % 2 != 0 && is_shift[k] == 0) ||
(address_double_rot[k] % 2 == 0 && is_shift[k] == 1)) {
indivisible = 1;
break;
}
}
if (indivisible) {continue;}
grid_point_rot =
kgd_get_dense_grid_point_double_mesh(address_double_rot, mesh);
if (grid_point_rot < ir_mapping_table[i]) {
#ifdef _OPENMP
/* Threaded: store the raw minimum; chains are flattened later in */
/* get_dense_num_ir().                                            */
ir_mapping_table[i] = grid_point_rot;
#else
ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
break;
#endif
}
}
}
return get_dense_num_ir(ir_mapping_table, mesh);
}
/* Count the irreducible points (fixed points of the mapping table) and, */
/* in the OpenMP build, flatten the chains the parallel fill may have    */
/* left so each entry points at its representative.                      */
static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3])
{
long i;
size_t num_ir;
num_ir = 0;
#pragma omp parallel for reduction(+:num_ir)
for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
if (ir_mapping_table[i] == i) {
num_ir++;
}
}
#ifdef _OPENMP
/* NOTE(review): single compression pass; assumes the parallel fill    */
/* leaves chains of depth at most one -- TODO confirm upstream.        */
for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
ir_mapping_table[i] = ir_mapping_table[ir_mapping_table[i]];
}
#endif
return num_ir;
}
/* Shift every grid point by the reciprocal-lattice translation that     */
/* minimizes its Cartesian norm, i.e. move it into the first Brillouin   */
/* zone.  Points whose minimum is (near-)degenerate sit on the BZ        */
/* boundary; their extra images are appended after total_num_gp.         */
/* bz_map, indexed on the doubled mesh, maps addresses to stored points. */
/* Returns the total number of stored BZ grid points.                    */
static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3],
size_t bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3])
{
double tolerance, min_distance;
double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE];
int bzmesh[3], bz_address_double[3];
size_t i, boundary_num_gp, total_num_gp, bzgp, gp, num_bzmesh;
int j, k, min_index;
tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh);
for (j = 0; j < 3; j++) {
bzmesh[j] = mesh[j] * 2;
}
/* Initialize bz_map with num_bzmesh, the "unused entry" sentinel. */
num_bzmesh = bzmesh[0] * bzmesh[1] * (size_t)(bzmesh[2]);
for (i = 0; i < num_bzmesh; i++) {
bz_map[i] = num_bzmesh;
}
boundary_num_gp = 0;
total_num_gp = mesh[0] * mesh[1] * (size_t)(mesh[2]);
/* Multithreading doesn't work for this loop since gp calculated */
/* with boundary_num_gp is unstable to store bz_grid_address. */
for (i = 0; i < total_num_gp; i++) {
/* Squared Cartesian norm of every candidate image of point i. */
for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
for (k = 0; k < 3; k++) {
q_vector[k] =
((grid_address[i][k] + bz_search_space[j][k] * mesh[k]) * 2 +
is_shift[k]) / ((double)mesh[k]) / 2;
}
mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector);
distance[j] = mat_norm_squared_d3(q_vector);
}
min_distance = distance[0];
min_index = 0;
for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
if (distance[j] < min_distance) {
min_distance = distance[j];
min_index = j;
}
}
/* The minimal image is stored at slot i; every other image within    */
/* tolerance of the minimum is a boundary copy appended at the end.   */
for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
if (distance[j] < min_distance + tolerance) {
if (j == min_index) {
gp = i;
} else {
gp = boundary_num_gp + total_num_gp;
}
for (k = 0; k < 3; k++) {
bz_grid_address[gp][k] =
grid_address[i][k] + bz_search_space[j][k] * mesh[k];
bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k];
}
bzgp = kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh);
bz_map[bzgp] = gp;
if (j != min_index) {
boundary_num_gp++;
}
}
}
}
return boundary_num_gp + total_num_gp;
}
/* Tolerance for treating two BZ images as degenerate: 1% of the largest */
/* squared reciprocal-basis-vector length divided by its mesh divisions. */
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
                                             const int mesh[3])
{
  int col, row;
  double longest;
  double length[3];

  for (col = 0; col < 3; col++) {
    /* Squared length of reciprocal basis vector `col` per mesh step. */
    length[col] = 0;
    for (row = 0; row < 3; row++) {
      length[col] += rec_lattice[row][col] * rec_lattice[row][col];
    }
    length[col] /= mesh[col] * mesh[col];
  }

  longest = length[0];
  if (length[1] > longest) {
    longest = length[1];
  }
  if (length[2] > longest) {
    longest = length[2];
  }

  return longest * 0.01;
}
/* Return 1 when the mesh is compatible with the reciprocal point group: */
/* axes related by a symmetry operation must carry identical mesh        */
/* divisions and shifts.  Return 0 to request the distortion-tolerant    */
/* treatment.                                                            */
static int check_mesh_symmetry(const int mesh[3],
                               const int is_shift[3],
                               const MatINT *rot_reciprocal)
{
  int i, j, k, sum;
  int eq[3];

  eq[0] = 0; /* a=b */
  eq[1] = 0; /* b=c */
  eq[2] = 0; /* c=a */

  /* Check 3 and 6 fold rotations and non-conventional choice of unit cells */
  for (i = 0; i < rot_reciprocal->size; i++) {
    sum = 0;
    for (j = 0; j < 3; j++) {
      for (k = 0; k < 3; k++) {
        sum += abs(rot_reciprocal->mat[i][j][k]);
      }
    }
    if (sum > 3) {
      /* An operation mixes axes: play safe and use the distortion path. */
      return 0;
    }
  }

  /* Column j of a matrix is the image of axis j: detect operations that */
  /* map one axis onto another, linking their mesh requirements.         */
  for (i = 0; i < rot_reciprocal->size; i++) {
    /* a -> b: first column is (0,1,0) */
    if (rot_reciprocal->mat[i][0][0] == 0 &&
        rot_reciprocal->mat[i][1][0] == 1 &&
        rot_reciprocal->mat[i][2][0] == 0) {eq[0] = 1;}
    /* b -> c: second column is (0,0,1).                                  */
    /* Bug fix: this test previously duplicated the a -> b test above, so */
    /* b=c symmetry was never detected.                                   */
    if (rot_reciprocal->mat[i][0][1] == 0 &&
        rot_reciprocal->mat[i][1][1] == 0 &&
        rot_reciprocal->mat[i][2][1] == 1) {eq[1] = 1;}
    /* a -> c: first column is (0,0,1) */
    if (rot_reciprocal->mat[i][0][0] == 0 &&
        rot_reciprocal->mat[i][1][0] == 0 &&
        rot_reciprocal->mat[i][2][0] == 1) {eq[2] = 1;}
  }

  return (((eq[0] && mesh[0] == mesh[1] && is_shift[0] == is_shift[1]) || (!eq[0])) &&
          ((eq[1] && mesh[1] == mesh[2] && is_shift[1] == is_shift[2]) || (!eq[1])) &&
          ((eq[2] && mesh[2] == mesh[0] && is_shift[2] == is_shift[0]) || (!eq[2])));
}
|
laplace_openmp.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define WIDTH 1000
#define HEIGHT 1000
#define TEMP_TOLERANCE 0.01
double Temperature[HEIGHT+2][WIDTH+2];
double Temperature_previous[HEIGHT+2][WIDTH+2];
void initialize();
void track_progress(int iter);
/* Jacobi iteration for the steady-state heat (Laplace) equation on a   */
/* WIDTH x HEIGHT grid with fixed boundary temperatures, parallelized   */
/* with OpenMP.  Iterates until the largest per-cell change drops below */
/* TEMP_TOLERANCE, then reports the final error and the wall time.      */
int main(int argc, char *argv[]) {
int i, j;
int iteration=1;
double worst_dt=100;  /* start above the tolerance so the loop runs */
struct timeval start_time, stop_time, elapsed_time;
gettimeofday(&start_time,NULL);
initialize();
while ( worst_dt > TEMP_TOLERANCE ) {
/* each interior cell becomes the average of its four neighbours */
#pragma omp parallel for private(i,j)
for(i = 1; i <= HEIGHT; i++) {
for(j = 1; j <= WIDTH; j++) {
Temperature[i][j] = 0.25 * (Temperature_previous[i+1][j] + Temperature_previous[i-1][j] +
Temperature_previous[i][j+1] + Temperature_previous[i][j-1]);
}
}
worst_dt = 0.0;
/* largest change this sweep (max reduction) + copy new grid to old */
#pragma omp parallel for reduction(max:worst_dt) private(i,j)
for(i = 1; i <= HEIGHT; i++){
for(j = 1; j <= WIDTH; j++){
worst_dt = fmax( fabs(Temperature[i][j]-Temperature_previous[i][j]), worst_dt);
Temperature_previous[i][j] = Temperature[i][j];
}
}
if((iteration % 100) == 0) {
track_progress(iteration);
}
iteration++;
}
gettimeofday(&stop_time,NULL);
timersub(&stop_time, &start_time, &elapsed_time);
printf("\nMax error at iteration %d was %f\n", iteration-1, worst_dt);
printf("Total time was %f seconds.\n", elapsed_time.tv_sec+elapsed_time.tv_usec/1000000.0);
}
/* Zero the previous-temperature grid, then impose the boundary        */
/* conditions: the right and bottom edges ramp linearly from 0 to 100, */
/* while the left and top edges stay at 0.                             */
void initialize(){
    int row, col;

    for (row = 0; row <= HEIGHT+1; row++) {
        for (col = 0; col <= WIDTH+1; col++) {
            Temperature_previous[row][col] = 0.0;
        }
    }

    /* left edge fixed at 0, right edge ramps with the row index */
    for (row = 0; row <= HEIGHT+1; row++) {
        Temperature_previous[row][0] = 0.0;
        Temperature_previous[row][WIDTH+1] = (100.0/HEIGHT)*row;
    }

    /* top edge fixed at 0, bottom edge ramps with the column index */
    for (col = 0; col <= WIDTH+1; col++) {
        Temperature_previous[0][col] = 0.0;
        Temperature_previous[HEIGHT+1][col] = (100.0/WIDTH)*col;
    }
}
/* Progress indicator: print the temperatures of the last few cells on */
/* the grid diagonal (near the hottest corner).                        */
void track_progress(int iteration) {
    int d;

    printf("---------- Iteration number: %d ------------\n", iteration);
    for (d = HEIGHT-5; d <= HEIGHT; d++) {
        printf("[%d,%d]: %5.2f ", d, d, Temperature[d][d]);
    }
    printf("\n");
}
|
GB_binop__eq_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_fp64
// A.*B function (eWiseMult): GB_AemultB__eq_fp64
// A*D function (colscale): GB_AxD__eq_fp64
// D*A function (rowscale): GB_DxB__eq_fp64
// C+=B function (dense accum): GB_Cdense_accumB__eq_fp64
// C+=b function (dense accum): GB_Cdense_accumb__eq_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_fp64
// C=scalar+B GB_bind1st__eq_fp64
// C=scalar+B' GB_bind1st_tran__eq_fp64
// C=A+scalar GB_bind2nd__eq_fp64
// C=A'+scalar GB_bind2nd_tran__eq_fp64
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FP64 || GxB_NO_EQ_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense; the loop comes from the shared template,
// specialized here for z = (x == y) on double inputs with bool output.
GrB_Info GB_Cdense_ewise3_noaccum__eq_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* flags; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse B into a dense C.  The template body is
// compiled out (#if 0) for this operator; the function still reports
// success for API uniformity with the other generated kernels.
GrB_Info GB_Cdense_accumB__eq_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense C.  The template body is
// compiled out (#if 0) for this operator; success is returned regardless.
GrB_Info GB_Cdense_accumb__eq_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling the columns of A by diagonal D with z = (x == y),
// so C holds bool comparison results.  The loop lives in the template.
GrB_Info GB_AxD__eq_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling the rows of B by diagonal D with z = (x == y).
GrB_Info GB_DxB__eq_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns, applying
// z = (x == y) where both entries are present.
GrB_Info GB_AaddB__eq_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of patterns,
// applying z = (x == y).
GrB_Info GB_AemultB__eq_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for all p: EQ with the scalar bound to the
// first argument; the loop is embarrassingly parallel over p.
GrB_Info GB_bind1st__eq_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for all p: EQ with the scalar bound to the
// second argument; the loop is embarrassingly parallel over p.
GrB_Info GB_bind2nd__eq_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// GB_unop_transpose.c expands GB_CAST_OP once per entry.  Despite the
// macro's name there is no typecast here: aij is read as double and the
// bool result of (x == aij) is stored directly into Cx.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (x == aij) ; \
}

// C = op (x, A'): transpose A and replace each entry aij with (x == aij).
// The transpose loop itself lives in the template GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__eq_fp64
(
    GrB_Matrix C,                       // output matrix (bool)
    const GB_void *x_input,             // scalar x, bound as the 1st operand
    const GrB_Matrix A,                 // input matrix (double)
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts for transpose
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across threads
    int naslice                         // number of slices of A
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for any code that follows in this generated file
    // (harmless here since both definitions are identical)
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// GB_unop_transpose.c expands GB_CAST_OP once per entry.  Despite the
// macro's name there is no typecast here: aij is read as double and the
// bool result of (aij == y) is stored directly into Cx.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (aij == y) ; \
}

// C = op (A', y): transpose A and replace each entry aij with (aij == y).
// No GB_ATYPE redefinition is needed here: A is the 1st operand of
// z=f(x,y), which matches the GB_ATYPE already in effect in this file.
GrB_Info GB_bind2nd_tran__eq_fp64
(
    GrB_Matrix C,                       // output matrix (bool)
    const GrB_Matrix A,                 // input matrix (double)
    const GB_void *y_input,             // scalar y, bound as the 2nd operand
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts for transpose
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across threads
    int naslice                         // number of slices of A
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
Parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Forward declaration: binarySearch is defined later in this file.
 * (The original relied on an implicit declaration, which is undefined
 * behavior in C99 and a hard error in C++.) */
int binarySearch(int left, int right, int t, int token, int *a);

/*
 * Reads a vector size and a key from stdin, fills the vector with the
 * even numbers 0, 2, 4, ... (already sorted, as binary search requires),
 * then searches for the key and reports its 1-based position.
 *
 * Returns 0 on success, 1 on invalid input or allocation failure.
 */
int main()
{
    int n, i, token, t = 2, result, *a;

    printf("Inserire la dimensione del vettore: ");
    if (scanf("%d", &n) != 1 || n <= 0)
    {
        fprintf(stderr, "\nInvalid vector size.\n");
        return 1;
    }
    a = (int *)malloc(n * sizeof(int));
    if (a == NULL)
    {
        fprintf(stderr, "\nOut of memory.\n");
        return 1;
    }
    for (i = 0; i < n; i++)
    {
        a[i] = 2 * i;
        printf("[%d]\t", a[i]);
    }
    printf("\nInserire il numero da cercare: ");
    if (scanf("%d", &token) != 1)
    {
        free(a);
        fprintf(stderr, "\nInvalid key.\n");
        return 1;
    }
    result = binarySearch(0, n - 1, t, token, a);
    if (result < 0)
        printf("\nThe number is not in the vector.");
    else
        printf("\nThe number is in the position: %d\n", result + 1);
    free(a);  /* fix: the original leaked the vector */
    return 0;
}
/*
 * Parallel binary search for `token` in the sorted vector a[left..right].
 * The range is split into chunks that are searched concurrently when the
 * program is built with OpenMP, and sequentially otherwise (the original
 * called the OpenMP runtime unconditionally and also searched only one
 * chunk when OpenMP was absent).
 *
 * Returns the index of an element equal to `token`, or -1 if absent.
 *
 * NOTE: the chunk size is half the range, so the chunking is exact only
 * for t == 2 (the value main() always passes); chunks that would start
 * past the end of the range are skipped.
 */
int binarySearch(int left, int right, int t, int token, int *a)
{
    int result = -1, c;
    int size = (right - left + 1) / 2;  /* sub-portion of the vector */

    /* Base case: the range holds a single element. */
    if (size == 0)
        return (a[left] == token) ? left : -1;

#ifdef _OPENMP
    omp_set_num_threads(t);  /* t = 2, hence 2 threads */
    omp_set_nested(1);       /* recursive calls open nested regions */
#endif
    #pragma omp parallel for
    for (c = 0; c < t; c++)
    {
        int leftThread = left + c * size;
        int rightThread = leftThread + size - 1;
        if (c == t - 1)
            rightThread = right;  /* last chunk absorbs the remainder */
        if (leftThread > right)
            continue;             /* chunk past the range (only if t > 2) */
        printf("\nLeftThread: %d", leftThread);
        printf("\nRightThread: %d", rightThread);
        /* Recurse only into a chunk whose value range can contain token
         * (valid because the vector is sorted). */
        if (a[leftThread] <= token && a[rightThread] >= token)
        {
            int r = binarySearch(leftThread, rightThread, t, token, a);
            if (r >= 0)
            {
                #pragma omp critical
                result = r;  /* fix: publish the hit without a data race */
            }
        }
    }
    return result;  /* fix: the original fell off the end without returning */
}
|
opencl_dmg_fmt_plug.c | /*
* DMG cracker patch for JtR. Hacked together during August of 2012
* by Dhiru Kholia <dhiru.kholia at gmail.com>
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* Copyright (c) 2015, magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
/*
* Debug levels:
* 1 show what "test" hits
* 2 dump printables from the decrypted blocks
* 3 dump hex from the decrypted blocks
* 4 dump decrypted blocks to files (will overwrite with no mercy):
* dmg.debug.main main block
* dmg.debug alternate block (if present, this is the start block)
*/
//#define DMG_DEBUG 2
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_dmg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_dmg);
#else
#include <string.h>
#include <openssl/des.h>
#include "aes.h"
#include "hmac_sha.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef DMG_DEBUG
#define NEED_OS_FLOCK
#include "os.h"
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "options.h"
#include "jumbo.h"
#include "loader.h"
#include "common-opencl.h"
#define FORMAT_LABEL "dmg-opencl"
#define FORMAT_NAME "Apple DMG"
#define FORMAT_TAG "$dmg$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES/AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#undef HTONL
#define HTONL(n) (((((unsigned long)(n) & 0xFF)) << 24) | \
((((unsigned long)(n) & 0xFF00)) << 8) | \
((((unsigned long)(n) & 0xFF0000)) >> 8) | \
((((unsigned long)(n) & 0xFF000000)) >> 24))
#ifdef DMG_DEBUG
extern volatile int bench_running;
#endif
/* One candidate password as transferred to the OpenCL kernel. */
typedef struct {
    uint32_t length;               /* number of valid bytes in v[] */
    uint8_t v[PLAINTEXT_LENGTH];
} dmg_password;

/* Per-candidate kernel output: 32 bytes viewed as 8 words (presumably the
 * PBKDF2-SHA1 derived key -- confirm against the kernel source). */
typedef struct {
    uint32_t v[32/4];
} dmg_hash;

/* Key-derivation parameters shared by all candidates of one run. */
typedef struct {
    uint32_t iterations;           /* PBKDF2 iteration count */
    uint32_t outlen;               /* requested derived-key length */
    uint32_t skip_bytes;           /* offset into the derived output stream */
    uint8_t length;                /* salt length in bytes */
    uint8_t salt[64];
} dmg_salt;

/* Per-candidate crack results, plus a flag set when any candidate hit. */
static int *cracked;
static int any_cracked;

/* Everything parsed out of one "$dmg$..." ciphertext line. */
static struct custom_salt {
    unsigned int saltlen;
    unsigned char salt[20];
    unsigned int ivlen;
    unsigned char iv[32];
    int headerver;                 /* DMG header version */
    unsigned char chunk[8192];     /* encrypted data chunk used for testing */
    uint32_t encrypted_keyblob_size;
    uint8_t encrypted_keyblob[128];
    unsigned int len_wrapped_aes_key;
    unsigned char wrapped_aes_key[296];
    unsigned int len_hmac_sha1_key;
    unsigned char wrapped_hmac_sha1_key[300];
    char scp; /* start chunk present */
    unsigned char zchunk[4096]; /* chunk #0 */
    int cno;                       /* NOTE(review): looks like a chunk number -- confirm */
    int data_size;
    unsigned int iterations;       /* PBKDF2 iteration count */
} *cur_salt;

/* OpenCL plumbing: host-side buffers and device memory objects. */
static cl_int cl_error;
static dmg_password *inbuffer;     /* host copy of candidate passwords */
static dmg_hash *outbuffer;        /* host copy of kernel results */
static dmg_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;
static struct fmt_tests dmg_tests[] = {
// testimage.AES-256.64k.header_v2.dmg
{"$dmg$2*20*fd70ac1e078f01fce55a2e56145a2494446db32a*32*9110b1778f09b1a7000000000000000000000000000000000000000000000000*64*68a32866b0e67515f35dc67c4d6747a8561a9f4f6a6718a894b0a77a47c452471e04ecef9bf56f0d83d1201a509a374e00000000000000000000000000000000*14*8192*70ebe6f1d387e33e3d1093cca2e94c9a32e2c9ba47d461d737d49a7dc1b1f69407b7dbc16f7671689ea4a4641652b3f976b6f1c73c551a0a407d5a335caa169db4a6a25bbd27fbbc38fc71b29ee9b1eae349b0d8a21d57959ecca6bf74bc26ccaee69cfee4999b55374605491af6d0b9066c26995209cd1b71925bcb45a8ef5727a6c20338f08de4357d4cb42cb65ecdc2344a5d7387633c913258ba40699ea5f88804b5e562bf973096337b17b4fc1236d3c8a80b9b48aed63c5a0eae3ae924a883e948f374771bba46923658f225fd2795ce0e795269f589e0ffc81615585e1224cddde654d689a3260e69683c6198bdfcd87507c23cefe36d72f8878cb27bbe5dce868752a7cce067f5a3110f20ebd31ecd53840103e0b2d44385656398edc487bf6d1a5ec3a56af54f9d4254fd20988df41eb85e366f13da1270a3f42c6672ad5faf00fa21e9ba3691bde78ab2c267a142f275467d5b853a107dbf1d75839f0e87b3b4f1d2cec88cc02a26bc4a63aa6836b0c43c5dbb44a832050385a48d46968361ebb053c2416c02458b76c95e50970922556d40b100967340a32824e6b6e44c0c1e0da7ce989d9d5ad91560156"
"ed39666cbfbea71f28797a5a7a40e77665612e977ecb8b7fe71d500eafc29d9a0ec1d0ff1723fea7c405bc181ea93c0df42f5bf886eace3cfeee8b0dba52ba8cd2ae009e75d8845264d12dd632ca3236bc1b643437881b270183d2e2bd20808ae73d32bfe88347e33bef4921fcfac9646b74f116be1f04fc353d2222499d5247fa842d0d0f00fc9642ea7524adb65c18fff87b6efd060ec850d7de6f59869387b3d4cc8e38014d52d94ead07d16b8d94327fe5533941497c9be2dd6c04142ba57e29daaeef96d0f2d109522651d797715f4bc5f4cc3fb69fa92623b5ea3e08ff78dc59913993c877f4e2c8964dffd2c8cde6c6b6738da2883505486df5b633aaa8c66acbc2886107f3dd61b1df29f54a13ef27a7d2785c02153375240885e5c54297d88827403320799e05213761549eedc1c159c922087983410d2abadf9ef8ae460d018c278a9ea724f52b866e3d7ff2374496103b5137297100c970d195fca8c1286a8f9d3859ee12c84bdaa4b56ca91e307580b61dbe435ce4021007e4a2a8085976549cf1d195f439bb6e642567f91a0224e98796614d9ea6bfab8f6d13f91b7a80a54e538a1a785cd07b5d7ed2b7e45a0658b5722b5f8844f5139cff3b33ce244946757c020c54c8b5e43324023ed11001201213ffe4829e37135686a8bec1837b35fb234049570868dc5ba9c84cef6890d9ec400a794b1723eb209a60758ba9ae9abd23a7ea9f94fc6b73d29a560e24973c9160f195fbe82376c81dfeec1a7f912a8c22c067a26786a22f0b7db298"
"3631400f120010706c78acc36ddcc29c7055fe82105f770e2dadf131ab49af93539fb5186d32dbe4a4df6cb0fdf6840c0609c8769fe242cc60d87e04e6e3be1a7884a05d9fb96c3bc1bbc769d96bbcc0413492eefc5502e9c1ac7c3f237b9851dc453b5bfa899b7b68e5e3b92711e7c92945feb6f6e452d6216e154a952cc28a3740925554d9fd44acedc8a44b0c25bbb6aa637fe9560437c08b17992c74de38fe1fb8fd5f66c2933c2d573ddc914f68f42d6cb350f126a51f607a2dd23b63e6382ec1e6ae434f47cfcd1e7d96c8293ef2994f850a27ef2d8210a0df0c219eadd2376ce36a22db56827d92a90d5e2fa55a4154c39061bd5490ba29f8309cf3e2056f761762dff56803bbe0607faef510d023b249663368977fede0577944f2ff05ead4b432bbb07a7d90148ebd1e30bf1204cd9069725d9fdbb850d3d6fde5044da1b9ffa222d99061c8ae217bc5b249960db545e6fece3ea2faeefa7702f065764b326ae0e62f3b8745cb73f35bea1bb9f6ed4fcda591f4d84da0415a0552306f6691a64a1d0efc8ac93559a79e57e357b63df48506c12dde74f6ea8fc5eeb1846c394fb8fd0fd40df26a42e53692db51bb36403305c1aff797e20adb6f8f1721e316705dcf8fe6e6989a5c3da253fdc6cb5de426f1c018161d72e34e6791d73023c5df69c0f83d3ea1d097f3a7ff37720a66868f40d3b87755bdaf508086c7e478ac1efc0dc421987af6db9b2f096a7270de91f5b3b84ee6d1d268d581718d3c534eeffbe2889388e9930cb051b5752c1a"
"b1faf1e367866af7d4b37ba25c15a030d9a5f32bb8912ce853fe7988dc62aa61264e3c5a29d18c5121a605558b15004c817cb0ab1646138cbf6375f1a179852bc22d80b83891edfd38e25efcc0dbb78062f479a9dc792e5822e09ba3e0b8ef71c62ad7747dba8cc97707f31383baa93108d5c7253dce2395fa24d77c42cbf3559b5dc0235c0ce49ef9e3cc816598698c8f8c5b32abfaeb44f3c35a01a4f47421a166d5aa893aaba80e57eb576b838c95ed6f9d5b3d389a8f86b97fe629408ec7c7ba7fd95d7625e950c7324fdd35989570b24f2e1e24d52b65ed6116e728dc3a1004d3d8fbfeeaea1c7dc5d3dc7a029f97f8dc7f740e2386eb27e9793680d959821031fda08c7146f46e8ee47ec28c7d25574eb690de09849725e490c39e524b74aecfc68ff0d760d115b4d0a126609cef83b6c80731dd17f4a307331464953c6b41875b6e5fea328fd59f275e2fabd25717781cf9d5cc52286246ebc92527eeac7acc6e2652c6fcff405e7b4a78b8f9475f46bb82a68a6e44037d61de0df58a8b7a81f407aaa260f3a49c4a2641776404fc15bfb77573dc8728573a1872e7e093663842d9368e74cbe3ae547355fa101daeaa0f97dc0a63927e54ae59fe13aac4f488e938fa67a12876d103b4a56b6eb88ff0104330e5cdc7c6886b46545d523bfbfc88f40f9654fcd0f8c4f443a225b50b44af9674166d3de36b6ac63a150fbcda2e2511ae2a42fbe51c08f7238366aada5c6be8eeb41963c6a5374a94b332012e860d6cfbc1b8a4d5a9825b88a90c9a5f"
"5615ca503698ad00df2cd93467b66d9b15876bc49895a081959132bad2e63757aa4e5ff77c6f25dd2581a3e9bb8e213c9313ceca0fcf5f8416882849fbee576d8ffb9dc057eb96bf6b81db60a82b0e6f315a13dd31706c0e36f4f21b9ce977ff6700cd77db603120d59ad8088e121cc3c502e37774b098eee7c8244f9bbe0d4a9d0deba3ec22e5abfea69ab72cdb75a001bb53672fe12b4fdbdf7e82c0bb2608de5d8e1961fb4524dd1acc890361923fb691bc5ea436246428a70b5021f9eee2c637eeab574babde4c0d55f57925e511ff623af5c4224d3ccb9c8572179e2610b4b79817ca18ddcb5302151f9facffca96269ff5fbb11e48209e20145bdd70d72bae54f6fbb89a3396bdaaa3d45413e3c5bc672ab98dfbeb3274156096f641494c1c946baab7c388a16c71ce5009b32f45dbbe37998906570045027950bd758b7ab2f72c243eccf9551d539946a99779848b16cddf9f163fcefe1e1ebee3ba7d5240b92698ad56a036274ca798eae19b0dbcf39a1c0ea1a58b29dc0e3de89def08e6c5800c94db47b7eaef5514c002d687b4d99b00fbd44137f56557830d63156f43bf73db8b330bca0ebb4ea5d50941b758929722aaa5452cd4a4e00640165dfc35fd35daaf929997adeb4c4f7611d66befb80809dc7bc6c763879c3bcd8dd0fe6b621898717fd095fb7eb403b07591b931a8e16ab488b01acd636bf4f1e71d5460532b8a3b00d7353e84c071de5cfa25de685cb85b569e08d2f177727cda11f196b040d25c97ccb83e355db98c2bc14844"
"1ca95b5f612020bc53a81184ccd0c5f14bf6d9fd6318ec28bafe8d668cb3c98c56ad416007bef4a3ed9e12eafe8f9e7d87fbb02d1f557b497db1a2c0fe40ec3f23ea88332513c68f724cc8a8af6636c9f332a8e55c2d41fd81a23e92e9ffacd3ef14cda669e7dbe31ca08a5238c7fbfe7020933087bf2ce0a7489fd5a3becce5de09628234f60c833002aa8e9c9ec51f57c8e4ba095c1d054750d46d64041bb1f567a82d63bb5e88fb70bdddad0ed7572229e56b90e74dd88ca829f1ce8424bd24a0bbfe3dc3f77d244ee59f364b36a4b05fb511b5b0d7f876c65ab4233803543b0a68b9d2d6d45d292f91eb4700c2dbf431e40c77a4fcc3ac3fdf3a2bae3df35b6417b8f1eedfe84cc65a07c426780871d16ec5ed3201ea4eaa778b71f04cc1999587bb4645bbc43e365395e9188c85bd024f758304aee979f8e67d07636fea251423e920e2b7258580d1918fce772bf02ee66926fc5f9a3dd6a8c89e6ce7e4fc03d4784296df1a9152a1fc66050983a287e3520bf3e04d900d25316c8bd5ab489bf97a2f31f4061f895111caff9968ecb22d75cb9e5400ca1d0fb044acb4fb9cccaa4766cf6c63ae5a7a3f9af90d1b225067f671d85cdb4e2e21d2850f351d995d54520fdcbb8cb30bfa82190ab2071eb8bf350f984408b206597371736110114d12d79da4027f9a58c8fede63cf16fa552d2a956ae2a49c83b0afca3056f87f1e27bdeb9d14a7e5cf30550017a3233c4f386769021a853b971746aa28aa69ca980bb02979779c5bd29259c84911e2b252"
"61b92be669e8a731dd74edce66b6f3ab5944695efd57c0004ff637eabfbc02ae346528fedbf2ae80d420580adc4d571a37fa1397fc2b85ec458d5262c15620c88f2dca0eb1bae4ec39d67fef56ecbdf89703919e5a6767d0f77bf6f0f60ba21003d033c9dc3057df18d855a5801110fa9a29a42ce10a44a39ed883df249ccddef8aaf832387e70048d9ad6014cc17f9a2bf7146696ee4eed388d06a45f7bd7696e57500ecfada9e9eb17926b16bbd90146e406e281141f0a918c320cacc9d1f045ac1bba87ce8d1d45cb6303988d5228da6ad33df6d2a5bd7f265b8f610078e9db5fa3db0e08286e500063f0fd6860a11d9985226ad382a95bc3c3941d43378ea1bf28fc85749f616092d77e7c292e311337168b52eba08ffc0f76582710a1a7d33c55162b3c7fbf227a324e1f4579e035ae0fa17fafb1ea964aa977490b5a3fc16c75e1fc50a6d17e193345b71369df804c61a71bf60be4281c3d1f945c690368c23caab006f9dfc913dbe6119d6fe8349cdd424db7074726e8bdd0ae99e2bfb9b800ddb965c06e0587cd10108c9b431cad4fd10d3654a22ceac73553a6b2b2218ed6526c362df46cfa776e2caea0de61b9d5c0c74e03e299ceb2221ed0f30ffc5876354d5607c3eafc77f78e4fce5e0c7f6ba7d417ac5f0511e2635b41b28dfb4f2fbb73d351a69fff920b76f5687386114b3d5ab9cad056c88840a023b7e2df73f007852763570d38a966c8258365b014a12a3497f506dbe55c073244333547223785438372884ecd8b66aa0a794ab5fb"
"94b0a519bb3cbf01b43463c0c7fc6ebc67754ca25686002e13edad54c817b0aef64698637d18a4a8bba382add892f4918b720aa99b09ed2a6e02b7140f89e3e00680f37343d3e47412d04ef78005b8b9a23b92d145a8da9c5efafce374955727367a7f1a179b990868550cf960c6df6baf2cddda5fe3e689de8dfcf1474db419ecf88cbce9de7a58e9d8a15991fdf5361846273d195a2892fbc95ad079ca8153910984c4694edb4c790f430043c4019fbd96fe49d8afa5e7d1f6674e4a125bfbdc916b0d3819566898599443ebf2a87b1fdaf41378227d396d2d320dc5b860705bc87f45eba2b6473234fe054267698dba0913ab1234b46697c54e2b19526d1ad4b7e3eab40a413f86170fe9f2a71eae2fb959a021b0b43516f1c8a3e674f37ee235ade79ca296364b0cad5ebe8449e09b63a34e8711587f7f2fe6e181a787b1d3a8f30012ce9549abb834fb80c673c575a25d3c33bb6d846ac231f411dd6422c59215e0a267424c0c57e6c9bd5486e8b6327e9dd16b7065eb74ef91ec9204360b03d08654a4e418346ec2d4d21edd5608a76903494791546d430eac38178d158d61951de3c61fbe5d56c22cbda4a3d40297f7abd83913e8b483d9a80cf000810d90a921f453bcf9e35732d2579c1aaef4a6980c666e3b273a9f91d9918f850bd6e4475d8aa5cb616cec58d6ab6d70dbe2b0f7ad85618b6e60dd4ff5d0faf19dfdf27a9ee48cd7b2d6613e76f04ab6ef5f0af12966a90875816c27c4297a2bf622ddf66fbe7c211670d0c46c7295b93bd2f1"
"22568df3dc46e9294c7258a0b7e81b2d45979680edbb7ab323e4857d84306ccc16ca79c711144eab7b37e3437245d7b78ced1cfebfc45892791b9ac6cc1211f83e328ce3f57af3d89b5be89dd2efeac9d738330bd0d8d4a059bfac06d1ad73bf6d427541e559c3d16eb5adc4380c1b25c1b8a9097ce7eeeed1c5d6884dd1a32ee2bfaab8371593a0eef65f80e705b9b56adfc0db4c272024a71947755032a5ebc1bb346ee8a99b01b408cc0b1658a319ffa5ab2eb87e9aa8b3dd9d9d92ce3bc04e4ebcc011a280143927676360f249ccdaf7949bb23770a06ff5861661d36d761508f7e9ba149310d1347c3165e07997853d415abdacfae9579d1dc0b5990a05ae9e6dce8931ac2db9414546dc64f8161a64cf30b9ce8c50ef2a99775f03dfc2c611e780a5cbcc27cab920a87d940acd8b3fd42897ab6f51b29214275bd564c50eb7aab3ad19a2c903c84d2ed5a23c49c81d87cf3244505424332c917d7b671d4a90765b8953c26bb7ed5dfe3e93632610ab44296afee2b5c631fe643a0a78eb9af94d700250f5a82bc57d24825423f1ecfd8cc2bb0daa229670d0d9a4fb342ee8c9b7b16d86d29abc2a57633303b918ac78ea8d2672dfdd4a06ea0bbd756fbadfb0c09e2426a65e90ca829ea00ad66ca8c9e79b9aa5ddd02d435cb23014b1033da00381ddf2dcf408660d1eebd1f6c7bf5ae9fc3fe47e75ff7ca482716534a9f3365f5cdb48f3d59fb19d11bb8782ef96e394296594812e8a7da23a953f6117ce577e55f3d6cb1d3a4007dc7d252c7123a8"
"37be12884e54ad10757af405beffb5cff189133bb7df5fc009544b2d62ec44fdc0c1c8240d4413af5b36e031510b1f1537a690ba7049cce9df4bf4dd63f6987c513992fca78a1cb7e8d670fb43a52ea2ca2f49724e35397041e5c75a365b510f40fa9bd076377274d6a95af801981d71972da0a08b536b024f439c43d13902878798153ed825ddd7dee8937181823076f036caecec170edf1b5fbdd84e530bc50a7acc257bb9679d72de3f115602d18d2d12e6ecf4d3242ccbe9a71a1483e7fe40d2447ba028a76aa92c13516ebde90dc4d204095a554cbfad79d6efe4ec540c7b51593413465b929742b729ca688f67ee9d9fe76431fa81217fb135d0dd6ebc91904efcb0cb6dee22867e5ddd7453f530d04935f41575de9ca457da55b67791d2e8b83890b5be543366b92ba6579a6f19f8e82a0bd87e379967766e5b0a58305b984778c562ea03a8b8392e3160ea4532b6ce5de74bc8fa0e8ebe88fbd62a73d7106a309f5a5f5d7617664b015e166fcd87906caa80ab4eb3e62f73e527b5d951a0ed0340fe17bb7b2692e4a31d14798879788fed12413bac50e490ab93ed66311599a6c1362fc60da5319ad907c7ef7852985ce86246276a138379d2004772d4d9a989b83b3e780bdda9825ad06a4b3dcc9a9d4d8025cbdee7cb2e02ea1f77bc90bf4ae56903859025b7283ba6410aa91933466623b996e9ad07e3095e376b11a27ca451c246d5561501e69c6747013ecda44f8d1fa50a75572453c9ddecc07b1aaeebc04cc7e976915f5e68d1236ae2ff"
"dea4b9fc4f8e91b03982801e2ba604b46ad80f966838ae09d2734c6482dd16d7738cadc1276593a336e2ce8cf7ce48d1535c7865f7b90445ff3ab9e56f58e254115bc07710de50d7953238d7ca419013d104d90fe79794995c28f219c963d716bf8942e0cc5cb432aafce4afb42f74596b847fde5d87fba9adce5c17fe590fe58e60379393e521ee194fe063211d72c29d58f7dde89addb6b0e20515ca7aa270df2ef2d77f92219781502c49292c6c4a985242b9447521cdef5a52b53b5eefcc43e8036ebe90b51a3565cbb180ea1b3e3d20f63b8f420c2a7f01c475428d5f63c66f122654af4edcbafebe34970c152767cf623eb4f1ee33931a79622cafc70cdd2bc7ccd55ecc1e0aafde3f66f5414315048d3c5c51638c35fa920cfcf7a18ada48a589c12e4da2c801cb8bf3b182463707a17891cf296ae8aae6a8a88ee3d602cc1bb7647861f65ec1a278433ae08d8c8e63727633425fda0b86d78378ac80b1bc1a48abf270dc2b5ea71691eeeb979950cbe0ddfdc451dcf8e3dc657060f4c3f96512b21bcb228a966381efa94bbf5ff4bbf38a803b6aafc719a545e4d0582a62e81e6468aa04eaf131f8d2f545c060651e115032f5b3579fdfb95a2328f5c9a0308874630e840ae1dcec1b9543c36267a9651c94c91cea42a93a91ba3a054ded4a8343864b449e46abec49474e218c8c541b00eb0f8997e710025631ac28be3f08126446dee0cf61bc69b85e4fc021f203c796cbd2ca16ebc8fa15f55510a08ed334155233c6459d2d428df31a3f376c"
"d81a530700b3ef08631dc5b50f787d4efe2bf219bd17f0431803d9d946255716e8543bf77fc44a48abc70a97feae8398c2059938d39fb4ac5f7214d92bb89fb9c45b6d117fd51f6207935beb1a89963fb9d1aa020669bf809c21154c20e720aa1178ed2bc13fd548e0d7d01eb1d028aa48318a02dc7aa412e2ae01ff59a86dae40771ad3f48f0fa54b6e679854be00deb9938e37ab3a4c9a96f3b7849ac75b82619cbc806c42f4bc4feb1141f6a8391bf9335f643ce5cd2791590b28b19d03cca7b5cf702f10ffa0317327e828deb4791f71500f243be77a451e5759c6c711b38f8f62757c54d7fc6dc586a90df7777d8cf1c72f9c0947af005d770f4a74b6c9413738c3b5ab32306ff5b41a6446c2de3f59a27b79d877d3f05fe22d11afd69e49e59f35b3725a0ad126642f388602b7816abe397a9c9233cf7d1e12a00362306d2d9b81fddb279544f35e23a8c198930f75986f26e6f292ae8debe5da0a7a5b8add2be71efc78179eff7fa2a2dad35863b69e85e8172073f434f48fb03f7bd1bc78fc2badbda261a68f7bfa171c898897b3b0d4852920674b8d9ffdb37ce66c1b6aaf9b375253a0d74eba4d359737f7fddb42471969d81605e41f615399c5fd6cce1808e9b511ac54f75f774e84b00970474f5136447af04b4866ab6c54aabf7a247c6caf3ee891fecb14073f3cfdc7368ac00f6b1c9b23e301e49257840f949a57c28a95c5c490bca91bf979d40403f7b9458bd255df757e6eea0bf41d5175548aa46243d98f2f0f6c754d6e7e58fbea97"
"7d7e0af8b7d0a6bce07d0c483293868a914a50aaedfb9b239b4c3c472381535b287a4146fd52e7bf882c9c3eff7bb2fae15d5b96bb1222d81d26dba563ac550e716b6c08b062cad6702a33a9db4274fa2e81af815e8325101d5a9ce9b345e29619da9e45dcbcd7b0935d7dde07644edc6b049eee9371511bb2cac50ec1170c7aad835c54fa52c8e0a0e8446356488e09c2f07b17413a7ddb872d05016aba129cc36de609831863747310f0fa443480a47524dfc5e1f34eef3ba2fefa29e596e7fff86a924462781930fab55e71fc2f06271e62878e51e0db08ee5dea31f1d2afe9a4f548ad6a4f4763c9d0eecbcdc32323aba1c9c12554a5cfedb5310b4a03caf426a80d725fabd557493c46f2a174aac851d3d39529d5ad919fdb7fb0dc1e5b0ffdf706a9f5af36fcd2bdde28d68c5af4a1da4e67cd44f97b555b62b39cee1274b7c3dd3971ace3da6101c87f9b8f28c5e13d4066a3e63543825dd8bddc3e90b6dc75bac78931da98929a337817f68deec6065f6f7883d5bb10cab909c9945f71a672eb2cda9fadf4a8d9da906e2a5d1f589193b4e791772663f1bbe751498bda065f90244391169d80490208083de39bec984af73dc99b10d85958f372004a03962c45c531b347851dc5e26bf7bcdd68c9b129524d6734282bdd431f991170d6a5c67138a5405d8005b355ec7ce95496a8e98782f6d978c42c30a17db9c12671d82f2d3e257f66980f20bb6380303f1e89b10035ae7bdb3e55d31f2d1574784aed5c95aa09aaa9614989d957a65d893dbd"
"abbfaaf30cae0cad575e39f5311aa00a6979fa52ec12dfb2f731a3ce5f8b6097a612c2ce98f5898eb2d1780d0cf9ad30ce5395ae871ba7ca6a0884a13c09732cefc5aed9d7a28c09041cdd62e75d7396432545f0c16496b7f5f516fb2cc603c0ec10a51ee952b7cd0593ec00dddf67e27dfe3f0cdc5bf737170243a8ed3c1f59733fb47bde4b6578d7ef11f95790d4c678d95ab2cbdb1673d2d516c189af00f996371077276e672f1223926fdcd6627ff86816906edad3aa97e3a9e7346562add05ec1a94c2dbb7f3b28ef537715a1d69761bfb8c2092e608311af2f79a4f8188665a48539944374437bcff6e59bdff4e4b9e4dce11307d892915071157698460b9e9fd68ee0d1acd21434810fc8ae702fb8dc794ad5364c79fdd74c8a70f390556930fc2a23064f36411c626179d1d745d4875f5c2b37292cb8ba37bb78d419f05e9a5d2245a38da20b6b14eba2d5ca3d58d23bb5ade1322cf337eb75a97ce98c167b6305907c3fe18038bee1e2450c3095480f99c9f12d2b543b33866e5546a39d539c6e2d639356bdbcbdb3b4e0935ac76e0fdaf54cfdf241d2c5ce135324885f8cd69e6562f48979352bbab357c6861c66b4ff7d9dd5d32a8ab8b6e759a2f5ddcee847fa439a5f9e3989039aa60751019eca6c7dfcc2464ca4a1ae12f079d200961797cb0e52cb046d1f0cb1d97c4699e07f019b48edd6f4a71b99ba26c2e5e72745cd9bb9a7e89d8eaba646461bb76818fcc447de2820196e32cdcf4a57c527c52f64d316b513f6a611c929890be5b0"
"3b3d3352cef23bf86d0e058b1cd9c4a10a9a01060aa9c9cc4bf42c7c6cbb677724db3f0c3736461c1828e67c9916e953057024371bb4ad8995672f760c47574bde9df9e73af90773cd46c9df8cb655f8c37eed8cbda40da06304471e32bc828a7dd9457fbe4d63a15633009c1a9f003f3db7f5b2b5e3b22c60f747d5627bce3eb4398a543cf24b18cf0a56728adcc253d7f5343245c1426b5bcd9daff94394499cb6d7ac2b4e63ec424c66f5dbceaf877fc13f47e744aca7d8b5d89c8d5621f4e13488b141062ee04c2312528a0a987a5d32ebc6ffae45657f4b2d1420890970e363a124b75374594dea0560320b36133e31d6a978f90ef079b81484503c7fc3edbceadfc9fcea06f271a60ea6c5d434b694ace1b506eaf013aca2c6103acfe6c565a5a24cdf638f8ee282ac812e32cc2662a8e2d4a31239952836c4896870d973bb65b280f0370f4c3a54c7f4723b2bef522ca4c233d7646da3fdb9743e273afa1e3bfcb947eea9f323ca908bb4961b214aa906cca1d2d56eff25d60952cc5897ee6390f9af4efd5d48b2aee8734cf6b8042f2de75b107f8d135d9a63148e88e43df815fe7871a354741f8863af4e114ed0369515bca104f8d3b24a2d740b8617de3e96a23*0", "vilefault"},
{"$dmg$1*20*f615ec6c463799eccc6a2dfbedf12c6bdc422a2a*56*a595f4a81a490e7aa6378034661da57a424f922c971d3db3f856f8d54b0784bcc5d7182905c4237153c5d250b8aee1d26410b1dca7b1cb73*48*74a060efbaf2c79d5523219d8162c425befbb2094fb46e7ffaedc7cd4f192e6f0c47d8aa91e0a3201346725d3ddadfff", "vilefault"},
{"$dmg$1*20*9c82b419bdac1b3e6b71f8a6b99a7501f34b6950*40*5da479e292e0acf67a9fa3e24d0a767cae2f645ff63836665068637188f4b80295de79aabdbc2536*48*9b136165ee73418631ccf28d5e77073788ae921df596649a7a7789585db0f13f446d5927967e2ede20ce8a4f5389185d", "vilefault"},
{"$dmg$2*20*839730be2331c69df4f729ffe8a10c26653bea94*32*1f24e25712c2d70d000000000000000000000000000000000000000000000000*48*3231e20aa642889a7e087cb87c84ba1cd52864007cfea677796a6f52e16b2609696dde9230aeb5603aeb1f70f6701be6*14*8192*75884a049d2b7a40c14002ab6e511bf3c73ca79a2bb8285a3d2ac1d5b9b0cbf92d4a483fb762bae8485dc3fc9cd7a54141da2b74a86ea833d253d56f52eecb9dd4d40b9f846690378cb8a5db74fbc6d756ef9fcdbb5d21805ed43a7fb45d6caf6b3d2564f4a7760030aad69ed9e56789e8b2699bebfaac3cd73130fae1d8ef7f003e765e86eb84e990f3c24780022fdff3ba283ece4fa8d31716e5cb1ea22e408431eeb2cda1460217efda86461e940cb10ae602a84ddd22be53064e66c0973a04405ff17afa020b24f1bb4ce42750b28cf4e98c4f542576e712f3c2fe0a0539a411290f65ca763a94d865fc24b1beeefbb6b055db453da38e62bc383e74b188b86c54b62f589334de8ce3ab2e4643f76eb4db95bfc088bea8c4e88cfccd19b89b818fb698982f73df634c8a8148e4c8d3ec2dab02aabcf48ec0a78686fe0b4f5e589a067d6c54f0732e559cf9db5b4ae1f0468f5681226d3b03002cb6ec528b96470f1d1aee5d3b51b4c5f45a2702830ea35056e02279e76fdd30b3ac174cd91b65fd6a26a192f6e632b0fae660d0861059a62bc512f610f4974c22993bbafa364fd2e8eb53d07244d165f990c876320d99070fbfa6fe7e0ca42c0ef2f17205ca"
"7196376d4026a8a93fa83a99cd3b6cde354ed3122dfc07ffef91c24f2036b0d83467e120b85a92fa04120cc8f7af3196adb6420f519c610983d163964b0cbd048adfb89266d9ccf9845cd17ed04accff9d106b7bfffefb365e97357fdb9ab2d0956411c0c73bdf235a9ea4b50962c8f258583899ff2c0bad6602e8a3c14f3c870fa14686d15aa17f5cfd1ddeecc7b061cb5c00db7d198d083a690ecee97a1b4b0251349beab744c4bcb53a4c1702d1094f6591ee5ae15a29271ee3d3d22f0f833219c3676236c9e9620a206ab6ab08fe5fc663f4f2ccfdae6e34adc68e59fcba5363f44cbc5d8345f184ccb38d52bc2bbe6ad996c3d4316ce644698bba6044209d108c698c3d18f4b64161651224cb015052d2e9bee0079b779d77b6623e9669c4ff99988bc612c4099f6b8bc9719444cecbc5f87bf9ca6dc30f3b346c3cf20cc342cd4d156ed67c8be0f1801c3e672bfdf2fb9e6c6f1ef3570d059405a8a0c5bcfcd70f7bfc1d2417e3ca205be70a5ffc9b4d1d123ff64cf72b20df25e9861e1da57fd1311451e542c25100c19d1d70bba2c26752e4cf1c59a6373fceceebf2b4c392a45e2cc7151f4cc1c7292720b5f0716cf7ea752a8a44cfcb7f638c5387a410efbfae90598f2d99cc79baa298e30076d5ac8a2094dc14d81953c09fca8b41f88cbca2274158b93fe5a151b93bec1fdabe1a6c67807d5f9d46b2a19ba85f9540cfb54656fe473216ee1922046c5b6cd08b325e0c25a420765a61e5f7a266c9e0ea1148f0e62ec65736d4cacef77940a0eb"
"24e93b7b656e3b591f5827e78b577b628da26c1e5bd7544dd439d15ca21a3fbe96d3833ab1bddbb03beb8f0fe39517958b7bf43afdbc68b5061b41145e151d228bb5e5220b31a86878be40060839855db438368e40dd6b8d534c5c39009455c0a783455b41b572f2864eed60e5dad80979b97efd6dd08549c154b76f748101396847efd56a97b82cf62a25e26ecaebfa35d545cdf886ecc22460cc0e2983b9da14ac41dd1e1dead58a2c29a85f6bc900268d755d1158939470c4793359b50da19addd3d8f722c0a889ebd8dc69bd955b524bbe452cc98834613ea48d7a73a9b93820c0ba718cf664d82a1745451a204a2845d4e2a846f0f18923ad0315896b1c1ac1942fbdcba119ceed9e02b0e707b28feaba44bac94888ba1a31670cdce6348d58d2072eb13ee805d569815fb28749c392d11eb06d8b1746ba8eef3313072fdb4685f1401717933fd18edbc99e3d89d08a4c7798bc1d724d6bca02a31642ca0ac6223884580c0be8f6508a6650b783a9ef24de3713f65fadcb2da6d68c4bbbdc216ff91ea7bd24bd7365b91087c14edf70dbd4eceb2676797ead7fbedae77a0add9d22a515e2a79d075958d8fb87aa62700c62df007abaa3a5e002403205fe04edaa4aac3da6d08ad9ba909974e9091148208db90f330b2c2c702521d4b1b32acc4fe6b7ffd9f96fdca05b6c404afcc789fb9ad8c52063fc0f9b9cb4116ee11f07aa17dff57b889a4f4abaedc51a07481c1e954d78ead32c6e808d3eafe7cfa9d2d4ab4886abcd2f64ba2df2d8d507cabfa8"
"d01f785409d71896461adaeb4e34d18f9b2fa38779f0932c27ba2f3f75ece12f6eaf7a0d728dc02e97cd44ff175b592b8234c3e3b5491726c58dcf0a1b77698cd38d861fcd549aa793f8d2b58d6afd1d9b7bb96c8936c960eaa7072c00e69f68f948ee24494b8152bd8e5d6923c8eb26023dc660d202e41663888a8e8550092b5e1610452c79069b3cab41a2e7459dc0d361ded09c9f1589999623f6deacf276eb72996a355e4f7dc19a5217e9dcb2d6a3e4679bed9f980a5dc8f24a1c5f4eef00d706566e12ac8deeee964ab9501be5e57e326a6fcb794e4f4fe14922704206a343724913ca2e1d26e3d83cf994cb7aaaf9a916ea6eaa06987a9822c5a8e556b16ad72d5f5640b3490d6b0f290f9f2db7c3ead435e534406dee40366efb98f0b53930a83ff9bad177b84343d204a1083801f1d68b3aff78ec4246f670f924969e4608b419ea9f5aafec40d902492f62844d9a83d65f38af2531b875b964abc781b3537c708fe65f70a11552990447bf6db287412367ca918a39d9e2b2e228451807b01174afc33f5f67d45f9c765015da6abd318c980fc8bcba60ccd5193e7a8caa54193aa83bff7b77725be99780da88b3209a3cec620c17f979fb16e640473b0d98a2f492702ab99f2f0f83bbdcabc2a6dc4986476f420f112ffbc7bddac8cffe59e82ff558151b9160e2f99bf37a05654253321591ef31d01b32b8d69297b3bd57f127e9f574fd472b6d29b6e9a0e1fd43252bc1f1b2c8c959f3f4d80177b4fd6a77dde8fcbaf1eabcd5e7f6d38630f35d"
"efc161ba7432cc9af6bc73baabcb343c469ab18e4cf88eee21e49311b4f20077bd6e30705338f047a9c7bbdbe4dfa6d7be3a827c92823a3c8f36909f9e4df4dd91426b75ac6b5d953357929b0bcd91ebd24e651a855755edca82c4664d3c89fca6001ba88688e5ec8d5e5c3fb145b963b29424192530601d74e3b815be85ca44640ca89c57ec4ac7084639b82e23f065ac561779c040cbfe63310ec846db02873203feccc3f88a28fa78d8d567905abc9f8f561b4a29ec5c380849ada42100c15efd3d73fc203e63a315cc27b82f62c4ca0df9ea213dbf7eb39552fcc38edfba0ce7e25dd097bfad5224369f1d2a175ab88ee5a3371daece3342e99c60cde76a1ff5dc7e5ebaa7e0fb59d4d088cfbe7704126b2697d62d7b82289a35ea778ea4ca347410513513084f1fa971686724761f711a916ae1e92402ff3d52f948fdbd9c1d961c6ad6923c8ae9cf3a4eae7a9369daa5cbdadfc786e873b90ed1e8f5933ebd011081ae7ea236c11f0c53e00c1c0f9206f91e6954123b5caa08c7615a787c1661dc17f297c8ed2ff6c90dfdd9a262ab5e9a4489d6ed7ac032f72bcbbc2248e7f1675e2b2da0bf85caf89921fcd8e78403f11a28970f673ec7adbea798b3eff87fec642ef77c15b3f3d19dfeb74d1ef6a38ab938692207133aaeaf722aec4f6082a4cd742bd37fba0f1f83f01cd2fad6a169c4716940f7d74b8f29001f406de5897a5e5d813b995df132cc57a5d9bdecdad9024dff7dee8b89189d35085a70bba2e5e0a8c1c71cc593238f3acbd1337b2c"
"c5a8647ce6bbd669eb939279d3b964d661112752bd7fb877c4c6ccb5ef72ff5446410286fc69347841c5595a3408e0c73fed8984d0c0fdd2544a168ccfe41386702f6ab7b3675a78b57f9782f23e0471e6dceb176dc9eb871ddd92dc0b86b2a11293523189c75019200a45213f0cbd86823f65f28cbe6569a58512dd469431322b7ca5b9b8ca57e56a139dc4788ffbac10fb57441f2435584651fa572450a4719c8c9b4a322f3aaedd3693a55820c725b63096d3f211d830d39aa89be83d59b13145dea9231266ef6b1eb1fdef31203922308cff81b166426d662989a350ec712dba14ced58df7dda0d0fad05ad8d9c6b247307d481f79e6a3cffdb2ab9b21a8208d6d7faa72b6f22a505d2b950884474862f6f67effc81c6292f3550c4e8852c39c52d952648b256e961d478c0c6979300c5188c490ce5c1e34ff6dcfca63c0f0571ea616651ef6f9781f2d355dbca208e56948ab9e26c5d2d3f8509952bba3e93241837b11a89caef6c956c9354ac10425a6d8d4e82bd5d7411d18655393d7c542a7c914a5ea6aba717a226e0f51200cc949f38c703f4f6ce452cc1d7d6ee8acf26d34f74981f6850b11610c11d1c5e6689c1b6fcd6b6e997ea145851c6655560c33dcf5ed7315578263c39fe6a838c5de867f1b3cd482c0206f56ebea0617ae25b3ca8d7e13849bb2b58ea4e21409762d549636bb7cf5ec32d3216d827d94cba1f36e7632e3a43b3203fc596cdbf879d1aaee90804fa0cbf46d08ff4c40aff8fb2b46f7ba8ce21d17c2d3d025b67702054e"
"9d76716fe7b5c9d2f43036d86e6a17924d2f160f91110ed1f3364a1177aa6193baf59878ec84f450914faad409618bf25cae17ba5545abd33833ebf408990fa4236d322089aa42eebea965e59456250fa14bdb61a32be8d70372891a83e7bf298168c5431e0b326229c36c667217bedbf64e3a07019534a087e84cd1a9cf35a889d9e65a7be63e8d638373774148e127b328734963437e7f00253d2fcce7bc0d798c09326ccd4f379f8a29f2d308ab2fece6fcadd653b1a3ba53a078e51a1a87e8dc03c5c118444d82d9166c0c4c1bfbe8ee09be6f8cd497a20132d4b6e1edd13683b363dc6587de2f11cdd51674ebdaafc41654d639b6cdbcc040f5889efb1f64e1b873442493ebffd8f867f0e1ba2cc629bc5239ded578336a9e88ee8b2d1b71f6d9303cbfb8a35e4015d2f9ec25eb4618c2ac17166e8964b68a66e60cb7b464e36a2251243a218ee542dac96062ec7db751273435dca23bf3e8aaea895ef1d6f6bdc98fcb6a9e0658dbe734450682cd1a3fe16161a9fbd035270fc86684971e20f1f1869546e1b77a481774c9449ac6499f376bc3c0f0efa589abe3bf676fb385ea50618c681eff6e5359678f078292da285c4b5e66d5ddb43499abc3558490aca6481299c351c6b053739d0065c187f59767e7de24f1b7bcd2d80d0ab2e7c789a9f5172a8411a88d2c69d8f9d2744ca7e42ba8478648df29919c23c0f4cf14e2428c792f2d8abae1073b97d86c2d5cf2e5beebc7fdfc449ec3804a81199d6c4f24d9b040bd1feeaf141b7eea626c1fa812"
"e499b74e86dded2641ce3e11a04a35c8b8831a4de563c3614b4048eaa656d8dea460d2c46f6d748be434718e9f54934804756fad07d2a8ace694bccbd7bf2e33c09199a22a98726d2e1a690b2a9c33e39c8746d8125d93f675c571247b0a060114eff4c32231898a05e3ced4721edaaee9ebab9b46692c65f086d9fcd34b86a499685010ae0f4423625263d0a2a62672624662a6613bd4235b7402573af1b0571c364f7c14e277b84e4a102b1055a1456b912431f9ce9e875056f8b48345ab09bf06b3de6126fae32e2bd61d2fdea29a2f3cb46d963fa40694c02657352b9b9918bc50fd7e26584e51ab5e4bbcdcbc18b9bc17d3efc5935ae5077a269fb8e912dfc91a2c287686590c3e2671f6d29365c044fac2c077fb5ff280b0a4d69eee3b9538b4c8a029a3360902ee8291ca9f1088074f307392b70a7a43ceaa07c47d175b286c052e2412237da3f6acb1eb6b1ec386dbcdf5b49d2391615788f401ec234b58b112d296b389ede47243c01a1a6d18ca5dd3f2646d483b97e41370faa1c023118a1d2006694debebe35046f6e5852952bb520c9991cf9dfdcf89e51fe29d3cdad6f1091fc7c450782f06b09cb8aed1e1f95221af7ad369e49ed672fbbf2d255549d0fc0398dc6b4d37d038a8dc9e8d9b4d6faacf3c5fd10663107cec0e171ea6e1c26eb8a1534646e0813ab0fb449d15b4865eb2e9914d404d06c1e284f66e39d09e99eaf7c2f36997ac6ecb9197f8ea7fbdf7da38e427dd5179ef265f1471a096fd24d8ea2a2ec3b820c54356cd912f06"
"9accfd370ca945e60c72b5d479b15d52a5c3c4423c73f4ec06d9201ddbfdaac2e304b1408674d40c203ed48fbf4b126904900349228b28fe262539c9a12270632f28241198381c6e7174d275227c99178ef4942655ec95acbc19a3b96fd1e07b5e0e91488c979e7e25be5ea733bc3171b2874801157c83a6de754ecd05cd78d6d2846e7ce19f641bdb53075dca078ad0ddfa871c16e47da96d007b5e2b2854d151dccfad21875fcd12df56dee7f4aed6a54fa248ba2721ab2f58c1157c85a3df8486f99295f2c9b8e8cd7a65145b69ca93d0ac4fe328e31c07bc1d0af2db886266def575d74be200ec9a4ccb0213743eace8d7d39f810e3877876082238d72c375a5cbdc4d7de36c2ad90904a173df80195cff86f19a0904d18a1f8a92cc4779e5997dacba58770c5091dab5b832dfaab2d0fd102b99e3b8a799ac6e7357b294a31db5f9bc3d04036a4a6e18dd47dc88b0f07e1c4271e5106f329731ce4dea9f56f6d63beddad788d7eeb955589a13990cbe3454b07f63477642613bd77f3bc5d024dbc5c55a0c7426ac7cfe63dd2da9f0d5a7e816dfe5856b646b648c302c16b50296882c62334c9b8e56ba6dab63a9c787fa153d04e5e64503c6bbb9bfc8957d2fa607ecdd3714123dd52b6f9c1a3a73f649dfe67fd7195857955cb8c5470a9f363116cbb580b793033280dfb63ae47b384e6aed677251b63a7a27447f37e9817f10f27c4a0560ef34c0255617cfb90769aea2e5971077cc89022f8a44493d5157ab2962946c7fe600a24f002cfc6108d345"
"469a65f2f29b55e4da3f4c767324f173a11567ccc401628f2934989b29875ededce223de3134b7e99384f94436bed28329daff8da5690984b491d43f14d86d5a5e783545442f913dfa39f25f6360d2143fbe4c7e234a40f65b2c48ff5835c3fab67a92d0adbac9e63993db052a832b1c7b6045a495b82ed0d7f1068ec96fe1519493f7376a9f9f331f6ae89420fd1b523278df3e78c7b957f599767057113d5a1895801f1fff1b7021fde8360c4fc1ec8165132244b680645df7a1c0673728ca6323379739905856537091dba18f762b7be6f5f7e95212c402b005d73dce6a7775e90093f927edcf0d9ca24d04809f953ece372414d5f987ec2ae030dbb547db5ec17bef47dcb097fcd2fdd873eb93a99e2209425d4fbb589530fe41bdb5daf8ad8f83e48557a01d2ff6b658368e39bc8324cc2756160cdf56b8d7fe231aa03e82bf0b3f55eeaba71133a6bbf72342727a52ff7d158992895c61c0bab4cfe42ba5e4d5f239ef5efb6433dff84a02e2a5f12bfc35c1062e4103a3f8fdd1c5be28bc83725023c8a72d2cf5103a7c97a23b2d9903a1870726ad2bbaef7b7a6dac3e36c1b92769cb3f43eea1faf95c53db0cda2a8bea38efc1dd11695bb5de4baf583b175a32d49f98c37510e9e56f3d9e10bb4aff163abc91a36f24fb38d33d87fb4299d5ceb5144c69cb741b03d35436002d7740c38753e284a808a77cc1d4ff9e63b9ece720e778497c25b46ccf757449cb3b3fa8e5bb6d5a9f6eab58c97e9469cc6192b7b31362453faac839327067f41f25ff"
"34c2cd40e9fee3a0b8133f266407587ac40db20e7d7d397e90558e54250111f540a44a70d427497b5a06c8ef87f6bba0082e00d42adc7eb38e890dcf5cd426c1bc2b4c781b07670382aa0d13e227e05c1987d3cd0241b5ad78387e19dfe4804189dd8a10cab05c79409b9414a6a384cfaadbefcbe8e3521fcbcaf52d92dcf1611ba3a824b576051aa24f42cadd7b7e9841375646740f2a6271d81d2d5f4819ae6a5d3f1feb6f7923f4252872c3a2709a8b8556b3977af8c4423bdbcf66ade1b3c4303539e06957e8930aea8ff70d6a202407aa44c6c8dab0232a33ff3f3ee9f61ed664bfadde8d294022da21b10e0aee583379d8dcdc078639cf3a1ee18d6ee1740bf1b917ff56070bf807b90d5a19f37a5c31214c6a19532f364d463595262ca057f5865f0d55636ce080acfd4e303f03372af014a3c32d2efec8f7f6cd6c825e5edf309ed16008e50aafa2584804c1897f6433e350cd91e155ac786dd9c3deb22a39d69e85331086842f32ba7cb6b4d4f13e08d90acaff24315020f7efb2b74214b14e840d739378afadcb06d45e7bcc17f2a03ed54d0da71d865508900334386ab96e11b88d2811c84539e4e2a93aa27d66620500789bb4d595a8b2e5972b1805d88af2b722e1e9b8aef10ca3dcf5ddbf3d20a6f101bf8f8a8cad825946dbf0c64193689f461bc0c62d138f902575ed601e26184a10ed9df17ad4be7c9672147c0158f132452ea502948a749b474cd0a63ae5cf942609e4864985b4060239d0cee6c78ce4dfdf5750b51ffbd5ee920967f5"
"dcc52df6771e286eb83dac1c576f1a073687411cef3701ce6de66ed17bfe0fa5f03c63f96fb40ad70b478aae1e16efe22cb9e8c2aa57d5498803d35fde7f920b32ec686e6091a9ba6eb91fdd17b3302b760d084bda32244f704e14af619a5c9e72bd14c4e69f51177a26174c16d2e3eac934f184d460df5640fd84c3d3dbbc6785c249a501203374c0d58852d52c4c64a6d70ead2af1bca1d61f6f4cd00c3892565e085d3e603a0586d176f478062b092b205807fe7438a065ae7dbcb14f69c92cae4000dbd6804bf4eabf112813ff0599a29b1fd8bcf9d0ba7d9b14e40e38826b48204d8c0a50fd804167c88056cfe77e7a75ac36b5bd049571639b3f02a7e973abfaff1327080630a4bbaf6a096005ca2ccd54f076f2c3311e6e7b48bafbc9de38d01c8a01ee41d25ff0f775a2db4e34566e377683bad9a133482ab87907769bd783bd170b616d48974ad332e3defe94a2e7d6eccfb4cc43cad93b53c476e7795a087fe58cc074b591315daceee3c02af54d9beac8162b70dd9863bcd7702b7c8c72022856f78b2d249cacaea6c1dbf1317ca9e35664c518bf4155501ae77ecc3f47be6e7151c4d5fe56b893c69f1f939cdfd2b68830d9ea47a89fa7b3d4f620e0909d5a97f2637e2eaf223f25fb5ce7949e3ceb87d93db628872fc469f58a749e8b4841798ef505ef2712a3ba713386dc56b83e504c3d24d2ae8200698f9b3eca8d7971f7b82dbd5df6deb34865e2e6336fcd2fc3ff00bf9c8d04992f012dc9473e347ac05aff1040f010b1683c10dcd0bb"
"49b7b5883ceb6c0bee4bd2ea6d275f884a37fc7151245274f208a457f4bcf180d793de68f09c7b03e7e430dd34e553362f91c4e721926eafd54d6c8464082d2d4a4c5b4b44495ddb06290f01913e68c7cd95963242df31741eae89eec41d0af689518ae335aae42c60041154356ce475ba0bc7f6c5ec798cd7c493aeac5e08d7ef554dc23832161a615a6b902e1d4f7bd076f3bf045360cdb73c3b2d7c158b74d2b718b95189225a0824a38836d1d4dbc5a2861e62f8a8c2723cbf1fe8951860f0cf7b4c6bc4c307cca509435e077f3947b8fcbb8ba1252b89d61b69b0328a2b1c31255c2c9df670bc244af42599cb5982878fa363627b321302255f2a20e04b70e8f4f63638af83a98ba40c55ecc46230798224de084d2cc203841d91c4f049c9b0a98535f3f905bb80b24679de883470c8225af80361031354483d879f98b78cdc5aeb07b371fea8355d146f9bbe16c9178f3d83ed63e2812048a386ef85d6c35ad696936a008a524f358ec8a2e40081c3c50b73fcdc6199f59e14b6ee213a8161f675d5938ce72a848ba9e7ed930198d9ae6c43dd86d94d88c5312be17b9dc590072e382607390e247869674ff446e8c37d89b7276aa61b5ebeb0ab18f500389a326341ee13283965dd4cce69b666d2c114372cb0e5b5d9921cfdb5e12aea0d95ec0a73c8d07b3b3e0dd8d159d323feb4bdaf6ea184bc2fbed75e7cc13bde26aa597ea7eaf0e37aa4be069c2c629af7debd8692befbf74d6c9939165e3238d8b2b573001ce957942b199e5c57935ecf5ae0"
"c3b161b96f1f637605bc29bf5230fc65524041d9970e9b4bd6e7469e0c0bfb62e672b30a7094b014c27a06e3982d83a951ea4207a4d7b38eb155259b847ecba4675c3f82c48343a07e2d5fe16d3189c8dc0f4bb1fe2ca4abce4638a4462f0dd79d69c240eeac8ee4bea297bc1bd5683ca97a352712bb4461fd507f9125f895fc7ca8fc76c7f78207224d0fd142669137ccbac0f023fe1700eef77abc804e9b9da27ad5c3a767202a0d0a36f8fe86e2a8ac5f30303c39fad8b65a206239b881910f9d904f96edae31e4befce7822a7399ad06355bc3c7198eb1a4b2c7c8b4c92a604dfa4905109c35edb62dd3c817cbf5261f5069bccbcf98da9ee5ea192151237b31131953509157f833bb1b482cd011c361d768347b2d0da11b1dc43b392d609f0c4806d7325e92f9d76ecd278fcfb9d91e9993addffa55d66acf9211b7cdcf28c73bd4e7cf83a869532c90f9880bb963cec69cf40e117b3fdf9c0c5c9d6570a2458aa9d14716ecb8b6642a4cb1fe0fbcf8298ad0db3c676b9836910658f03bd47ded56ed210cb1e2f1088c87f4e225faabf29e2d450468ff6614f282e15b4a6fbcc9463a16f802d3ba071fa5b009403478f1088ca8a8d9eded648be7394aa6bb3590c0725ec87fdcc53c4d2afea49ba11f9f2b3231c912bdd9431ad941a7d89f70d8e1669e90553b047b5f4a033437fe3b84c05105227efb5390e6e99b597fa1c35a1940f513ee8aaef9485d1ffdf7ce94fd34dfccfa8f178dc113c32082e0345f6d39294ef283b6f9a566a87b1122e74411"
"8e643cd6a2ecf14e47d68254d26942666fcf957586497c72c9e5814ab3371fe4b0f9a7fa1e5d9629d0dfe9e93fb388865a599076e7ba983365fb3bf574d335787416c099c545feeea69e3069d841b62e4db9833e6865e24cda78e2bc46ee83ad5d79bee507c44007200e64b5d1329930bd658e6f051cdefdf758e5b023650c2abda7a6827ca394c086057c617dfa8c161ea1f953446d8e0d5f6d5c76bedde8d596d1641a973e2b53bddb8f7bfcfbd0fbe4883f4d6d4e6f930e51d47ccc40148e6ed1b409705e9a777f1bf86af2621cb1f04ba160a5faad78a0949032e9dd7e34bbe6b2fa1c478a990d3b7c474a2f81af7f7246bdcc669df005adf397cef71869237c53126d1301ceab14011a529d4897cb00f7d93f35031facdcfda8110b9fb5d55a057ac9087a9cc8f1034e03f79a806db8a8e726e8afbfcb2c7c39d3315ecad3a2e542d94753b88717b7791c66c47a45f499885f6c096cb1093d9dd6082ba8eb2132e4a80e22ee309b7f74af55530e190d73315023fe4b52fca855a06fd111fbe1125910f4ace6dcf228447c007cf82fc50993de0202d28aed32ae795d2d75ba8c975b78c657af*0", "vilefault"},
{"$dmg$2*20*186673f316ce762e8f2b2595b3e8ea204aef584e*32*df036556654b76eb000000000000000000000000000000000000000000000000*48*71793cfc457320157f12b1351051f60e59fc80a728f82f0156cc8b3f20f75bfb4289c65e6c8c21589f3dc6187540551a*2*5953*3c25089e22f54dfa868b7460f43185a32b6988681952eca4a493ff4699e2340f8cccd06ba2df28334dd01b83f8bafa3754b7afce8f859ffaf64d33950a817d5ffa9671894f71d6ef35aefd00d237f7f8f413b8b8424db42e6fe7bf503d1d4222d77d5c3c2a16f26a1e15d7797cedd59fbeb45f70ff7731cf8be628895f13cc2937f82c92e0d5c6b6ee0214c668ad1ee4f41501dca668af0f83ef252bd6b6444f9028f12ce15134fcd8610426b5a6a75ac25fa938f93280143b5c991a683fb008a08e133a962dd4e3aa9ddb57e72955e3a840c3599b84d874d61cff4236fb487e2a344ee3311d30a531a20ec800ec591607edb97599b297ac67e173a4f7d98ce2d73b66c37659bc75becb65b799f0a1642a4282ad623ee574091821c971363128e307288b4377e1e90e831b800936f2b5eb05fd5d0e505d71e7e34311950812131c5b742ea238bcdfacaf35e23a4b5b9ee2a7c0da6aca0ff02595fd4229baaf700eab8ce7ea772e133bffd5665ea3ccde2edf61d11e64dbd1919454f977a31292416c86e3e11b762a3c6f0c27cf1a07ba3c4197f21c8959e0f04fae6a086be6e77b47495d0cbfcfce05e34ef361d45b1f8c5068f0174cbb2ec9a9f37eb6ae1fb0887"
"17630b97bf46c801ca598878e6a8a96b232266479925e8f170bf76afa4acbcc6c7daa51c2b9a1821e5b5df170a8b57aa371019c240626b2f2a9d60587c34383ea7c12b300fb478e2b62ca9bf54b00f04f4970a68d6689c4087713e9b6be1e7c92ef16a7cd527d1ef33140d8d3994c07d8ae237e047bf478f164aee1c6300545bf986e570a403ef626c5fd14044611621bc5d5f37e417175a22288c2fb45b0e11e946f755fccdd774e5ace72bd2ba44be8f673235e9b49c0fd4d6a912493fa797bd97462de0402f77da7eee2ea6c0d02fa880ba57390eb1f73927d4616b95067d18103ad4b10af7a40b35e620211acf4c9f47fd12080b2df1d350d17afb649ea5e8a038157561b107e7d1d00284a59541c0b759bb424d2795ff1d3bfd7749461a9f67502df649d2d69e72036ab4f8869c7bb35fc999a9179612524e2f9bbb00e7dd5ef8fbdbfc486447ad5ea93b7220608aff49eebb98a1de88c68ce2b9846a63ac6b8878fd645bfc0c0fea6bb746b15301f58d2b9d2ace73828a623885fb495761be85780668b436fcaa6367776dee9e3af641ed5755f1cca7a931c97162f6879c7a3bf6eb47f98590d07654be8fd8582c5774f89bebf6fb113d75d28afe74443a64af360f41b9d243d8fb865039d924fff4586e3c76d9d0d43f8487200e802adb9e01460eb6ad5538d8549999c4b38c41dcd878b8dbd049b853aaa4426e74226fa19d3d501e6a93aa99dcea681f0044e15a05c2d08ae49f625ffe88181d2c1fe55e91b6f602409fdf961af1da851fff67f1e9"
"c9ac10dd3960f460bb8f937ec415870cb9e99e150f5b2a2308f2136960d199ccf5900f130a3f4610cda347991cf34fe46717071dd5ab2e8dc5bc20757fe6357fa56a18a606b25c51612975f51cad52e5a20a8eb2cefc79732fe19baee7b8c65167e2949a4ddc8d1e262b47c97286c2d0fb7078b3f553453445053d82a865320ead1ff4bf4fea84cfd7ce21e7aee696a15f92da1f3d73c394d47a254247492fec3b6582c94cad0df1b1b097048c9c91bae6aa269f5a074b796bf86770059cc767aa07fcf84010b1686437042d16d693775a03d9832857bdde9f7d98392bbcc579db3bddbc58d8cf08f04064e3eb92d87829e6617efab245cfbb6d564c5fa333ef560d6105c525e39177ff5530dc154b691b1dabf14d0da99229a04ca5c6e7956d474c0ee578b1b287b0a5971506687670ea848820c44875c74e69a79b36eaa3cc2a5a27fd5098f0fd3c190089736a271ecf3f14b3259cab95b941bbebfb5be132d875328a1b0ddeed958e8ea454ef80724f878a2a690bef56fe3ea62f47cfb6db303ae608957dbbd57735195d6b1b2ed73e69d1ac4b4b4fb01c20eddcb29e8b44bbd71fc25515885a56b8b7e55edd4c21d5e8cc43417e94e57cc49f279d0ed740b286d4e27c0b909729c4250ea2d1857f3f7d801a87afcee46f455f8a53e211fa0a311006cdde262ad4bc47941bc52db89c4b454b7075bf29d9cad6c98b7e84318a071789a78d1a83ece7a24cbf17691aec06c5fb7bb8a832c0aa33b27a5b3a68ef36364fd85cbd19e8f75e184c3d1cbccaf7eb"
"c71211506021ce0d38bf8c0885a205d7f4a60f7fbc972c7e2365b07d5a52fe8ae02608c7bfb1650ebdb4f2620f2698f5fc90c7b42a34a31732d2cdd12a4bcae3ce399623211946f74c67c5e82c0f53701bb4460504e17c1d6fa14288a63d97a86068be8ec36670adc16670b5cb3c09972b596cd441e4bb9b50471708bab77691417517e91883df9f0b353c2bea3d0acffe5410097edd2b3886592cc70ccaccbbf64d168637a8a3fff0d143e497e5311a9b13b4adcbe8d2625dd1fcb5ffe9c83ddd4a1cb3046616296faed945fe7b29ab6f912be6959f8768ce28958f2441a1e161147145a1621693b9f2d24fb9c7a89535456dab48dbe15c689709e2af6a6805edf923d8504f3d2cb8220ff9966f854c84e9ff04fbf45e42a5c73df4f719b9ed287695a4a03d5c0a3a964a7b6e95bcfc36a292b23774812e8567a02cb8a5baaf89afb900b3fb7be40c9e8432656307fbf2487c0d1f3baeda11e803f9f298e7e0c478f9fac11a43ca32e2cda46ca6491cc7b31aa1725d24805587722248dc326cf81fea4fc1ba9a58bdce9e34740e3732b96889b36e917cf029c7027c5cc985f8b3f0fa4e504325d56c7e653ce903e8410a6b06a2126b3aae2030404441273c1e486bc8285dc078c1874635e75cdb753a0fa821567e8116179b78039f8cc52675d538fe38a71f46792af445b125dcee671bf7789f2e874b25f05a431ce574a2d85762ceade5e5cfebfa5ff62b1ef5ee155fe418b16638c1562b29be425e05ef0237f03bb42181f55d4370272a13d5fbb353358d"
"a434519cbd0e4fca54f9cad4a7735238098d3984b0cb9360eccfc63b3b4339e0ad2b2719552085d7445681c919f21a6b482402c271e34d7f9fbe4fbad68eaf825c57d22ec0a2c5ddec8c1273131b867a3760626abe779e37ee632f41f212e9a9aaf26fd5cb28df689d9c4875c49db62213faa1e18c35b5d2df1fec21852e7c35d20d6df85ca2a6b10898b244da31dbb6de3a3a8553601c0dabf1e5f4755fc77c1561223cf0b1ee43441c3aa9d855df0831db6a7f6949ff0ae1cdd465aee616b789c268417de07e9c0f0ddae6b07ce5186b3b83ef96fa1ba9fabda1bd79986efa852a348364e33e89458550049522e64491a9b24514665af058b4be4ba690299d3c2379b25ec97575a9312b38d3106f805e829bd77033f4d5f1b35ffc7289c118749b31f17babb56f48aec597049d635c055d056db0434493a379d15010f3325690444e1021abd622d18ea7e0b5d5b97054708ea9087b4721bf857e3504aafec84516feab2a6f6309a506cd3e931ef3ef47807feba8ff0b6dd56eb83349d99be8633675eed19be804c06d4d81b0a256ec95cfbb2b6565d7906537c5adc404713baa8fc2e0f425c577660df47198e91d2eb3ee7a9a5025641aaa759e7e1f3dfd85c83a17a6a59df4af62bc669f28d12544254f4e0527a6b10958664af9378e41aa9f88ef3041ee6880f23a858254b5d0fa7899655e9d06f12fa863b63c2c950a0c3eae774149502f0fa3c3a44d24add7f9426ceaa21dcdc5408f0b96d63dcfd97dc4a3ce03ccd56c8d48ccb253e82d50123e8a51"
"76ae5d1b9cf6b6c11d2decea9f91e9ddfea605eec75391ffc4e01f4988c0ee78ccb3adb8a5e16644eb30e7e76ff251192fb3a8c48a68224a2cfee4aefa616ccbb68abea13d335a4b212b0b9841a42b418cf413fc868a842a26950e11061608a623a5dbd520aaebddfd1a559705e8cadf6abfa272925651f84130223b0056be28b618bfdfb164d2c5db86d82ac0eb2c457198a6cf8b0c2f2560eeac4441df45a9192cdef63a00adee0aafed7e0ab0bbb0c0b9a066f9f45f5e0c6a9376a069a45512081ee3edd2e9679d6c46d71e3740c5ada7457fc5d21610edccc2bef851d18f89e8307105855da15dfa749c44370b8149de48309f99fb5040d05d0739a64cf253855c185550339af73be6d5cc2de3186ff4b004ac816c1f4afcc83ec3ad66740c57b9cf660de7ab97b0771189fae5957751eec58a3aa6d3ec6121bf767d13533ff413c84c1ef47142f51ebf515c3d60a3c5cc3b9eaf9d43d2a84b94ce02db3f254862cf3c6330574fde5f8257c215c416ac3c9833839d5b33436fc12c21046025a4b0be90f18dbf002e001b8541b888835ad138def9910c4546fa0cf496bb4415463cb10004959dc6b0e379c18090bbd1aba6e9588fc21a89778ed1a1c0533049867569691aef6bc310fe4853e9e9bdd94a58943017a197526c70d2d278c66e94aa97abe5af8d9faceb0fd4e102bb69c824a1e4709be2125de420aebb11506bd62ae6b32eb1bb2cbcbc35dda3c992193086b11203775b33dcf4206a976b31222fcfd8b0e6beab7eed02f9f6d0dc2959929e1d"
"30c856a672379ea1a20bdea6e023fb7ada31f6f9e02f354f464b2261879372c0c92ea462ad11a83d54bacfce3febcafe14753d697e905a7c77031beb83076444aebdb99cd1aa470d5774ed91cded7eeccf7fb18860fc39577a054b17aacae86d02c2dabbd3ab068c982cb095d135c11daedd863bf9abafe991656d1f7773cbc05aa66c4c800b5763fe845d06c3b19f4f73dedbcd50ea363aa11e8274d541ab754209fe7fc159e7bbe317f8d9ba602bde8fe02171f8daf608bcd4663eb401c7a3f2cc814bd8fc195cc192d4d6fefbb15b9d9738f5e6ade7826d65b9d8477ef500afe2e40077b6ecd7d3ed78233fe980332a313fb2fe854d6becf9ab4c1008cb1b16a513d3fbed8036ddaaf372e8891c59c6e9bcdaf2d88e22d528b975d1a36af2fa792028a3e1161a74545eab1cd6284079c2353ef1c49e3e1242ea52d22d8c7d64f553e4c396e7d62c4a6619ec698b56cf25cecb6673d8a3a703f65e480f1b8b91e4427e9f1e9dfa1939134d03cb3115167567835d449f50cc9bae06adc68e3211d8e0cc1faa34f7bda6e1cfb088fe980397f4643e89052d2bfeb233ad81c3cd466bca1b1007e2e6459e3aa1e51f1a326a2f5d89407c05946b0dc7741f458464b5e4ceea5e367a2e4f0d007e9e31b24f5b7bf69aecdef4ef57de58719cf9fb5e8f5366452013a5bb69c3f1807d83e26bb63493dc141ab1ae8eeea11c495650b346919de060c4af1a80823fb10b4cbc333b9d6d05c6a4c293a7fd524c5259a841500617ee442222ef2cfc71a0e4bffa87903ff5"
"31898a44452ca2b132c4a633c91c7a24bbc885a01001988ab845e53a350c3b283dda71360c7a9b47ae40f72737ab6be068ed8ecbde1d0bcaecb729c5bea691ba0de6867e6e6879fdd99efec2b6de4c2691ec9031189491a01329fafb2f0d0cc28e26a22bf55be6ca866dd4a473153901f244c63967e829d9ae2ed83451a365558b697055a3b9a6bcb1bb40ae56f13d4b60defeb1a06cc6831e175ccbdb92a34462e786ea28e2ff25b813b63b30ea3b8d9a0921a5a5bf45576b39fbab6071fb1412670c936b5fc31d668026d297c5b84739021c4e763686e4011a2bb7e109db8e1d6bc853235a44ddd93f1012f7168ba3091a2a92a3e05bbc761fd97ebfa22265e6c1c2bccaa9d327d4ad61de87d3b5f0c5b29e604f79827064e05eede8b574c8982bcc0439db27b15bd7ea9a38923a1982fa7063f9f1572963c75168d53756803f6f60604ab33388ccc1294fb0ea143fa5e128a060da40f4dfa0382906b878a602c568f3c99809cf1d5912f224b2adfdcdda84df149217bf8edae18fb4bd825900ddc57ecca2eb7d209ac44e06e674c2b7c126756bdbad066dcf187344824050b16ff9414fe957c37a048c3a260a8dea72f7a12bf5b35e1c2205866bdf85367d94af939bf52a3027e2c560ca096a449b7297687bee98e4cc56e1449448461d028e435fef26f060097cd96bd605d5a1cf6b1cc95c49037401878b85d437ee43bcfbd7b2b8c145c05a33fe01226a637dd677bfd28c8acebc4a30494917c253957462cdd5a3d200e350f5d92c5c57bbbc7b2392e4"
"569610f35e3707aae8a481b8500dc8dcfac689a018671a0f3634d18fc7bf4f7c58933da452308e348a446ade0bdd6f02d29cd8d273544ba46f1767873717fea45f0e0980339fc187acb7045612e95db5dd9c89169daccfef2e3a01c4d19984f8b1cc960d054285119f23e746d743a0db459bdd5803fcdbfe92137e80d47c84c547848ae563695cbf113253b8a96e368bdacf59ff73c023d043348c1dfaf143ed13424662c2da644c25b9d22598813e1973f30ab103c0ada9ed247ca038a056d18f2e7c8443fd2c95366b387e9ab972170cd2b4438455dc73619ab3444da0d64b0b2d3a9d640ea917b1c09d17c37fd587eedab367235e1748dad753e4cbc74dd53017ba65571a5a65269666df0a24bc694a2d24e862830e7808ea8ffc1fd6cf4b29564c8d77d9692d7fd55e496c69f5f17fe145abc0dd1818f2cf6eb979c33eaf41050901dbbe5a49c8bf9983b1284fce92703b45c4131b3204fb9edd58b6cda3918cc490051bf9d6751b7702e577b700230f1820238b959e46f7dc3a3abad842814c69a76be5376c1e7b35e3ad7318b3439008e4c3801bd6754fe67cc7aed658d89550a30cbb1193eb5d2144eb7f84c5c6ee9e13947daa3534ad4902ceb9cedcae471547bf95e2337760322b55af97457d23d174b1c6f3e1d3585feb000953e298e35aeb467e90342bc61bd05af59c72921b2fd4795c19bba268bc6bf4f18349ca91b89cbd6814a62dffd4684ab78e998f7e3833b51ffc495ca3e789e685417a0d972bf4192b0c50016a64ba839da14c3c5bdd"
"58a74e96e56c66d73e2869323093892c5272aba5e6edff5a8976c5e04976c8bc1b8cefa630cd924b5bc7d28dbc67b8aac4d7571623c4d412acbfdf61603d2cdf1bed6fdcf8d88519a3ce3c4803317587c4a7dd33147f66aad06554d69138959fc3172298be9f5f83748b83c6618758bb45058fab1bbc1434b993890288a42910b91bd52ac1abe775acb09cf7173ff9fdf0e644ee94b000c8ac5cbce24d424800a9df431e03c650b3f4196115f100b49b7a41f68ce27e5dab5865b40a0977cc1be995d3504dd3bfcdc8db2a57765b1a80f6cdac0db795336bc9ffa4cc163df1d9d6e034d5b246cf59ffb2f81ec02ad4c48eb652be03c97a11427ab519d8fc8d704fea98d597e44cfeb168f3fc1385f1a1dc5926dfda78be4c3a3e1d024e4492e952cc8471ae1f26150cc065bef433c0431128c7df6c57bd79dbd409fb0684137465ec0687ec2ec45c6fb76eb88bb7bfb4df3fe69421dc7e0809e2474f987a59980fdd92b2a66ee31fb9560b4657a112ae523caec636642e44b507ed5a900fd65e29d35c89d252708b7f2c2daa29062b94577b0406ab9cda76c921694998192078e2ba7a90386e1544444c228db678f9c7da51a06b9c0a22ea26ebd3dbd8880a6e981decba2f659ddfcd15af8d06031e2d8ddc587417ab536fd4cef49372e0510c58060f2900e030fc894f1edb6aea502b0e2642a8cb1e0d22cc11a43cfe8eda906711e059d6e4a55959cc337dd54428eec2c123f5cfe185a78f442266f54213537af2f4b42176951bd9b0d1b70c61ef5e728acd"
"1a5b0c8f0360fc3d4106d1f1a6a100326500e25cf6ce2c7f230e5e54526c3affad6bba78eb0a275ef942e441919384b0420571655eff68e32cd97a322e22765fe736eaf329f41b2ea005ad56acb4c092b7bcdbf2bf3e54b058827259bac8bd94ea73e1d61cba79deb078857c63e255da3b8ed4bf5d4f603d8e3e19813fbe997afbd272102aef06950ab6daab60139fae51f0fa8b48f3e056a360f074692f982aac57ac3472539e7484862997ed283dda8be4b22b83235299d1b20df4ccbf0fa24faf392a8433535d3f3cc3ad7453b9b150dae24b8c78f149b53f5394af065082540b46f6ec3e70e2428b873fa564b548cc1e39fb406ff897662ac7e901384b3094c328bd484980c120518a8504511644b0616215df50ce1ab6106762d52ef24d40b9851168c69b3068682525f1050fa3ae139c9500f89d1b5a96c35f71e25f8ac229518a79fbdbfafcd67d7356bfc3e9699f0e5a8c9fceb068f810cf2c8e3042b5fef34778a3edcda569dde4fbc240996038e50e233652eb5f303fca7f8f29c633684566f6548bbc311bd24d7e0ba95da8f02917048d9777e5f142f83cce4187ec1af72b6b6c3825e38646f9f29697f6fe3b3cd76*0", "password#"},
/* test vectors from CMIYC 2012 */
{"$dmg$2*20*dc39029a22b86bb4f930499578d0dc9eee69398e*32*bb47bff69b10ae67000000000000000000000000000000000000000000000000*48*c4559cada09552ab075e73dbefa4aea1aa21209011946e423ca707753a91c87f6c4cbed3beae20a244d33568f852068a*6*4315*504c0c37c600618fd54da114fc0eb24d6f24585568543126ac56c034cd8d7b3dd991f1418d0c95791e091921c02bf695b7835f7b0da2c1b96524e72b4bd3f671c592aa176b6a58de77a35a26bd1d0c313b2ca23581027fc52c7c63f37439404218d720171d3b178125e6ce0646bd6fa1033f2ab7b6849b3a35a430cbd1401f73b5deb478d6d0f58364579c208c613cb2349fb19adaf98be2d4a74a6030215793fe4f1129189626bb87c23d26dc2af51a98e1fabf2f58e106271c7759d104b9e5171d8f952ceeb14317614b7a14a5313029aa4068b898f7e0f5b68683feff0d375f2ada37f20135df443bae913c7e96a29c6c3388b4b51432add89ee22826ad0b1b0a4ca9233e691f71a5ae2c76b5e5a135dc793e081dc53781faa4f844928db94084b53b39f1820c8342b563e3f46b002bc52ced63e4588388e69c9e85e2002438a1a703de411717d24ea88adef3051b27def61e4b9a31548d3714c3bee39fed866254033a123429043d0c08a052d2999a171b010ffd119f90bf9222462508ac914e0a68daf93f63caaa0c4302c9b1f6447ac3856b09eb45096b3a294731f110b90826b0d611e6e045397b07e5aa64afd271f1c92664e648af648642f786c0c8aae"
"6218f4282d8efa713dce232fb24df4073a0e04edc86d940e8ad22db8ca751143743f9f12585bd788551cc7b70821b5c42b133cb7781f60d1b9c345e9adb122ae444be456b8e49f9bab0e2033019b52f2ede4e7f56cc1d1dc3a48bf0666cc7a4dc6b4ffd5077673f2f6761688e4452a4c11b82598cc0ef57213f6c7c12ecc67164ae501b3e87e25a361d0615e48cde249f0193f2aa69a1eccf029340531becdee8eefbddca18905451b48c1085d4cb965786d3892d7144841300b8d2722e92af50fb828cdd8e825dbfb16328f7cf792f311f84078d45306fa570661e1ef2b34d5d36de2fc4b295f5e84fae8d55ca22bc15764932d0c5dd3cfd914b2b8f67477b2b5139c822ee2c511a03f7e9c717a5e8eca6c4b54f9c3b7d85765a78f03b29fb979811ff0c655522b341bb54ae3bc412eb760eb689c6b4c3bfb85a8ce794946214c574105e577acc01d3f8885e72db52075d05a75260a6e4a54872d087040ff38f8942cf150c3615088588cc53fed11040bed573c0e9ab14b987f9223ad089bb73284443f61ffdd61616b8a783e85618217e8bb491a31b7050421f4b0a0bfa5003775933db00e47e4452adc1433da2603f6dc5b9dfe58efe458da25699e512660ac6f1129dd9d7b176a24109c6e6e0c201d784addc9c7f8d4f309ef6fcfb02493abb7c836ba3a371e64fea941031a59adbcd4ef59f0dbf31f361f4282a0e60ced4d9d17675b0422faa1c2f932cb525ee07df7eb2643a67963aa99daf5b119884557ef1585d81eac5c8acf32438636a10d043bf"
"47093fb53a5b3ad544a38fbc3588bea3ed616167a79b2133efd8c509f53626b9cd7b71828fbd5d61b1df6ef3713b5347f65e7c0770715ac1fae561cc548864f9cfe281c6e5770f053f68ace64702c81c97976f471ad11c7551789ca21a4d5480c5d3528503f2f7fcb268c34498888d5fd3edf1c71d12581c393db2ff863e22c1f6c037106e5928aac9118702b45bd36782b2295782f93458dc120e79cb3d1632c2c5e527e56060b79a751cb7653b8c0ed2acc32168b56fe5b50ff9e49a71dc9b82f812b53e095660cd7d59c04f31ee47773a04eabccd7a4a6455ebc7d719c9eaedc4e6c935fc99642acd3e60e0f564efae90d7d1308d6ddfe7eb89520c234cafca6bc7e8ac96ed401bf96e3c9de704ad124b0f9381f22d9ce846fad0b14eeb5f93eb0e0fd0657c480fd2a1109d735f3825db598e2aa7e624f282673947c38aee8832ec8d4dc5d6a7306e3477ab4e37588788109a3ed76741f8f2a796d0f5bef8247eb298fb973c4e5d13666d87b0bf5a7a553f208050dd7140f64fcc27793ea82cf58fd86ddf805a700065888bbf6b5037815afe8c03eaea355c90bbbb448de13773e977fa4c6f06e7695e80882cdac40301b537fe254eb1ee437a6ccf3efa68899a7188e6829b58977917a9d6124cd2af7cfa567fb85aac9c6b971423681a0b6658575ea0dd32054800e08be5683faf46165c56647e1c346961608bdd8e6f999eb033caf73f000a71961cf2fa8c319f4084c0ab499caab87d13aca3f057d17748522f08b36c56c1746e49d731f9355100879"
"d7d114000293520c9ce71098d26b2114030615aeedabd5a6f7fb9a91f98b7ff00ec72c82136a00e5a19384084e0aebc78bb3cf05c3c1e3872f56e254c68694d930eeb46ca8e99329eb923ee0f1b5af0b7276e8600e25f18642247111eca41da427e5b9034a6a22627734ee024c2e2c4277edcb3a0309c3007c19416fa131086eccc6f73784e1a008dba5166e7c8aa4cf8efc3a4e14f59d665800982e46341b9b098508510c7dadde295a784f7a7085f5ddab5b6881b305f99d87ce3883e557280bf2a1f3adc69b7cc9d4f339623d21d569230e57a2bce611de7495d403adf451725d7ef11df4bde5a31a95bdda0d0c2a7869ddeedf2ca7e1986ef430ed44bff6ae6e44f740b2c65364477ade4dff6f4eacbffc67a2e0494c81e0424bc9220bf20aa795e2b20db6076667088b6863243ccd2bf897d4b6e1e58e2662cac593fb9a86220d65964e7f6e0f1987d07a4a8242c41c001ec38ed2442011d8a56919800b4d590338eb8db02833031ed0422bc08b11dd59b59f1d301e82154803076053464120217ca64bacc02465cdf629732cf709777452e177f4a4d1015fec4c36337ebdb8daf57f19bfeb247a27131ec5280038f3d1a766e071470ffb685cf4d9763b7e1b5776589874f3cbd4761d5fd35638918ad144a4a1bcedab9d652477951a716e4073cb36640fc257031f06e4d6f586a9a0b6172727933179e4cd433ba940571f3eb908535a12e9cc3ec1e8f8aa9975bc17241779d972a8fd8581dd3850905cec48061dd5fff1b295757e38ed8568c3a2967"
"ba271e00fb507b10bdd5ac5b90426e48e596ed430b5a3c554ca1cd0d18a90809d8db18853e2580cf2b2ca52ff686b7cf360799bf69c008f87191ee372b44f96696a12632af003eba51adf1e6101628168b92c718c6f7aecb765125880f180047ec3b89fa23bf57e4fabbce38ef0fcba829123f0a3ff527dad6d6b5b0c4b0c4c4cd13787e98c829bec08728acc5e90ddc6bcfe2254eb29ae8450ae87841a39958ab80a38c8a742de64a44e25df0360a9e8672148347d7812bdfcd9037723edbc5fb4a8bba689dfe3baf113778a498e2689e8cf1ad194df422838a618b0cb222aaf020705fcfe1475a8c205690379cbe2d0b5f9a0de41a4d2e6ff85f1f19a97712bdbf49bb90051ab934407bdda9bdbc1a57b0e874f3b2a09df45b7d01bda15330ccc57a752deb2751e495e394471f09f33d98d8face401d418affeeab86be36cd8cfb0f435d9939822041f256ad860733ccf137e582e1cfb5a8b96ffe646d1928657c05c67b8589a90fb32e078697fdf8a3ec58dc6d350a7f50c83d09e5884317829d8e850b7fe17bd2ba4d7fd94b86d060a3a97880fb350b95cde4542cb7d1a2f44f8ea065ae30fd4d4b5fb24f787b8462115b3a918155bae098f0fd7ae2d4646d3731d228909f690cf0116e1ac15899513957834e0a74d8c07f0c696cd3268d631ce1292f66b2633a3287a7e058781aef9d3d566e4e41395fa7e1793aa9f669aff116b99660a5a29fe127a0459eacc3fefa4be95a13499dc844d9faf72dca38d8032932084faca23e4022869f2034ace2de0"
"b286e71f2b569951214fd2eaa3d32da48a234265acec4967c74976b5b5d635eb12cff038a4a23d6c8e86a11a408aee5eedfa7209a8ce8d6bc10271e4b5627e16c5f8ce8000882c461de0113efd8ae9cec6ac4819ab2d6f8a9f189fa2929807fb20a895204edad9821d180c54e865548f9b3eafd8073a734e61d574923f0d1f69d266d970102434b0bab705465833ec9926b03798fa8a95ab98d35863b7490db07fa1abd600abcc3718d105f26f96d20e593ce0c82efc68ae65d03e4e2ed3faed27bc5799e359588fa884ac79c1ad4f5f8bcbc9a2a5605f97551710e2e416aacf149941265406490d32cc6bdde994943fac2102e57785dca3c20358cd431cee285768d9eed6ed32a9919e13f1a38304db6a57f637b6a5c8adf4e829baa82ce674ec7444fd9f7f1807b8f65d4b68ef7b6c3fe5bf653e81525f7900916f5d5809a52c070256e6b4cb332fced5e460c9a2f62bd73392bdf4522be7c211577559f59f62869e0a71f832ff493fab76bbe70f3c0b902fdf45cf49793afdb87558f1a6ec289018035d861990eca1dbfc412492cf86503af00c7db7a0a2c6374eed42b440293938a36f61e1c4c187cd50d974f2a0989b05b8ee207398560b516aea520044e37229fe0efa8b7038441fd584d79c010c0f31030d60eaa4dc1fbdb5a254c089198bb5eba6fe20655808c1d22b9604af1247e2b820823b3c622be2b01ca5f16f86af880908ace8765520c813afefef18e2c112a72fcd4760da91f7d1066cb5c8c902745b83be8defa193bc8b6b93a82efdf17"
"13a223660c6ff4dbbbaccb1a4e5482cc238388448e8b9c24c9aa3acac9467e1f6d96d6deb1cbc9fbbf77b7e756068e22bc3b9e6c275987c5eb99da6a5e2d90a1e0558c4f9fc392371c07a7844cb947b19dd1a6d9c1ebb6496f36bdce2967bea2971cc1c6330b1c31054c07f8d853858a46ae9370ff1d6ab755beb120a61b4774fba521baec6fe8a079862a0471cdc5080c0f073f7e3d33f0f25978d098f61bcb4905c776ce6c0562dfe08d8b9f17de4bc2048d962ad7f4baf132cd0152a904fea9530e7c1f52a85c0188d6ca38ff9b692b2a68204a6dfbfbec06f2d800b4444503bf2dde736be4108845c5a28909cdb42391b5a0207c157003b8dbd4e43996ab5017c5f21cf0d4d9b3145c0cb70fefa767b4689cb750fa7657c4a788b7759f86496998fd4b99b2ad1b2918bf330c1a81e8986eab031e9f86cd93b7d623c72e1a394f0862a193f21eeb858524477c3192fdf5b61ce9dd5b0bf3b3d7adbfa828f1a9ecd4dabf5e318fc40262f0dd204f28b934d1af7b0d7cbcc20be21f1c7e04fdf76104767892404b14965bf8d53003ca9ff0a8f15f5d9b2e152a662ddd8eaf7902854d8561ff088fe2e880a18a036d06c29997dddbfaba32ae4ed70b47413c2a037122d830d55bfde89ba645562cfa1d29f428da108d93562bd291748a728d1b3090b8a7f56293a3135f05d6876021e92aeede437dc7ab610e1e5af0a00c880887754d76b42b059f32f9159d25ffc56a993661d06a7973d190fd10c4ac998c8627b494444389c529e41982726f47135212b67"
"8b69ff36ad29e225856ad2081bd393249f469648e6ea4445e0011adfe320b4eb5cff1d9332c1779edae5d5d66931015e793f730be8482b5f488ca6372edfc71abc4b8aeaecf8051bbcc848d736eb0aa0d7ee4cdb9eaddfdcd4200c3e2f58a97a162565409abc44b8e982fb883b619fa80c7c4f2318954767ea1c63c70124f4342118f2c798adaa7ab5f6ebed1b0a15e12f40978ca8e5f0972a47cf397746f9f482902abdda10ee7f4c610935070f888b5ef8eeb07933e1d6ecaba243fb475b4c788cf8b453638ac43b9f6eb74654835678b47d9437a14300a12553fdb10daff3690e0802dab80fbffc401422a465e10e6414975358249d68e4ad5a1f1c93e295bc10b8c5c11ed98c7ca5773014a2739c0592dfa30d8756be1f66e4fcc01beb2dd58d87800e71d136c12b8f73298cd37b1bb5758376b2111921fa9f7040e69d3620415ace96ebf29fc1a87e392a9e701f4075208a1a8fda7a59b28997c017da70c18d2bbb5c91db86d701cae85a5742842fafec723be9d93b4225619c7188f5bd23c900ef3863068785363ab861b58aab8e91b562b26f72a812e7892ca0bb6ed91086a2935ba82938b367b34f70cbe40c02a8cea92a78588f90cddcabd2738c9a18450f6d3a87c7f827a1773c2c7629452f64e1528258a8ba75bc53245c705246963369f1179a765bed41d*0", "654321"},
{"$dmg$2*20*0e2a3f19e5f9a89ef8371580fc08738b0dd02ee9*32*57b5e138dcba821a000000000000000000000000000000000000000000000000*48*4a33cb05d5fc441fe39477724556bf2a3445d2826dab91031374075f9b5cda25084769a7af11b2e678d79514be8e5f63*2726*8192*585b8129cddff9f9f5875d62364faf4dccb0625867ebf2cf7ebe08913e340c8bc5b62e4c4152b2274a19c3fb7d0f6ee32e7b6c502073785bbc213c28890b9910c878702b2e16ea0c0b0ed1462b831b1eb02a0a5ef586de3e1bb7b5f70b64e713f2bfe7f401ccf0a4430981b89d23afd47d05d1d28d64917ad2895af8264350f306b7a0b67029f6da75fc60137b99131d3678cb8c596295bef4eee92110d09c52cb30486709fff75b80753378918af4db98e69905245ec52c2c6ce7e71ea62b6e530269af23836fb40cbe12a1498d3d4e66ac26b04c31d4a1cc169909f51c0468edd44d051d79c361f547d7f4891195b96950ebff98f70b36106772abb775308cd6d42fae3a60d748330dadf7ca90bd474d05cdc678a0cf41a5f4461285ce0ef0a6df3a400d0116d1d1f17cd10be2c8f164ffbc3797dc022ffe52b69f0303526d3a17c113a56e67e54b4de121787dc62977af8bcde3f4fb596762ce31460a6f97d3d07874ad42f97ace146ada9b63f579a411fca985d85d64bd3262d1d2ab5721119b0cf8348abacf7aae2f57d3b667a5997d0fa448d3da4c51a6f59c6686a92a35ff4d6d951dc74acab9d956e9a942d9356291f56046c612ff09d1e10d8a0c60"
"bb2a4d273b03962f5399ff455ef480018dff09125f6c343f28b13acdbe7f0309e64406d2c453d57d6e78f10caf01d8dd274e0ca6e4a82a208750de92640ef97f67dddf90b0c6de767f185b6bf17a119a735cc97075b93fceeda807d0ec20bb4ed923ed8855202d7d285b767727bb5db55241cd21cd5a7353cc872f0d4a00fa0a50608eeb4cfbda71109a4a2ae97f2c01a40c4968c32ff2c01f05ee768b2ab22f12697805396916d8fbc1b06eeb320d619b0e472b763e7a72acd949e17620f69839543c3852c83e5c3b1cbdcfcfe0e3507a4fecfaf3f27118b6738ae8e33801cb1a2b4168f8f614dea5e673878964d6e27a1d8d8aede3bcf366400cd0155cf502cbc04234a2a418638531ef13c48917328d2bc1736e85be9cd80cf0d99b98d0baf9dd9bb3f840fd15d74788043be9f791540248b5dea621487810371995e5fff578de770699ed8de1f5190cfcd5d47320594299af29efaf204e0a411670c6f4f60652422a7e25ded5fcf26c1d83f805938c1ae578bcab6ea5c679939e5fc6593248d6b8fd55c454d2c69e8c756982c01ff76b4911ab494d90df56d7743f4d8017423a045eb4215963317164bdbb473620e8a17507a9cf26749c6141ab7b94af974db92c875ecfc4ba4421a37da4454867ea3f7d8580185eed9ae3271050d039c25f7b72e18024f91edbf3e1bba71f697c8451302b1ba97c8463b3699754fabf472ac399bd3a783b51cc945051ba1b411ea8093278606efe2b34b3992033fb773fc42cef45fb0482992d5f867416faac3912b82"
"eaa852935b54c1c05d2b5be854fa75ee754235ff1e84a53564070de838fbea7704fc249a98c7fd8a4d4ffdc06d5fc0ca39071fc5be83b0e37591e14ee76379f4c5ac64b21f016517ac44a12161543c43d40a8f92237c99de44ec220fdb502d82e96f01f020eef2752279a5aa3d3928a4cb594c5e145d016375e3d7a89d2bf12d4daf3886393c31615fef9e4201cc0208821e932e8b26df396e7c29f2c0b74c9f59ab79fa44b4f9c1156741e3da93df51bb23b756657187f1902f3d5c79aed88190b4a5f814ee1010b2fe82a3edd867457dbbf0598566d80261f83db810d058e785261635cfd1260c6b3b43081deedbf0b2a30d801618090d07340a6ad528b73c7d652efdc48fed161b0a0529d5d1e80fb0a63411d53e75e9ea9873d25a3bcb243faa406293f53a21b37e80023a302682943a30c8f1a5804a3700fb92092677602c39235246f359503cb79d2e084cccd2b40840acc7ac7b18b4e1a665e3833f5b4aefb40f0b36b70dd6b125ac9999d113fed15e5cdcb6ea6043036df3dec7f5638379971758e50f1453af5e48ecddf1d46e575cd2cde1b2091c1797df41f152fa77621f69169d42398312155caa88850800f9a8792c364021463467248e385bf45cd40c7869efcd6e9a24152bcfc8370ae901c7757a19627573a8832e5ea62c344fcd60230a3915561b6fd957750af61ced54ca1ff1a8edfe5ebbad51a79777ebd4e66c63a248687220e66d923c746f56f009f9d3f1f186d987c057af87f7a70a213c9c6eb93867983c3191ee956c8991275c5"
"5b07b2ef0eccb8b0287414a154afaca67f218ca43924fffe6e6161690756e3d6a19a29ca972987f603727397e5f4fa19d0c3f1e74f026d35c028bb81450c7b5493a7d837e83504ae7369a49b2354c6c6219c79ad8cf9f5bda3765541d9691b84d19cf1fb9534f859b58257e80a7548c12ca2c0fa34b8b6248b30213be0eb60de5bd04621c163e4ab00d80adec931ee00288fb98e5eaa8f6ec83af863b8a3634f955b54aff779725479d80f2fa51d25e721b159a3dd814db70836a32b3a4e55c4def271a1918805f31fd3af464c01006560b36e1ce0a745d3bb121710083101d1ee469b971400d49483b6c4d858cee24614786f227f320fe6105d61fa8cf21136e9160770167e1b7451a3d9171f56bc436f097d73dd4c21c245efd72b63fe21d1600213ab4f2250e6c5a16cfd3823de93c9c56ced668faddb77d60f4d4d9a9a3b3cb9de0eb5694410fb760b7421cbf6e40ca4e8bfd4577fc3528e0162ea4c9aef069b3e4f199120a10209a6acb1eb6e39fbb23896860eb1366c6eef023c2bd63edcf73aac6094d25cf3c1cb0caf82b1010503fc8e09bc537e8e690f8bbc0ef492f848f77442cbf28bdb42aa8932109ccefbd2ad6563fd3d315cb79a0a5f04772105e8564e01c1e22f1c2ab98813979da0a08ee8812acc1c18097b8f1fd95424ec0d1b63a85e84257d382400c5f44f570382ae8128fc0935a5f7f518ae3808b79ae7aed4990edd9257ccc74dd19adcde363d4c7e5a4594e3d3ce88d308cbb48fe26edad968cd54cb715e460c7b421f6debe9c70"
"3bd684a52b6b9571a7cde4568d7656e9bbfc5559d2c60e11054cba9eb54120bdf13c4c5103fc777033014404d6b4a65ea0a716f76a1433ecb904e9ac28b0bb8ab5c5b0216f62c18aa29b685cbe1c9172d51bdef81e7ead1ebb5d6c7cb078fd32cd63c72b163d2848de4c6dd59b35e853d6ec578b681af969941c16692c9010576f6f3777a24e87084c4b78a8502d083c137237a60705080aa90b2441e2f01ef9eef5b0f2b25b2b745136cb143405fe5c7ca013f88392428868bd9f06bbe41872c4cb1f98b16d74d064e66b0c435b52913b8153d47f52fd95ee73ab1f25f1533febb72e9dbf65d11a7568a17d2e8ea2616019297846551c6a3248b0a23e91ac1f38b21878a28f828e8aeb19893478aa2ff2f16833d1b69fbffe68b569afdd1980cdf6d8d4ff52d9e2708568db1a1b50847c8310e4d85dc73b59ee31a63bc894712f2d2214973c2741f4db4f3ca9a337e1f6c4ed3858370626b62e975a85e94b498f8c3c2073e6d6fbedb40e8a356e6d6c77c2b5e13ee52fafab4c8d369ce17a5c40deb98c98b60f433889e092d7da5e7e991b73c15127364d70a879b16ae774d65834fd0029c3a1239143b6398bb19ecda0328f39f39ade7a090b2c5c4e75e4922c50f858195c7fad64e4305d04dea5b85d4dd5a52ac4e60681c2337d3a2eb0b47745563f69352e1c17b08a3625f7ba530dc5a393238b6a2b92bebe6b94966537763ef66179b5c622ac068acfaf796ed4f4214d7fbb36eba5c9216cd5ee1d42132c459042063c71a1323eaacca0a94dc119145"
"cef90f744d16226d7168dc9abf46551dbe25ce179e85bd44cf15374ee498f3f3f8fb5800c6cbfc427a834e3f7b3b6b6c7333c5ed46eb2a0c93e4eaaa6f95072221d7cc27d36ad53fd5fee1e65d91e37957a9d34901602d5f49799db3cb4e47e2c5bcfe36008ff0fbf166d9e541504aeed187251b80cc72804687f58b646ca3893e8c9e4340c9580a2008d268e07f7a0705bf062c6b1ebb3a62a4c961ad2f65ec9d44c67ad3a39117d2427d9c3d067df7c089bbc905b319b30d61d099265de1ff42a97540bd08a1ec79a4cef4f692bbe54ca6f95d6ecb82d3ad2316d6cfaf9a66a8b5e5f00847b55509cdd344ccc3fc640da87be6cd4ad8ab3e510b31831d3151b2aea6675c97767076360bcfe1b317c3786dca2e4b3e90818064abb319cca7bae051390063bc6a0a0a133187a60a6eb82162a5061fba5fe17f157e9e589ad83d2f1760f4055879445b0934c954622476c29c9c577c053c723786c8d25829db7a896c66eec594a6b798ed278a824550795b0904e154fc06ce8783a773a8919b624dab70f92000b832475b77db27d0b5bbc5578765adaeac6f61166094fe11603f37a41fa047156f2e57d80a47d110901d96e33b5247a587552e37b7a0712cec420a5680ee8e5550ce5d0996b235b8898d67126415184bc9a0ec172d9f78f595182400c010d905fa73b5a6fef2f722b7f9dc51b9d21d85ec554c9f32612fcdd89577c47b3cb5203132e76ed5a39af7e9cfa2c92369464e14f8333fc29fe7a662b9373011f0d4627c9ba7b0ab0c050d0e67c625c"
"dc83a0e244dcfc7f5b58ceb0d1ca2f16349ad8b16a48dbbd63da41eb5d0732a13ce5a7ee7c9088739eec6d63e0a410fb53f83cc75915c0b6353a75fd2d219986ee35bd3991161fd054f0d39c2c9da696ec2968e801cfe726cd512ddcb6cc28af65b1f8e542d1ad6a6d76dd1582dda6af4f6c9363ad7117e0ea0102cffc1ba0d94dd8abdb5ac37ef9b444387bfac2b811479086e550ce3452f77461febec72ce35d06ec70b94779b794dab1a3fba727f364bd0a65e7255da20d77ac6b85ffee926a1c3c635366a4d5c8233b798e565752103c66d5e7f18f315f7fe2641dec5944e51e373f19fbe1b34dd00f4604a4f741a5d4a8c720bf4e51511fb3316951ea63c3129c4f6242a9014a78a050e633ea5bf85960fe340c54043d9bffb969f8abe458a8c9dd02e9416e0f3504a5bdbf6cd0b4013b4b548bbe59a23149a24296e0c326d69affa61a878baff7525bea12a4bacaee6c216de31e22e218a3bffc996eb7a3b8570caa06193b56452ab7f3430c758c3b447db98c7a1faeafffa497d938d9b952e3ab3f6774333a02742375e7e1dc39cee15313d69e8cad1a251274ecf48f273cb79c58aac657adc8d77f7cd1755ad9a2fd43b69cad9d2f8bd77695dac3c43d2469e4ab34e26c7debaf33eb2ca6cb7fd0a963a37b7dfd5304b9d5f0bc1ae0940bb40375001e9920d4956f4011f4f1263c3b7cb38afa1d8f7c8c188bd226ac3e23867f3989d76a402a9476756e03c6c3bc4e3ce78095125ee11e7b47347bab7a638b0088a3b18f23abae9ab2f94650a30e2"
"9abdbba8ae9d9d03cf5b12ab23f5a6464547bb7078b91f533ea06541941483359a8562e709608e0c5d1da2c7206c5af49be0df87a3244903293bbcc121fd2e20ff909a90ed836f1822ee2b40530084f02bd9c42b350a4703851d197d9c465485112f1bbb21aff46daef510159a1f354e5fb7b11508a3ffe12577b40d3bc16631f8a79191745fe828303cbe5b6d9578cd80f736971e1f108f02039e0bbcc12b42e8860cea15cc18505c3e4242ef481930f3e2c4b64ccedb5b4d9837461efc7c48f8b1a6dae1041e696b99fd8c9108ac1fa9d975b4d5a740c4e5bab92004b7c91cb64e80a67aff2596c919b73d88943538e0996a775b88857187e9f97828f8661f89252cd0c5577b27151b5b0021f17937a9abbfd8ac3946fec79a4063af00802d54eb08461f951cdbcec92f593eeba457f381a7a98f313ba28d21d2574fc751449e1c3b497e09b90f8e1840e7a56159915d98b36647dcc15e1b335102074741f1dba46f0df9e7114ca29d02a7e4581fc45c48e6b31cb291760a05774fdfdc0448abe313ca496bd2d1f011f4706072d69eb0207b0289f5dbe4d1f73355b206ab3d5c777d1d9dd65281a0dcdf598569109e8fc3b56af94e4340929457d2c45d9a9bbc37741dc031136a11955a465e0baea8c11c06ae9321dedadc498570efc3191e67354f0cae6a763e84aaf74597dc1d329c81231546df2fd965d2ce0fa2026e0ca896d48bf8cff97e9e1fc5e035a13a1dce07810a9e87c21988d7e9bf19dd68379f346d232f83d776c36791ed1ede88f8bdc1b"
"62e3e7857fddb802ef7771be6a2428b7bb7e419cd95042d7de60359365efec7397b4d7fd32a4d7e8b924930606e7adc49333809812635939f79a20eae6066fc494ad27aa5be989663ed12f9f1c82d092b7a4af546f6dd33ab862fe21cc45c2c7c58842360070e206ac341c26ef2f92cc7629d873a219ea1177ac6354e7192f4c3f3aedb580c322e1644c92b9882a96addd01a35371c07b6cd3d7e4e38d089559ee41bdaeaf81650dc263a69fffa6d2713d3a8ffcadde7601cd2a87c23187463d3f3305a36ea01743d2cd846cc5ac96c89241c86b3c38ab97f1ab7b9685e68260fc116b7d02db8cff929b871dc02379d203aea4160c6302a7bad3379ce2b77effb3f9eb37d7826181ac8f606e67026fac0f43e39c72a04a6278f89d16a6c14c6d6e3dab80e9089a83c7a370726fffd0a2e6a9a6a950fad60982eb28b638ebf2315932911b91e465f076e97aacad4c6e19ec46a8ba9e7a19fca03b7796cd6d8efe6d2fbbb96b3fd3f85d4622fef029819efb34abc28143faf10ba4879fa69d493908649f03853ea84bf7d5bb21c6c541edf0c0aa96347b4102cde3c27a58ba0788ac02cdba243a3f52e0ce4d682d41d432e632635cdce5be1542b6b6a8708e144a6acf80ab3ff5842ca2db90e9d75401cfc99746a0919ed81983d2171b4093b1b07e5e5c45992f657c892e91c16cc6017a66af6466ade21f4b378a6fea6a8e4bf000ee986bbc0a170467548e7f6e797381ee89fc431f7aa562110555dfa5c275523c202744541d51701d70a8f3006ddbdfa5f72"
"9563bc0234d0b2759efb747633221706cfe73d47743ce6e6077943ef6d0801729e1301ff9bbf37f50667909f1cdc70f95040c841106ce566de5dded0fa485ea539978a88ca8618e566e9da4f2e215d544ee62accbe75dc17ea26962d78bcad516e6bff3152642e346444db494a909478bf6d80aec53f3ffb3311c6283711eb96fdbdd8e6d94c71cbfb9d7ddc7f092df5092199dfd822b98e21239bb8dd17f0c101909bd38d309bb5456232f5a1b731990a4cce847394fc40b859a8d89c7c02c388e7d6ad42bcf4818de33d696ed6d6ace4c23d51fc9d7d82d0602dbea094aa2db51d9aa8ef5c1f4803e40f6f5fae44da3c3c6ce9b1003d95300871353762062d1ad49a31cae73d569bf07d147a0c8d212e60b1be486df08bc353a2e3ca7337b83e3db43be03147114c229fd32fc2eea5f64d5d5d9848709ad7335dab3909c1232d93e76eac218e7e0497ad5b7b1ca8d9ad5447879b20dd370398eb8ce4bc6805064ccdaa6d8ed1e98e259b7654a75848705dbf2c3804b455a9e3dd2890f8d74f0e968dd050ee81af2f98fdfbe831c16dae6589b9b2a16965713b8fa52e5d2d4df504411ad9c14929e560a5f7e74e98d72f71223a5eee41a40d85c177183c510881950bebd3f0ac907fbc5a4efe70a60da6bdfb6870d7fcefe04fdfffd1492c5033ec79b8de002c41895ea6e84393db391b9692983c84148928ba0fae6b2ee3aed2289a9e053d47340b5faa4870fa632c1b81c516a58a049728f941f57bc34ad53c236d33dc2ab6a196e896968d0a2bf651889"
"825b8f358ef4874b0e75e39331e513c506b29a61495e78722bb25475ec2ddcda0816ff634062a54721c9fb425ff286336e7036928cfac29216dd0eacd3e5328b6979f831dccf403e87ccfc4346f5743d972d5047f6055bd86c98b8fb720a3cc3f459750ddb870a845c1ff4bc3499b1c92b6e591eca7e94f1f8d2fa3c57fc97b573a738f7f55e3b6cc975a813ffb7f897930b8de8382c5883ebffba463ce72b0c50c721db403cef01d5be035730ac3c6f6a3f78681218656f397966753c04507e08a09f7176c3e37de40b9c7faaef1b675fd083c9cced4261dbd4a289f6aa0ba04964e1a6d328ef05786933d67d6da009aaac7d4a8ca31df5a15e3874eb9b288edf7d794e1abdf9e411c5bb87f7fb27f76bd62968bba4d53844e76487818ddd38620854debdced8930ead6b46f3bce6009683d3ffedfff0be83cd8727bbcbf428c761b79a3c06a7c2de7b99394030b51eeb954cfa3fa307a37881a8dcbcedf9549e2600b72f3665946d14071d9d22894020346466bfd2062e092f21e38e920609df77e3b8ec024334c9708a415d3408e22645f06cd6d805e8da2f4005000aed542aa995816bbbf32597d9025daea32fd07733e080188d6c5c7af4ce8b7bb25d7c""50e9f3cec80e86a8f9f6d4e78a40ee20fc3c83bbbd07020f0092cdac8ffc2d52c24166d78da8ec32ebc49f815264c5ab29ab84f3b44ba75c06b80aba2966a617830efb08fd3fdda831fedeb67b7d593c661538d422e1a9fe378acf51b0f2a07f34d84624e0b90af172e5976a237a7dea10f"
"a7cbfd3203d1b4985a1af6c2d2300136226b2edf519fdd2b7b5e3fb5b0c70f2e3160305fe9dd0c09b98d522666e5100532f516bfe24d12d46b5decb4d4cbdd5fe9cd647006c1c7eba14a56262fa7a3b7b6d7b22032c1d444fe023d66b7f51004c6176f4c198a2998beab66ca70e1343187ae697e9fbfa6ca6443d617552e6b7bb73c59613ce0a7cab58545bb40636f54ccdf89c507098680f4486f821b2fb2c7baa182686b0b6f893fc9575df701196b14255b547b925387cacd5f4a762b1d4b7f713e7aebe4f75ed648b8666e60a4f8d92f752451d704e19aa102bb3dda418c80f3b4f395965ec36fd9474088ac213b38220df73c8159401ff87751bbe392e0aab031de59691a0a77ba2ab7cfbf4daf09fa4d7d61dc5b456dfdbf7a60eab671ed1f1a67fd58bceb34e981a2dc3c3bb8a7a14fc8443b47a123662d96b4df2c584856ba257f39749d51caa70b147d50c68d4aafe51ee195f1ccb99b7015de726b5f0e85bf37617138d2b24d1cbe985d8d1cbb40a52e4c57e20c799e2f5ffc0557be9d3e2bc5b99dde628c4dffd5c8704c78689e967bc870c0fec80c3c69a2453b052a46e142309fb21bcbdad7c6c5a67df409bfb9899ec58ff0973e1813f47ec6428e35a932c117b5dc70a8f5b1a9fa402d59fa45714b4bd79bc214d488939f997add26d13c147aa4d4239d8aa0e3c70994eb4a8debb7cf292b3ff59bc36f97a9acad107fcc556c24a309c4a15dab16a47a71f31324dcc8183fdaabe1fbd1cb3808c1c35c311ea51188759d4e1533d39a9547f"
"04054e2ef994c97e213669f08db02702dd8b54154e7376f256dedc67fcd3dc48f5e0be91f1f88766415d203bb4bb11c4a0f6d0888e0c98d3b8519aab741b20ced0e02a5638e40ad2ffc301318a77e57787995acea46eb8ff7edb535036c3b3781d63a02bce56499cd03ae75ba6610ef27124da36dce85ad406c82e72a0319dcd6e05dbc66523be5015036de859af45be32c664c18ad712bf09d361769be3e568d5f51c943ec2c9f74077cb9f5757de92c643a2963d69c2cc3f010908e661f3a6ce202d50d72a436319bb2337ab1babd4f2cf1bffc3de25a09dfc5cffb31c7080c5473b4ff673fdae11e64cd492a784a106beb65bfc01f9b7b97384d877d9f4440b7434240e98656703edd66279f1bd5b7cfacc8a6b511f1db9060e813f2e37a8be5de25087b0520e7729a873e125d7cba84b93cdd333e8756630d9dc9e1815832c8dba1a3c51776948b184a916ae44694664192af75a616387f47319bcd5da1d94fce857c8e76c3438ae5c7c810310058558e01b01cfb5676f1a5a5d027bcd1ec62428a82b78fdc9dfe69ae9c0301f6f2dbf1475e1cd1804d05cb04583ae62efe63a6f1d20d5c5675f4822ddb8f6f6af3d639f56839b1993dc40223341c04d829849dea53aba7d0d2a2db0a89881a2ecee4f66698aef5ebdbb3c6d65ff03cc1a00b714112f0b111e7a97ded2abde97767e0ea6e19a04f96d708d419f457022ac21715ca86305b8d5e4f45d6382c7ce8d87a8f0f2f1a18134deb9a33b334bc04697479c4f438f5e58a62a1b22b49580fd46eb4"
"946d07c505e9c778dc56524880e8fb565487da236bb1340d92dbe21516f40a05dc3cec3fa4a56bc93ce57e7be50ef2fb38c94790acb9702dbf2ed30d6b5cc1e0173ed4c19e2822e79e711a523ecdeb6742d90353c904876e66b30fba8975d35418f0ef3fc8e5621d8d243973addf756d1e4621618fcae42af188a22f47f0f8bd0e821c16c8ca2a15e35d855ccc5c9660ebd2fe8966e6b86326905267b80358328483d0045fc63af4edda4020ecba5853f005b9058dbb81092cc12ebb3205ade902cef207f783a3921225f3a8a108eccf02cc303b11a2a7db60c897f31480db900fb1a6e1ccd1ba0aa61214037e50d8eb1ac777fc4a467ff9b9ffcaf34fe721300067d33a25f9acd43888ba09cbd26e8b269fe84065b5c44fdf734545fe21689b838eec4a00860f654df33f87d0f115a6fc1ba4f0de641f06eb8a19d2e75aad7dddc6f00c8d598015541fc8bd22540b9bd3babbbf3e41212d35cfef1236edfa5746b733de738c60901b87bfc3a4c7d49eb16e7fbb7ab93083cab5c225f79ef03db6d490169b5ecd2791fef9045e017f9dac41dbaf841f050729c6adf789b8008a82e61c80cc4d06207dbfd6b2a9cdfb67ac26280fa9ecc298dac1878fac6188066b9d8637f772136edaa7f64fa491b0bb4775656f5f1a3135686205b8217a590c088cf448892e134a29ef4cc61bd76886663afb18ad504b204ea52ef61782ce9ba44fbf2e18e1d59302a1b69717375be70a295517b069d26e161c91ec3a1a782e38efa6ac867dbe488cfddcf8c200135b059a0"
"da4b4dbadda9b742b906266a879da79da144eba455fa7cc5062d326996acdddec0eba8666b0e1e6c7116a1e5f04f1e94e5d85b77b2d35deb45402a589d46734810ba3a74414eb53181f75c2f0bad61d9f4aaeb94f30a1051f5ba2b2b30f1445bfe889da81e550449d863cd5af77d49d344b63666df8206bc04686ebdaee954da5f14692bc2bf1b4b01cd6b2bfad93dcc7e5c08a5059d047f6ffe96a17c828244b234a2abf28674b15d14b735956c0a9bd438183666d6926912358edea95ac5b1b6a53784f47819a3cfd4ddb9af8e74f30e06c30e218edda9eb8207dc7cd931d6e926af59f8238225dd037b47c7a4c8af558d981a7c9a7dbae3fb66345874b27cb229f1c82b841cac0cad018e8f75d0731d5a8ea0c4d530f575de7d39d77fffde64c9d1fd87b9af3759d8a275d5a1d95f1d2d0bee007544f5c39ecf4013c80cd89821f79af3979f23dfff87d093b85b892b93bec546c5eccabf41d04c65bb571543f2312ed5e3596ec5d6bf8e57e9854164d34b48ca0ca4044a526e038332348eb801a6ff342bf25750abbcfc27e7cb5e7b026db3743b210b91d1fb688c8f16d4e40203d39272f22b5bd0f796f0fa09c90*1*b48bda800b2b3665adca330cfc990283a604b08074521335437c0ed7f2a997069c88d620b638ee988edb3f6f32be1ccd01ffb14b66b2c213d31aad92b25f66f226f2793b5e554475ce8c1a7f9541ce66c594379303ce730fd77a6591c97f5bdc400ba7e8cbd496c188c2112208778ff9699674b117631d8f385ebe45ed91dd60a"
"4a657ca39c11c135e426c03ce2219392f55c635c1736f31b1a7a892273b6d9e2867864606aa0244b82c8be1748123f0b8478baa9402521583f24ac86c11801fe340e64628e8840aee6a093b1bf25aa05c74d1c1dd8ec48321b34a53bf78347a59fa9ee394a60b845cfd4c2f5bc53541065f1c5a0d3953d9808b26ee51d17dc026ea97a2ffae213bb9818f3c4009480ac0d1774e6237546204339db20ab366a805ba8c34304070959a16639006ced72bc3ba6430ef7e5a10e9a969ee233efc23b2d99bd8d49c3615f0da372cb98e077829f07e112a5bf4357a3cdee0268bbee69d31fea1ac66564d4b1c7c303f9b41e2b23b3c7825d1ef93ae1ca1aed1607177bf92cdce38fc68325a652efd3791e922a196eba24e9816c52afeb1d84577b8a22125c1d90beb57cacff4b2a637061d69bf7f1f006d102ca2acb8471909689d36196ec300691ddb9369868f3fd577e463d8b74c7a8e95fe2fd2954136f9650f7301d4a91d9c41f647675d37c1663d4b5c50cfb175facf30598a9be1ecc2f33fd4ec7e1ecc7dffbb1180a5b224b4eb6d0e0af4ecad6cbcb2a26cb3365a723caa2eacf9404083a427d5e7e62e967875e53a8eaf4f5873627717ce802b6b66d627f3390b50c0c950dac739ab46fad66920de3fb8edb0ad0a3c93e7b3beeb90a26a1553aecf4d1f3b17b7f852cf5441bd626012ca14d8e4aa2c43ef6a272f9f6990672b2ead99d839617069117aa10f840c379fc62de5ebf5c82ed59a5a1f76b0fec724ea809411709d88fd2f986c35edf9a562e3fd"
"bb13577e2ac78bb854768ab38850daf931c1b8cc3e6f3c244fb339d288348f88f792954e90b68d664b7f941b634aec4b2d54995ba08b999d32d007e85e7e0df4dc6022b0d6d7a23ac5bcbfb2dd6cdc300fd0e4c9b4403a53a67a1c8979774833ba4b8f338b1932424b8654e02ff039967bb43c3f0661bf22f638a4caef57d50acce63e472f1316fdb93e75218d630d958c1aef855a9a7bc54122a26ff94d78e74d48aff82a485f584b8acbea147666712d35a7167dc5f92ef4059e42c28ba66fbdccaafe71efc630b8ce7fd840bd2802c2d69a4b09a11cf17c9321d9ccfb1623bfaa89786df732b405e2cf118611e9ff153dd2db2df1953fdd888f023e74e23f3a5595b81456b6ffb33e91d65f08fc8eab545412b18be47d14ab77827073286a735187bed1b12fbed879969f7d06c53041a6bd79bf6c5260342480cdb50cb617c2b4111da501ea98f368320094c5353a36df520824ec52dd15e818bec43d80b537c0d809845645429ea4f7635528cb7b8149924053a76d3c05b0c31e5970eaa014708c64c902be5272513111a73e682ed9f473c87b964a4957934424bf957d1e86c6c90a967a8643eec2b65f08d4c91252cb9663a4e5aa4ad9180166ac633c0e5f5170656373489126e6be09e9e8bd6f226f0833bd392884dfce749d68ad51b1f0e0ef5fc5a8876e54558e191abcfc4632409547a8a5c46c2b546db07ba324b4d327ebe86f87dac27b64d6e0c8250019c1114a4f8fa39523dc3f5d597aa33af245ecca15ea8cbef7604eca5ed804ac4f57c12"
"6e335763925b88128b7289566270a5d7d1602481647f74d71bc1eafd0913851bcf07047dfef51b41fc02215d136885e647001f9f47546e9ea6ba0beab1d8a276cf9b85d780c05d4031f55d35d54c56f7fceeae9d62c58e7e928e591c2d6b1d14391f829f3e30bda6132bc513227cfad357be2c6f045bad7be72d01ceccd059327a72ce044edd534a5ddf71831bf07ebe84806feb621a5b8d71f4a608878e5e5daf3f8b4b3eda75f74f03d1ae5aebd029f037f66253f542aa06cd6c29ac5ed27ecdc7641fb6d54c98e71491772944303d3b6be683ac44b7bda5d49209133ff564cee31912b8e024cf628e0719522b11eff2e32874818f9a0ebde427657558a72943d6eb25c4b9d523336f37453af157035a3bc5ffd13847a928450d4e01f2ce7ca51d456939363c3e5a69b0d25311682c7b266cf86d12b63dcd322be77594c7f929a77467566a8d86a7d2b583b95f76626244738251fa762e0b2825c7668d6dde8ac5579c1a06318e5c5a6b2b1bc93bce6cd4853c50b6662482549290b15500722e3d6772c7541e3c864291dcbed84496dcc9ff4dddc974aa8b17b7ccea56c856f24ee2277a391c3c0c2c5584111ed24fe64e478e3c4d22380b8183222570fa3c70d29230aa21fd21808baacfd41e2430fed7c3316235e6b4c2c3331ee36d9e5c94ddbd73b351897cab7ede8a7c417c753d8023cf46694acbc9aa6ca556da7de108005330704cf54b1ec7bf7df02e36cd736237316b3523bca0a53a2472e68d30d95b1eb49282b27530bc69cd154b7a4dce75d"
"a3efc65c12ce45de7a63632d340fc61a1789129df1554813a15c9a6ad101c07363ba8d967b70ae1767f8927440678bab989dbe994922779c3c277055a35bf12d6909caba8a4b6bec7f49dd32426d858e53164c8db77bd1b9321b31e6c1ad1e92596bec4ad39d5b6944c7585a5ad0c6f83f64727a7f6397f784d865ba3b9c85343f3a2828a0e71d75f19036ea0f17e265750d6a01513be2bee0bd0a837996971b87305dafda12679bc118a1df188888396e10074254e4aeecb6801e00e8f3ade2889b65aba9e29d2d146001740116c893df1899175dbbf88ec175216df3d93a88fb6957adf64a3849e26194edb91188c0373fdf9be85a520c173817ccac3e4e9c88ce0bd9448be3f6cf3eb92b9337ecf2e63db5887e1113ee31529c373e83ec02012ddaa8812fa5c6b8be8febe29d0c286fe03832aee79018fdbaedd8bec03345c05faa1231ad148bf4531679738a537ec490bdcf78a0d9dd13e6988e360273c388b91006a66176c93caf3594cb098d5f4287a37d79b636eb566eaeb73ef76a4a480fad73caad3378d17a9395bf71c6c43f643b04b4f1773939329470e51053467b67ed8ac0807b8806d26d16f6f4fc15b3f3cc197d24ea26418cf970a5e7009bd871aff96be823fd80efe1adcaa882c168692b53bdb47effc666a1768d04d0d8bf199d36604e82b72fcce53e86d063c347aeecc79a846f8e12cdec679b857f85a75fe59a1338a411950459443b3fec6511dcc78d5bb6dc60accd6013400c0ef71f19d7713b37777a75e96d0d341d416c9cd94"
"7e3c442f6ddb31daec66bd96ca31b01d2dfb99d312a651ba5ec1765354de39d7aa4bb096ce7edbd93829d8ee2b7e3ff364f5d87f653a541f033db6c3266a03046f8612ad8d56a1c78912c9774c86a8d7e2eaa7f3bb1033470789ac2c32bd3c2ba1269bb01b176b167688f8fbe1f6094c3e2736bdc1cb1733364011681be98047cdad7d998241e121e6508cfd665c42b30f22bc442f940b5c7d93659f59abcb17aab1f28a02d0b59239f148211c525dd209cb932c54f24fa8a9541f0eab28b4c8df80845058e71e5447959bfc7f7d28e15542523410bc162f566875ed6d9d4fba519000b8c5d90f894f2bc74dc8307e26d4e0a9b418487d7470fbd64e97e660a3038a10a26a80e7cca09a3280ce3c87d07befd6f65127096d6075a18f30906828cee1f8b968dd3247210041078cf6d28f05977e5c172a9ecd83167873881e0ffcc56615ad0d64b0189ed8d559e43cccb1e2f8805df7156cb11f5df9dfbc067fce9fb3ee3230e28edfcf98741b9883f9f0f42913cc2be1036a0590107c69a9fadd4c9fc39df872f0db664ea7172fd72e0ad756be95417487d0c2bb38061c52124dcb2545f15a5bfd39d950b5878a067945733d8b1dc37cb85dd9393c98b0751c83d8e848fd1bd3ad243f6a8af7a8cb8cda7e1dc05324fa3932423fea0428131646534e74398f1604146da26a615045ee49ae2df3c8fcd16da64672845a946de4c26c1417c534a2b62a408a8c30c2e4f73ee44571259b628249c9e3f65e7b8d22002a170e7e53dc7c4cdc0073491db2cd6de20cd"
"df07501ff08378ac1cfe3ef479491f3fc475f8aa1fb188706c264e276da3e0399e2bc17cffd6ad0ff94d2d3b9a3b46e8c1472c41fc1c002daa76634f94b3bdf8560cb3241352c6f1be21fee70cd54a1d96e31d71ef99589b93e7ca8d026abcb4a4fbfc8c0f57d59a6d9e760f02fd0a569702da7f59da495c2dd7f92d60fb3220cd7932a032d40ed29deaa5fe971128c6503eb9d1029a23ed6dc4fd5e8c5cf0347841424d60a5a07a9781d08c85222cf7241d199609762488332a6eafbc08cec42c876da9bd3fa287bca12f71b6e33c4453afb970b425a45b9baa9aa69ebb3907e06e6610f100b00c86752b2c106c2e0b71963f1933d315ceef89132c7744149db0c28f62b3d7b43d570d1f5c40bf4b7470b3b8de30b0d756b8326542743f2fa5cf3eff226b6a658ecbe44dc9a0e59f073f999d8c3340ba30ecff6f2fa4f3815f0d4c665b5109ce8984971e5cbec806888c2acdf73d2a330de9e5133787aa4950d08759f4cfcb55ec8efb43d421cf3a9f601a096677eb95f61e352a9adae7c0b971fb455f170c7ed95329b699d6e93f024786507e2e0acbeffb452c26d8c041cb88316d09a08af54ec48451f9bb685a23910e97ac82bb41f19f6b42fa10cfb75f9fa8edd61653c14a27b51544e3fb28009aab76d060135df2d097fd4c2f2e63dba1192c648215fdd1dace4824d71e038e23184ede7f61baefd747aed93b9807d0b3b7b4f7cb9eb171d1ba241b19cf1c74781eaaaca99a458253777522dedcf3d1db6bd4eec4459e59ad635904201b5d91c77bb"
"b6e91f00f5a6f29794b35afde3dcd850f08ac5da097549ded05159567e9f7a023e08e49253766c0e151852714987201e90df675368ee638a947b7e6dc20bedf60656971170afe2d453662685dc1ceef8436ca8071680d0346239b41a6825839e9d5af12f9574d51b4672c5fa7f84bac497c8ba5fad2c10fbffe5ee713090b903d7723cd28c1b189a47c6a9fe9a88d0881dd60d1970c6e8a6d812bbd089c10841e5ced1417bef41f400118fa990d157bca93267d407989de017bd48f0231d43b9487526072e2755461274b3f5bf27847dda36c652a2b1fdd3815fd4ab93863426b31ecd1e6a9094dd2ed0190f8138e650dd2174fcc6b6ab1b8b91cc8020f2dcbb14855e7dd0bc1b5a01f55f81c0476daf1684cc4e72a68327120730ae92c45ab4e447c4ee900d61f79681667eec61343e4eebdd65c5b38a1ba5e3478f4d2f59d184ec39aca445a0f6edaa6840f04bfc19acf23db4507609cbdb44514b36aa5ef4ffe46577b711d1028970916eae919f1b4913d5894a24117cd7cc1aa8965840865554ce663af470455c0f756c795fb29eec04b727b12f7f3796f572ca2ec1e8771a88f68999e16b2acb235a7d9146f85f2be5a034babc3bdde750eb7895396d4777c144aee517a07310dcc8c9ce0ead93abb7f1eb4e34ed5036361d682c97eac1ad7c8158035e40a713f0f2e6f6e677d4b11ecc97e101a5b48420435dd218846ae622b416faeba7e0003bbbece71c2aa046715173b408c8ab2888b0b5dc4c34683f83ba9a83795f86122e6d80597d3a952a44f"
"5a1edb6f294a0ceebefc3cb54db814cf91fe450ed4c71d0b4091a1fc7474", "goodjob"},
{NULL}
};
/* Autotune parameters: STEP = GWS increment (0 = use the library default),
   SEED = initial global work size seed for the tuner. */
#define STEP 0
#define SEED 256

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"

/* Labels prefixed to the autotune timing report; one per profiling event
   recorded in crypt_all() (host->GPU transfer, kernel run, GPU->host read). */
static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Report the largest local work-group size the device allows for
   crypt_kernel; queried by the shared autotuner. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/*
 * Allocate host and device buffers sized for 'gws' candidates in flight.
 *
 * Host side: inbuffer (candidate passwords), outbuffer (PBKDF2-derived
 * keys), cracked (per-index hit flags). Device side: mem_in, mem_setting
 * and mem_out, which are bound to kernel arguments 0..2 once here so
 * crypt_all() only has to enqueue transfers and the kernel.
 * Called (repeatedly, with release_clobj() in between) by the autotuner.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(dmg_password) * gws;
	outsize = sizeof(dmg_hash) * gws;
	settingsize = sizeof(dmg_salt);
	cracked_size = sizeof(*cracked) * gws;

	/* calloc'd buffers start zeroed: no stale keys or hit flags. */
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/*
 * Free everything create_clobj() allocated. The 'cracked' pointer doubles
 * as the "buffers are live" flag, so a second call (or a call before any
 * allocation) is a harmless no-op; MEM_FREE also nulls the host pointers.
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}
/*
 * Format teardown: release buffers, kernel and program, but only if the
 * autotuner actually built them ('autotuned' is the shared build counter
 * decremented here so a later reset() can rebuild).
 */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}
/* Remember our fmt_main and prime the OpenCL device; the (slow) kernel
   build is deferred to reset(), which runs once real hashes are loaded. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/*
 * One-time (per process) OpenCL setup, run when the loaded database is
 * known: compile the generic unsplit PBKDF2-HMAC-SHA1 kernel with the
 * buffer sizes baked in as -D macros, create the kernel, then hand
 * buffer management to the shared autotuner, which probes GWS/LWS by
 * calling create_clobj()/release_clobj(). The trailing 1000 bounds the
 * tuning run (see autotune_run() in opencl-autotune.h for its unit).
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(dmg_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/*
 * Validate a "$dmg$" ciphertext line without building a salt record.
 *
 * Field layout ('*'-separated, after the "$dmg$" tag):
 *   headerver 2: saltlen * salt * ivlen * iv * keyblob_size * keyblob *
 *                chunk_no * data_size * chunk * scp [* zchunk] [* iters]
 *   headerver 1: saltlen * salt * len_wrapped_aes_key * wrapped_aes_key *
 *                len_hmac_sha1_key * hmac_sha1_key [* iters]
 *
 * Every length must fit the fixed-size arrays in struct custom_salt that
 * get_salt() decodes into, and every hex field must be well-formed.
 *
 * Fixes vs. previous revision: drop a redundant second atoi() on ivlen;
 * require the chunk-number field to be decimal (get_salt() atoi()'s it);
 * check data_size <= 8192 (the cs.chunk capacity) BEFORE scanning the
 * chunk hex; validate zchunk and the v1 hmac_sha1_key with hexlenl()
 * instead of bare strlen(), so non-hex data is rejected consistently.
 *
 * Returns 1 if the line is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	int headerver;
	int res, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$dmg$" marker */
	if ((p = strtokm(ctcopy, "*")) == NULL)
		goto err;
	headerver = atoi(p);
	if (headerver == 2) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)	/* cs.salt capacity */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p, &extra) != res * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* ivlen */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 32)	/* cs.iv capacity */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
			goto err;
		if (hexlenl(p, &extra) != res * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted_keyblob_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 128)	/* cs.encrypted_keyblob capacity */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted keyblob */
			goto err;
		if (hexlenl(p, &extra) != res * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk number */
			goto err;
		if (!isdec(p))	/* get_salt() feeds this straight to atoi() */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* data_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 8192)	/* cs.chunk capacity; check before scanning hex */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk */
			goto err;
		if (hexlenl(p, &extra) != res * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* scp */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		/* FIXME: which values are allowed here? */
		if (res == 1) {
			if ((p = strtokm(NULL, "*")) == NULL)	/* zchunk */
				goto err;
			if (hexlenl(p, &extra) != 4096 * 2 || extra)
				goto err;
		}
	}
	else if (headerver == 1) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)	/* cs.salt capacity */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p, &extra) != res * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_wrapped_aes_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 296)	/* cs.wrapped_aes_key capacity */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* wrapped_aes_key */
			goto err;
		if (hexlenl(p, &extra) != res * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_hmac_sha1_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 300)	/* cs.wrapped_hmac_sha1_key capacity */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* hmac_sha1_key */
			goto err;
		if (hexlenl(p, &extra) != res * 2 || extra)
			goto err;
	}
	else
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a previously valid()-ated ciphertext into a struct custom_salt.
 * Hex fields are decoded pairwise via the atoi16[] nibble table. Returns
 * a pointer to a static salt (the caller copies SALT_SIZE bytes out).
 * valid() guarantees every length fits its fixed-size destination array.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip the "$dmg$" tag */
	p = strtokm(ctcopy, "*");
	cs.headerver = atoi(p);
	if (cs.headerver == 2) {
		/* v2: salt, iv, 3DES-wrapped keyblob, one data chunk (+ optional
		   zchunk when scp == 1), optional trailing iteration count. */
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.ivlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.ivlen; i++)
			cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.encrypted_keyblob_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.encrypted_keyblob_size; i++)
			cs.encrypted_keyblob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.cno = atoi(p);	/* chunk number, fed to the HMAC as IV seed */
		p = strtokm(NULL, "*");
		cs.data_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.data_size; i++)
			cs.chunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.scp = atoi(p);
		if (cs.scp == 1) {
			/* "second chunk present": the very first 4096-byte block
			   of the image, used as an extra accept test. */
			p = strtokm(NULL, "*");
			for (i = 0; i < 4096; i++)
				cs.zchunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
					+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		/* Optional iteration-count field; default to 1000 if absent. */
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	else {
		/* v1: salt plus two 3DES-wrapped keys (AES + HMAC-SHA1). */
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_wrapped_aes_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_wrapped_aes_key; i++)
			cs.wrapped_aes_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_hmac_sha1_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_hmac_sha1_key; i++)
			cs.wrapped_hmac_sha1_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	/* Guard against an explicit "0" iterations field. */
	if (cs.iterations == 0)
		cs.iterations = 1000;
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/*
 * Stage the current salt for the GPU: fill the host-side 'currentsalt'
 * staging struct with the PBKDF2 parameters and queue an asynchronous
 * upload into the device settings buffer (kernel arg 2).
 *
 * Fix: the clEnqueueWriteBuffer() source pointer had been corrupted into
 * the mojibake "¤tsalt" (an HTML-entity-mangled "&currentsalt"),
 * which is not valid C; the address-of expression is restored.
 *
 * The kernel always consumes a 20-byte salt and derives 32 key bytes,
 * so length/outlen are fixed regardless of cur_salt->saltlen.
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, 20);
	currentsalt.length = 20;
	currentsalt.outlen = 32;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.skip_bytes = 0;

	/* CL_FALSE: non-blocking; crypt_all()'s later blocking read orders it. */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy setting to gpu");
}
#undef set_key
/* Store one candidate password (truncated to the kernel's maximum
   plaintext length) into the host-side transfer buffer at 'index'. */
static void set_key(char *key, int index)
{
	uint8_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(inbuffer[index].v, key, len);
	inbuffer[index].length = len;
}
/* Return the candidate stored at 'index' as a NUL-terminated C string.
   The result lives in a static buffer valid until the next call. */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	int len = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, len);
	out[len] = 0;
	return out;
}
/*
 * Apple's two-pass 3DES-EDE key unwrap as used by v1 encrypted DMG
 * headers: CBC-decrypt the wrapped blob under a fixed IV (apparently the
 * CMS/RFC 3217 3DES key-wrap IV -- confirm), strip PKCS-style padding,
 * reverse the byte order, then CBC-decrypt again using the first 8
 * reversed bytes as the IV, and re-check the padding.
 *
 * Only padding validity is tested: returning 1 means both passes yielded
 * well-formed padding (the v1 accept criterion); the unwrapped key
 * material itself is discarded.
 */
static int apple_des3_ede_unwrap_key1(const unsigned char *wrapped_key, const int wrapped_key_len, const unsigned char *decryptKey)
{
	DES_key_schedule ks1, ks2, ks3;
	unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char TEMP2[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char IV[8] = { 0x4a, 0xdd, 0xa2, 0x2c, 0x79, 0xe8, 0x21, 0x05 };
	int outlen, i;

	/* 24-byte derived key split into the three DES subkeys. */
	DES_set_key((DES_cblock*)(decryptKey + 0), &ks1);
	DES_set_key((DES_cblock*)(decryptKey + 8), &ks2);
	DES_set_key((DES_cblock*)(decryptKey + 16), &ks3);
	DES_ede3_cbc_encrypt(wrapped_key, TEMP1, wrapped_key_len, &ks1, &ks2, &ks3,
	    (DES_cblock*)IV, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, wrapped_key_len, 8);
	if (outlen < 0)
		return 0;

	/* Reverse the intermediate result for the second pass. */
	for (i = 0; i < outlen; i++)
		TEMP2[i] = TEMP1[outlen - i - 1];
	outlen -= 8;	/* first 8 reversed bytes serve as the IV below */
	DES_ede3_cbc_encrypt(TEMP2 + 8, TEMP1, outlen, &ks1, &ks2, &ks3,
	    (DES_cblock*)TEMP2, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, outlen, 8);
	if (outlen < 0)
		return 0;

	return 1;
}
/*
 * Decide whether 'derived_key' (the GPU's PBKDF2 output for one
 * candidate) unlocks the current salt record. Returns 1 on a hit.
 *
 * v1 headers: a hit is both wrapped keys (AES + HMAC-SHA1) unwrapping
 * with valid padding.
 * v2 headers: 3DES-decrypt the keyblob into AES + HMAC-SHA1 keys,
 * AES-CBC-decrypt the stored chunk (IV = HMAC-SHA1 of the chunk number)
 * and look for plaintext signatures. The production test is a run of
 * 8 NUL bytes; the additional signature tests only exist under
 * DMG_DEBUG and appear to be obsoleted by it.
 */
static int hash_plugin_check_hash(unsigned char *derived_key)
{
	unsigned char hmacsha1_key_[20];
	unsigned char aes_key_[32];
	int ret = 0;

	if (cur_salt->headerver == 1) {
		/* v1: padding-valid unwraps of both keys == accept. */
		if (apple_des3_ede_unwrap_key1(cur_salt->wrapped_aes_key, cur_salt->len_wrapped_aes_key, derived_key) &&
		    apple_des3_ede_unwrap_key1(cur_salt->wrapped_hmac_sha1_key, cur_salt->len_hmac_sha1_key, derived_key)) {
			return 1;
		}
	}
	else {
		DES_key_schedule ks1, ks2, ks3;
		unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
		AES_KEY aes_decrypt_key;
		unsigned char outbuf[8192 + 1];
		unsigned char outbuf2[4096 + 1];
		unsigned char iv[20];
#ifdef DMG_DEBUG
		unsigned char *r;
#endif
		const char nulls[8] = { 0 };

		/* 3DES-EDE decrypt the keyblob with the PBKDF2-derived key. */
		DES_set_key((DES_cblock*)(derived_key + 0), &ks1);
		DES_set_key((DES_cblock*)(derived_key + 8), &ks2);
		DES_set_key((DES_cblock*)(derived_key + 16), &ks3);
		memcpy(iv, cur_salt->iv, 8);
		DES_ede3_cbc_encrypt(cur_salt->encrypted_keyblob, TEMP1,
		    cur_salt->encrypted_keyblob_size, &ks1, &ks2, &ks3,
		    (DES_cblock*)iv, DES_DECRYPT);

		/* Keyblob layout: AES key first, HMAC-SHA1 key overlapping
		   the first 20 bytes (see dmg2john / vfdecrypt). */
		memcpy(aes_key_, TEMP1, 32);
		memcpy(hmacsha1_key_, TEMP1, 20);
		/* Per-chunk IV = HMAC-SHA1(key, chunk_number). */
		hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cur_salt->cno, 4, iv, 20);
		/* 48-byte keyblob => AES-128, otherwise AES-256. */
		if (cur_salt->encrypted_keyblob_size == 48)
			AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
		else
			AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
		AES_cbc_encrypt(cur_salt->chunk, outbuf, cur_salt->data_size, &aes_decrypt_key, iv, AES_DECRYPT);

		/* 8 consecutive nulls */
		if (memmem(outbuf, cur_salt->data_size, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
			if (!bench_running)
				fprintf(stderr, "NULLS found!\n\n");
#endif
			ret = 1;
		}

/* These tests seem to be obsoleted by the 8xNULL test */
#ifdef DMG_DEBUG
		/* </plist> is a pretty generic signature for Apple */
		if (memmem(outbuf, cur_salt->data_size, (void*)"</plist>", 8)) {
			if (!bench_running)
				fprintf(stderr, "</plist> found!\n\n");
			ret = 1;
		}

		/* Journalled HFS+ */
		if (memmem(outbuf, cur_salt->data_size, (void*)"jrnlhfs+", 8)) {
			if (!bench_running)
				fprintf(stderr, "jrnlhfs+ found!\n\n");
			ret = 1;
		}

		/* Handle compressed DMG files, CMIYC 2012 and self-made
		   samples. Is this test obsoleted by the </plist> one? */
		if ((r = memmem(outbuf, cur_salt->data_size, (void*)"koly", 4))) {
			unsigned int *u32Version = (unsigned int *)(r + 4);

			if (HTONL(*u32Version) == 4) {
				if (!bench_running)
					fprintf(stderr, "koly found!\n\n");
				ret = 1;
			}
		}

		/* Handle VileFault sample images */
		if (memmem(outbuf, cur_salt->data_size, (void*)"EFI PART", 8)) {
			if (!bench_running)
				fprintf(stderr, "EFI PART found!\n\n");
			ret = 1;
		}

		/* Apple is a good indication but it's short enough to
		   produce false positives */
		if (memmem(outbuf, cur_salt->data_size, (void*)"Apple", 5)) {
			if (!bench_running)
				fprintf(stderr, "Apple found!\n\n");
			ret = 1;
		}
#endif /* DMG_DEBUG */

		/* Second buffer test. If present, *this* is the very first block of the DMG */
		if (cur_salt->scp == 1) {
			int cno = 0;	/* zchunk is always chunk 0 */

			hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cno, 4, iv, 20);
			if (cur_salt->encrypted_keyblob_size == 48)
				AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
			else
				AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->zchunk, outbuf2, 4096, &aes_decrypt_key, iv, AES_DECRYPT);

			/* 8 consecutive nulls */
			if (memmem(outbuf2, 4096, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
				if (!bench_running)
					fprintf(stderr, "NULLS found in alternate block!\n\n");
#endif
				ret = 1;
			}
#ifdef DMG_DEBUG
			/* This test seems to be obsoleted by the 8xNULL test */
			if (memmem(outbuf2, 4096, (void*)"Press any key to reboot", 23)) {
				if (!bench_running)
					fprintf(stderr, "MS-DOS UDRW signature found in alternate block!\n\n");
				ret = 1;
			}
#endif /* DMG_DEBUG */
		}

#ifdef DMG_DEBUG
		/* Write block as hex, strings or raw to a file. */
		if (ret && !bench_running) {
#if DMG_DEBUG == 4
			int fd;

			if ((fd = open("dmg.debug.main", O_RDWR | O_CREAT | O_TRUNC, 0660)) == -1)
				perror("open()");
			else {
#if FCNTL_LOCKS
				struct flock lock = { 0 };

				lock.l_type = F_WRLCK;
				while (fcntl(fd, F_SETLKW, &lock)) {
					if (errno != EINTR)
						pexit("fcntl(F_WRLCK)");
				}
#elif OS_FLOCK
				while (flock(fd, LOCK_EX)) {
					if (errno != EINTR)
						pexit("flock(LOCK_EX)");
				}
#endif
				if ((write(fd, outbuf, cur_salt->data_size) == -1))
					perror("write()");
				if (cur_salt->scp == 1)
					if ((write(fd, outbuf2, 4096) == -1))
						perror("write()");
				if (close(fd))
					perror("close");
			}
#endif
#if DMG_DEBUG == 3
			dump_stuff(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_stuff(outbuf2, 4096);
			}
#endif
#if DMG_DEBUG == 2
			dump_text(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_text(outbuf2, 4096);
			}
#endif
		}
#endif /* DMG_DEBUG */
	}

	return ret;
}
/*
 * One batch: upload candidates, run the PBKDF2 kernel, read the derived
 * keys back, then run the (CPU-side, OpenMP-parallel) DMG plaintext
 * checks over the results. Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	/* Round GWS up so it is a multiple of the local work size. */
	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* Clear hit flags left over from the previous batch. */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (CL_TRUE: blocks until the kernel is done)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	/* During autotuning only the GPU timing matters; skip the CPU checks. */
	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (hash_plugin_check_hash((unsigned char*)outbuffer[index].v) == 1)
		{
			cracked[index] = 1;	/* each thread writes a distinct index */
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}

	return count;
}
/* Fast batch check: nonzero iff crypt_all() flagged at least one hit. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate check: the hit flag recorded by crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Hits were already verified by the plaintext-signature test in
   hash_plugin_check_hash(); nothing further to compare. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Tunable-cost reporter: expose the salt's PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/*
 * Format registration table. Field order is dictated by struct fmt_main:
 * first the static parameters, then the method pointers. Defaults
 * (fmt_default_*) are used wherever this format needs no special
 * handling; binary_hash/get_hash are defaults because matching is done
 * via the cracked[] flags, not binary comparison.
 */
struct fmt_main fmt_opencl_dmg = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef DMG_DEBUG
		/* Debug builds may report false positives, so tell the core
		   that matches are not exact. */
		FMT_NOT_EXACT |
#endif
#ifdef _OPENMP
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
		{
			/* Tunable cost name, paired with iteration_count() below. */
			"iteration count",
		},
		{ FORMAT_TAG },
		dmg_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
Cover.h | /*
* Cover.h
*
* Created on: 03.10.2013
* Author: cls
*/
#ifndef COVER_H_
#define COVER_H_
#include <cinttypes>
#include <set>
#include <vector>
#include <map>
#include <cassert>
#include <limits>
#include "Partition.h"
namespace NetworKit {
typedef uint64_t index;
typedef uint64_t count;
/**
 * @ingroup structures
 * Implements a cover of a set, i.e. an assignment of
 * its elements to possibly overlapping subsets.
 *
 * Each element e in [0, z] maps to a std::set of subset ids. An element with
 * an empty set is considered unassigned.
 */
class Cover {
public:
    /** Default constructor */
    Cover();
    /**
     * Create a new cover data structure for elements up to a maximum element index.
     *
     * @param[in] z maximum index
     */
    Cover(index z);
    /**
     * Creates a new cover data structure which contains the given partition.
     *
     * @param[in] p The partition to construct the cover from
     */
    Cover(const Partition &p);
    /** Default destructor */
    virtual ~Cover() = default;
    /**
     * Index operator.
     *
     * Returns a mutable reference to the set of subset ids of element @a e.
     * NOTE: no bounds check is performed on @a e.
     *
     * @param[in] e an element
     */
    inline std::set<index>& operator [](const index& e) {
        return this->data[e];
    }
    /**
     * Index operator for const instances of this class.
     *
     * @param[in] e an element
     */
    inline const std::set<index>& operator [](const index& e) const {
        return this->data[e];
    }
    /**
     * Return the ids of subsets in which the element @a e is contained.
     *
     * Returns a copy of the stored set; use operator[] to avoid the copy.
     *
     * @param[in] e an element
     * @return A set of subset ids in which @a e is contained.
     */
    inline std::set<index> subsetsOf(index e) const {
        // TODO: assert (e < this->numberOfElements());
        return this->data[e];
    }
    /**
     * Check if cover assigns a valid subset to the element @a e.
     *
     * @param e an element.
     * @return @c true, if @a e is assigned to a valid subset, @c false otherwise.
     */
    bool contains(index e) const;
    /**
     * Check if two elements @a e1 and @a e2 belong to the same subset.
     *
     * @param e1 an element.
     * @param e2 an element.
     * @return @c true, if @a e1 and @a e2 belong to the same subset, @c false otherwise.
     */
    bool inSameSubset(index e1, index e2) const;
    /**
     * Get the members of a specific subset @a s.
     *
     * @return The set of members of subset @a s.
     */
    std::set<index> getMembers(const index s) const;
    /**
     * Add the (previously unassigned) element @a e to the set @a s.
     * @param[in] s a subset
     * @param[in] e an element
     */
    void addToSubset(index s, index e);
    /**
     * Remove the element @a e from the set @a s.
     * @param[in] s a subset
     * @param[in] e an element
     */
    void removeFromSubset(index s, index e);
    /**
     * Move the element @a e to subset @a s, i.e. remove it from all
     * other subsets and place it in the subset.
     * @param[in] s a subset
     * @param[in] e an element
     */
    void moveToSubset(index s, index e);
    /**
     * Creates a singleton set containing the element @a e and returns the index of the new set.
     * @param[in] e an element
     * @return The index of the new set.
     */
    index toSingleton(index e);
    /**
     * Assigns every element to a singleton set.
     * Set id is equal to element id.
     */
    void allToSingletons();
    /**
     * Assigns the elements from both sets to a new set.
     * @param[in] s a subset
     * @param[in] t a subset
     */
    void mergeSubsets(index s, index t);
    /**
     * Get an upper bound for the subset ids that have been assigned.
     * (This is the maximum id + 1.)
     *
     * @return An upper bound.
     */
    index upperBound() const;
    /**
     * Get a lower bound for the subset ids that have been assigned.
     * @return A lower bound.
     */
    index lowerBound() const;
    /**
     * Get a list of subset sizes. Indices do not necessarily correspond to subset ids.
     *
     * @return A list of subset sizes.
     */
    std::vector<count> subsetSizes() const;
    /**
     * Get a map from subset id to size of the subset.
     *
     * @return A map from subset id to size of the subset.
     */
    std::map<index, count> subsetSizeMap() const;
    /**
     * Get the current number of sets in this cover.
     *
     * @return The number of sets in this cover.
     */
    count numberOfSubsets() const;
    /**
     * Get the current number of elements in this cover.
     *
     * @return The current number of elements.
     */
    count numberOfElements() const;
    /**
     * Get the ids of nonempty subsets.
     *
     * @return A set of ids of nonempty subsets.
     */
    std::set<index> getSubsetIds() const;
    /**
     * Sets an upper bound for the subset ids that CAN be assigned.
     *
     * @param[in] upper highest assigned subset ID + 1
     */
    void setUpperBound(index upper);
    /**
     * Iterate over all entries (node, set of subset ids of node) and execute callback
     * function @a func (lambda closure).
     *
     * @param func Takes parameters <code>(node, std::set&lt;index&gt;)</code>, i.e. the
     *        element id and the set of subset ids it belongs to.
     */
    template<typename Callback> void forEntries(Callback func) const;
    /**
     * Iterate over all entries (node, set of subset ids of node) in parallel and execute
     * callback function @a handle (lambda closure).
     *
     * @param handle Takes parameters <code>(node, std::set&lt;index&gt;)</code>; must be
     *        thread-safe, since it is invoked concurrently from OpenMP threads.
     */
    template<typename Callback> void parallelForEntries(Callback handle) const;
private:
    index z; //!< maximum element index that can be mapped
    index omega; //!< maximum subset index ever assigned
    std::vector<std::set<index>> data; //!< data container, indexed by element id, containing set of subset ids
    /**
     * Allocates and returns a new subset id.
     * NOTE(review): increments omega without synchronization — not safe to call
     * concurrently; confirm callers only use it single-threaded.
     */
    inline index newSubsetId() {
        omega++;
        index s = omega;
        return s;
    }
};
template<typename Callback>
inline void Cover::forEntries(Callback handle) const {
for (index e = 0; e <= this->z; e += 1) {
handle(e, data[e]);
}
}
/**
 * Parallel counterpart of forEntries(): invokes @a handle(e, data[e]) for every
 * element e in [0, z], distributing iterations across OpenMP threads.
 * @a handle must therefore be thread-safe; invocation order is unspecified.
 */
template<typename Callback>
inline void Cover::parallelForEntries(Callback handle) const {
    #pragma omp parallel for
    for (index e = 0; e <= this->z; e += 1) {
        handle(e, data[e]);
    }
}
} /* namespace NetworKit */
#endif /* COVER_H_ */
|
partial.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
/*
 * Build a "partial" extended+i (ext+i) interpolation operator P.
 *
 * Rows of P are built only for the "old coarse" points of the previous level
 * (CF_marker[i] == 1, i.e. still coarse, or CF_marker[i] == -2, i.e. newly
 * demoted to fine).  C-points interpolate by identity; -2 points receive
 * ext+i weights computed from strong C-neighbors and C-neighbors of strong
 * F-neighbors, including off-processor connections gathered via A_ext/Sop.
 *
 * Parameters:
 *   A                   fine-level operator (ParCSR)
 *   CF_marker           C/F splitting; modified on exit (values < -1 reset to -1)
 *   S                   strength-of-connection matrix
 *   num_cpts_global     global range of new coarse points on this proc
 *   num_old_cpts_global global range of old coarse points on this proc
 *   num_functions/dof_func  system-AMG function info (weak connections across
 *                       functions are not accumulated into the diagonal)
 *   debug_flag          4 => print wall-clock timings
 *   trunc_factor/max_elmts  truncation applied to P after assembly
 *   P_ptr               output: the assembled interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * Structure: two OpenMP-parallel passes over the old-coarse rows — pass 1
 * counts nonzeros per thread, thread 0 prefix-sums and allocates, pass 2
 * fills the CSR arrays.
 */
HYPRE_Int
hypre_BoomerAMGBuildPartialExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                       hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                                       HYPRE_BigInt *num_old_cpts_global,
                                       HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                                       HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                                       hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] -= hypre_MPI_Wtime();
#endif
   /* Communication Variables */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int              my_id, num_procs;
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /*HYPRE_Int              num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int             *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this proc's global column ownership range; used to
    * classify global indices from A_ext/Sop as diag vs offd. */
   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt     total_global_cpts, my_first_cpt;
   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;
   HYPRE_Real      *P_diag_data = NULL;
   HYPRE_Int       *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real      *P_offd_data = NULL;
   HYPRE_Int       *P_offd_i, *P_offd_j = NULL;
   /*HYPRE_Int             *col_map_offd_P = NULL;*/
   HYPRE_Int        P_diag_size;
   HYPRE_Int        P_offd_size;
   /*HYPRE_Int             *P_marker = NULL;
   HYPRE_Int             *P_marker_offd = NULL;*/
   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *tmp_CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;
   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data;
   HYPRE_Int       *A_ext_i;
   HYPRE_BigInt    *A_ext_j;
   HYPRE_Int       *fine_to_coarse = NULL;
   HYPRE_BigInt    *fine_to_coarse_offd = NULL;
   HYPRE_Int       *old_coarse_to_fine = NULL;
   HYPRE_Int        full_off_procNodes;
   hypre_CSRMatrix *Sop;
   HYPRE_Int       *Sop_i;
   HYPRE_BigInt    *Sop_j;
   HYPRE_Int        sgn;
   /* Variables to keep count of interpolatory points */
   /*HYPRE_Int              jj_counter, jj_counter_offd;
   HYPRE_Int              jj_begin_row, jj_end_row;
   HYPRE_Int              jj_begin_row_offd = 0;
   HYPRE_Int              jj_end_row_offd = 0;
   HYPRE_Int              coarse_counter, coarse_counter_offd; */
   HYPRE_Int        n_coarse_old;
   HYPRE_BigInt     total_old_global_cpts;
   /* Interpolation weight variables */
   HYPRE_Real       sum, diagonal, distribute;
   /*HYPRE_Int              strong_f_marker = -2;*/
   /* Loop variables */
   /*HYPRE_Int              index;*/
   HYPRE_Int        cnt, old_cnt;
   HYPRE_Int        start_indexing = 0;
   HYPRE_Int        i;
   /*HYPRE_Int        i, ii, i1, i2, j, jj, kk, k1, jj1;*/
   /* Definitions */
   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;
   HYPRE_Real       wall_time;
   HYPRE_Int        max_num_threads;
   /* Per-thread nonzero counts for prefix-sum partitioning of P's arrays */
   HYPRE_Int       *P_diag_array = NULL;
   HYPRE_Int       *P_offd_array = NULL;
   hypre_ParCSRCommPkg   *extend_comm_pkg = NULL;
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   max_num_threads = hypre_NumThreads();
   my_first_cpt = num_cpts_global[0];
   /*my_first_old_cpt = num_old_cpts_global[0];*/
   n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
   /*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
   /* Last rank knows the global totals; broadcast them to everyone. */
   if (my_id == (num_procs -1))
   {
      total_global_cpts = num_cpts_global[1];
      total_old_global_cpts = num_old_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* Set up off processor information (specifically for neighbors of
    * neighbors): gathers CF_marker, dof_func, full rows of A (A_ext) and of
    * S (Sop) for off-processor columns, including distance-2 neighbors. */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      if (hypre_exchange_interp_data(
             &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
             A, CF_marker, S, num_functions, dof_func, 1))
      {
#ifdef HYPRE_PROFILE
         /* NOTE(review): this error path updates the EXTENDED_I_INTERP timer
          * although entry charged PARTIAL_INTERP — confirm intended. */
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
         return hypre_error_flag;
      }
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }
   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
   if (n_fine)
   {
      old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      /*P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); */
   }
   if (full_off_procNodes)
   {
      /*P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);*/
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }
   /*hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
     fine_to_coarse_offd, P_marker, P_marker_offd,
     tmp_CF_marker_offd);*/
   for (i=0; i < full_off_procNodes; i++)
   {
      fine_to_coarse_offd[i] = -1;
      tmp_CF_marker_offd[i] = -1;
   }
   /* Build the two index maps: fine_to_coarse numbers the new C-points
    * consecutively; old_coarse_to_fine lists every old coarse point
    * (still-C == 1, or newly-F == -2) in fine-grid order.  Rows of P
    * correspond to entries of old_coarse_to_fine. */
   cnt = 0;
   old_cnt = 0;
   for (i = 0; i < n_fine; i++)
   {
      fine_to_coarse[i] = -1;
      if (CF_marker[i] == 1)
      {
         fine_to_coarse[i] = cnt++;
         old_coarse_to_fine[old_cnt++] = i;
      }
      else if (CF_marker[i] == -2)
      {
         old_coarse_to_fine[old_cnt++] = i;
      }
   }
   P_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
   P_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, diagonal, distribute, sgn, sum)
#endif
   {
      HYPRE_Int ii, jj_counter, jj_counter_offd, jj, kk, i1, i2, k1, jj1;
      HYPRE_BigInt big_k1;
      HYPRE_Int loc_col, jj_begin_row, jj_begin_row_offd;
      HYPRE_Int jj_end_row, jj_end_row_offd, strong_f_marker;
      HYPRE_Int size, rest, ne, ns;
      HYPRE_Int num_threads, my_thread_num;
      HYPRE_Int *P_marker = NULL;
      HYPRE_Int *P_marker_offd = NULL;
      strong_f_marker = -2;
      /* Statically partition the old-coarse rows [ns, ne) across threads,
       * giving the first `rest` threads one extra row. */
      num_threads = hypre_NumActiveThreads();
      my_thread_num = hypre_GetThreadNum();
      size = n_coarse_old/num_threads;
      rest = n_coarse_old - size*num_threads;
      if (my_thread_num < rest)
      {
         ns = my_thread_num*(size+1);
         ne = (my_thread_num+1)*(size+1);
      }
      else
      {
         ns = my_thread_num*size+rest;
         ne = (my_thread_num+1)*size+rest;
      }
      /* Thread-private markers: P_marker[col] >= jj_begin_row means the
       * column was already added to the current row of P. */
      if (n_fine) P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      for (ii=0; ii < n_fine; ii++)
         P_marker[ii] = -1;
      if (full_off_procNodes) P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
      for (ii=0; ii < full_off_procNodes; ii++)
         P_marker_offd[ii] = -1;
      /*coarse_counter = 0;
      coarse_counter_offd = 0;*/
      jj_counter = start_indexing;
      jj_counter_offd = start_indexing;
      /* -------- Pass 1: count nonzeros of P for this thread's rows -------- */
      for (ii = ns; ii < ne; ii++)
      {
         jj_begin_row = jj_counter;
         jj_begin_row_offd = jj_counter_offd;
         /*P_diag_i[ii] = jj_counter;
         if (num_procs > 1)
            P_offd_i[ii] = jj_counter_offd;*/
         i = old_coarse_to_fine[ii];
         if (CF_marker[i] > 0)
         {
            /* C-point row: single identity entry */
            jj_counter++;
            /*coarse_counter++;*/
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i, or C-points that stronly influence F-points
          *  that strongly influence i.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] == -2)
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] > 0)
               { /* i1 is a C point */
                  if (P_marker[i1] < jj_begin_row)
                  {
                     P_marker[i1] = jj_counter;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               { /* i1 is a F point, loop through it's strong neighbors */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] > 0)
                     {
                        if(P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if(P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            /* Look at off diag strong connections of i */
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     if(P_marker_offd[i1] < jj_begin_row_offd)
                     {
                        tmp_CF_marker_offd[i1] = 1;
                        P_marker_offd[i1] = jj_counter_offd;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  { /* F point; look at neighbors of i1. Sop contains global col
                     * numbers and entries that could be in S_diag or S_offd or
                     * neither. */
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        if(big_k1 >= col_1 && big_k1 < col_n)
                        { /* In S_diag */
                           loc_col = (HYPRE_Int)(big_k1-col_1);
                           if(P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           /* negative entries in Sop encode offd indices as
                            * -(local offd column) - 1 */
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if(P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Running totals stored every iteration; after the last row of this
          * thread they hold the thread's final nonzero counts. */
         P_diag_array[my_thread_num] = jj_counter;
         P_offd_array[my_thread_num] = jj_counter_offd;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Thread 0: prefix-sum the per-thread counts and allocate P's arrays. */
      if (my_thread_num == 0)
      {
         if (debug_flag==4)
         {
            wall_time = time_getWallclockSeconds() - wall_time;
            hypre_printf("Proc = %d     determine structure    %f\n",
                         my_id, wall_time);
            fflush(NULL);
         }
         /*-----------------------------------------------------------------------
          *  Allocate  arrays.
          *-----------------------------------------------------------------------*/
         if (debug_flag== 4) wall_time = time_getWallclockSeconds();
         for (i=0; i < max_num_threads; i++)
         {
            P_diag_array[i+1] += P_diag_array[i];
            P_offd_array[i+1] += P_offd_array[i];
         }
         P_diag_size = P_diag_array[max_num_threads];
         P_offd_size = P_offd_array[max_num_threads];
         if (P_diag_size)
         {
            P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
            P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
         }
         if (P_offd_size)
         {
            P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
            P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
         }
         P_diag_i[n_coarse_old] = P_diag_size;
         P_offd_i[n_coarse_old] = P_offd_size;
         /* Fine to coarse mapping */
         if(num_procs > 1)
         {
            hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                       full_off_procNodes, my_first_cpt,
                                       fine_to_coarse_offd);
         }
      }
      /* All threads reset their private markers for pass 2 (safe to run
       * concurrently with thread 0's allocation above). */
      for (i = 0; i < n_fine; i++)
         P_marker[i] = -1;
      for (i = 0; i < full_off_procNodes; i++)
         P_marker_offd[i] = -1;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Each thread starts writing at the prefix-summed offset of the
       * previous thread. */
      jj_counter = start_indexing;
      jj_counter_offd = start_indexing;
      if (my_thread_num)
      {
         jj_counter = P_diag_array[my_thread_num-1];
         jj_counter_offd = P_offd_array[my_thread_num-1];
      }
      /*-----------------------------------------------------------------------
       *  Loop over fine grid points: Pass 2 fills P_diag/P_offd.
       *-----------------------------------------------------------------------*/
      for (ii = ns; ii < ne; ii++)
      {
         jj_begin_row = jj_counter;
         jj_begin_row_offd = jj_counter_offd;
         P_diag_i[ii] = jj_counter;
         P_offd_i[ii] = jj_counter_offd;
         i = old_coarse_to_fine[ii];
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] > 0)
         {
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] == -2)
         {
            /* Use a fresh marker value for strong F-neighbors of this row so
             * stale marks from earlier rows cannot collide. */
            strong_f_marker--;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  if (P_marker[i1] < jj_begin_row)
                  {
                     P_marker[i1] = jj_counter;
                     P_diag_j[jj_counter]    = fine_to_coarse[i1];
                     P_diag_data[jj_counter] = zero;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if(CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            if ( num_procs > 1)
            {
               for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if ( CF_marker_offd[i1] >= 0)
                  {
                     if(P_marker_offd[i1] < jj_begin_row_offd)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     P_marker_offd[i1] = strong_f_marker;
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        /* Find local col number */
                        if(big_k1 >= col_1 && big_k1 < col_n)
                        {
                           loc_col = (HYPRE_Int)(big_k1-col_1);
                           if(P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if(P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd]=loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            jj_end_row = jj_counter;
            jj_end_row_offd = jj_counter_offd;
            /* Accumulate the classical+ext interpolation weights. The first
             * diag entry of row i is assumed to be the diagonal a_ii. */
            diagonal = A_diag_data[A_diag_i[i]];
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            { /* i1 is a c-point and strongly influences i, accumulate
               * a_(i,i1) into interpolation weight */
               i1 = A_diag_j[jj];
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               else if(P_marker[i1] == strong_f_marker)
               {
                  /* i1 is a strong F-neighbor: distribute a_(i,i1) over i1's
                   * strong C-connections, weighted by a_(i1,*), considering
                   * only entries with sign opposite to a_(i1,i1). */
                  sum = zero;
                  sgn = 1;
                  if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly incluence i. */
                  for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
                        sum += A_diag_data[jj1];
                  }
                  if(num_procs > 1)
                  {
                     for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if(P_marker_offd[i2] >= jj_begin_row_offd &&
                           (sgn*A_offd_data[jj1]) < 0)
                           sum += A_offd_data[jj1];
                     }
                  }
                  if(sum != 0)
                  {
                     distribute = A_diag_data[jj]/sum;
                     /* Loop over row of A for point i1 and do the distribution */
                     for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                           P_diag_data[P_marker[i2]] +=
                              distribute*A_diag_data[jj1];
                        if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
                           diagonal += distribute*A_diag_data[jj1];
                     }
                     if(num_procs > 1)
                     {
                        for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if(P_marker_offd[i2] >= jj_begin_row_offd &&
                              (sgn*A_offd_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[i2]] +=
                                 distribute*A_offd_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     /* No eligible C-connections: lump a_(i,i1) into the
                      * diagonal instead. */
                     diagonal += A_diag_data[jj];
                  }
               }
               /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
                * diagonal */
               else if (CF_marker[i1] != -3)
               {
                  if(num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }
            if(num_procs > 1)
            {
               /* Same accumulation for off-processor connections, using the
                * gathered external rows A_ext. */
               for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if(P_marker_offd[i1] >= jj_begin_row_offd)
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  else if(P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;
                     for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if(big_k1 >= col_1 && big_k1 < col_n)
                        { /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
                              sum += A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                              sum += A_ext_data[jj1];
                        }
                     }
                     if(sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                        {
                           big_k1 = A_ext_j[jj1];
                           if(big_k1 >= col_1 && big_k1 < col_n)
                           { /* diag */
                              loc_col = (HYPRE_Int)(big_k1 - col_1);
                              if(P_marker[loc_col] >= jj_begin_row)
                                 P_diag_data[P_marker[loc_col]] += distribute*
                                    A_ext_data[jj1];
                              if(loc_col == i)
                                 diagonal += distribute*A_ext_data[jj1];
                           }
                           else
                           {
                              loc_col = -(HYPRE_Int)big_k1 - 1;
                              if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                                 P_offd_data[P_marker_offd[loc_col]] += distribute*
                                    A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }
            /* Normalize the row: w_ij = -accumulated_ij / diagonal. */
            if (diagonal)
            {
               for(jj = jj_begin_row; jj < jj_end_row; jj++)
                  P_diag_data[jj] /= -diagonal;
               for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
                  P_offd_data[jj] /= -diagonal;
            }
         }
         strong_f_marker--;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   } /* end parallel region */
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     fill structure    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                total_old_global_cpts,
                                total_global_cpts,
                                num_old_cpts_global,
                                num_cpts_global,
                                0,
                                P_diag_i[n_coarse_old],
                                P_offd_i[n_coarse_old]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_coarse_old];
      P_offd_size = P_offd_i[n_coarse_old];
   }
   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if(P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }
   hypre_MatvecCommPkgCreate(P);
   /* Collapse the temporary -2 marks back to ordinary F-points (-1). */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] < -1) CF_marker[i] = -1;
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
   hypre_TFree(P_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(P_offd_array, HYPRE_MEMORY_HOST);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime();
#endif
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialStdInterp
* Comment: The interpolatory weighting can be changed with the sep_weight
* variable. This can enable not separating negative and positive
* off diagonals in the weight formula.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int sep_weight,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa, beta;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, j1, jj, kk, k1;
HYPRE_BigInt big_k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
 * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which is a modification to a, used in the standard
* interpolation routine. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd]=i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] > 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] > 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
cnt_c = 0;
cnt_f = jj_end_row-jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
ahat[indx] += A_diag_data[jj];
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
ahat[indx] -= A_diag_data[kk]*distribute;
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk]*distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk]*distribute;
}
}
if(num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
ahat_offd[indx] -= A_offd_data[kk]*distribute;
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
}
}
}
}
}
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
ahat_offd[indx] += A_offd_data[jj];
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
{
big_k1 = A_ext_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
ahat[indx] -= A_ext_data[kk]*distribute;
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk]*distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk]*distribute;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(num_functions == 1 ||
dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
ahat_offd[indx] -= A_ext_data[kk]*distribute;
else if(P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if(sep_weight == 1)
{
for (jj=0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C*diagonal != 0.0) alfa = sum_neg/sum_neg_C/diagonal;
if (sum_pos_C*diagonal != 0.0) beta = sum_pos/sum_pos_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
P_diag_data[jj] = -beta*ahat[j1];
else
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
P_offd_data[jj] = -beta*ahat_offd[j1];
else
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
else
{
for (jj=0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C*diagonal != 0.0) alfa = sum/sum_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag==4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildPartialExtInterp
 *
 * Builds a partial extended ("Ext") interpolation matrix P whose rows
 * correspond to the old coarse points (CF_marker == 1 or -2): points that
 * remain coarse (CF_marker > 0) get identity rows, while points marked -2
 * are interpolated from strongly influencing C-points, including C-points
 * reached through strongly influencing F-point neighbors (distance-two
 * extension across processor boundaries via A_ext/Sop).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
 * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
 * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row )
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
|
pr59467.c | /* PR libgomp/59467 */
int v;  /* file-scope (shared) variable: copyprivate(v) below must be diagnosed */
/* Exercise copyprivate restrictions: the listed variable must be private
 * (or threadprivate) in the enclosing context.  The dg-error directives
 * mark the clauses GCC must reject; the remaining regions (private,
 * reduction, orphaned single on a local) carry no dg-error and must
 * compile cleanly. */
void
foo (void)
{
int x = 0, y = 0;
/* x is shared in this parallel region, so copyprivate (x) is invalid */
#pragma omp parallel
{
int z;
#pragma omp single copyprivate (x) /* { dg-error "is not threadprivate or private in outer context" } */
{
#pragma omp atomic write
x = 6;
}
#pragma omp atomic read
z = x;
#pragma omp atomic
y += z;
}
/* v has file scope (shared), so copyprivate (v) is invalid as well */
#pragma omp parallel
{
int z;
#pragma omp single copyprivate (v) /* { dg-error "is not threadprivate or private in outer context" } */
{
#pragma omp atomic write
v = 6;
}
#pragma omp atomic read
z = v;
#pragma omp atomic
y += z;
}
/* x is privatized here, so copyprivate (x) is accepted */
#pragma omp parallel private (x)
{
int z;
#pragma omp single copyprivate (x)
{
#pragma omp atomic write
x = 6;
}
#pragma omp atomic read
z = x;
#pragma omp atomic
y += z;
}
x = 0;
/* reduction also privatizes x, so this copyprivate is accepted */
#pragma omp parallel reduction (+:x)
{
#pragma omp single copyprivate (x)
{
#pragma omp atomic write
x = 6;
}
#pragma omp atomic
y += x;
}
/* orphaned single: no diagnostic expected for the local x ... */
#pragma omp single copyprivate (x)
{
x = 7;
}
/* ... but the file-scope v must still be rejected */
#pragma omp single copyprivate (v) /* { dg-error "is not threadprivate or private in outer context" } */
{
#pragma omp atomic write
v = 6;
}
}
|
rose_Stress-1_47.c | //#include <float.h>
//#include <math.h>
/* Min/max helper macros.
 * NOTE(review): the arguments are not parenthesized inside the comparison,
 * so pass only simple operands (no operators with lower precedence). */
#define MIN(a, b) ( (a < b) ? a : b)
#define MAX(a, b) ( (a > b) ? a : b)
#include "omp.h"
/* Working floating-point precision for all kernels in this file. */
typedef double real8;
/* Zero the updated stress components of any zone whose accumulated strain
 * exceeds the failure threshold, and clamp that zone's strain to just past
 * the threshold so later checks keep tripping.
 *
 * newSxx..newTyz    : updated stress components, indexed by i (0..length-1)
 * eps               : accumulated strain per zone, indexed by zoneset[i]
 * eps_failure_model : strain threshold at which a zone is considered failed
 * zoneset           : maps compressed index i to a zone index
 * length            : number of entries in zoneset
 *
 * Fix: the original computed `index = zoneset[i]` and then never used it,
 * re-reading zoneset[i] up to three times per iteration; the hoisted index
 * is now actually used. */
void StressCheckEpsFail(real8 *newSxx,real8 *newSyy,real8 *newSzz,real8 *newTxy,real8 *newTxz,real8 *newTyz,real8 *eps,real8 eps_failure_model,const int *zoneset,int length)
{
    int i;
    int index;
#pragma omp parallel for private (index,i) firstprivate (eps_failure_model,length)
    for (i = 0; i <= length - 1; i += 1) {
        index = zoneset[i];  /* hoist the indirection: one load per iteration */
        if (eps[index] > eps_failure_model) {
            newSxx[i] = 0.0;
            newSyy[i] = 0.0;
            newSzz[i] = 0.0;
            newTxy[i] = 0.0;
            newTxz[i] = 0.0;
            newTyz[i] = 0.0;
            /* mark the zone as just past failure */
            eps[index] = eps_failure_model * 1.01;
        }
    }
}
/* Accumulate incremental strain-energy work over the half step deltaTime:
 * deltz (per zone) uses the time-averaged stresses (old + new), delts
 * (per entry) uses the old stresses only.  szz is reconstructed from the
 * traceless-deviator condition szz = -sxx - syy.
 *
 * Fix: the original computed `index = zoneset[i]` but then re-read
 * zoneset[i] ~20 times per iteration; the hoisted index and the common
 * volume factor quarterDelta*(vnewc[i]+vc[i]) are now reused. */
void StressStrainWork(real8 *deltz,real8 *delts,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,const real8 *dxx,const real8 *dyy,const real8 *dzz,const real8 *dxy,const real8 *dxz,const real8 *dyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length)
{
    int i;
    int index;
    real8 quarterDelta = 0.25 * deltaTime;
    real8 szz;
    real8 vol;  /* per-entry volume-weighted time factor */
#pragma omp parallel for private (index,szz,vol,i) firstprivate (length,quarterDelta)
    for (i = 0; i <= length - 1; i += 1) {
        index = zoneset[i];  /* hoist the indirection */
        szz = -sxx[index] - syy[index];
        vol = quarterDelta * (vnewc[i] + vc[i]);  /* common factor of both sums */
        deltz[index] += vol * (dxx[index] * (sxx[index] + newSxx[i])
                             + dyy[index] * (syy[index] + newSyy[i])
                             + dzz[index] * (szz + newSzz[i])
                             + 2. * dxy[index] * (txy[index] + newTxy[i])
                             + 2. * dxz[index] * (txz[index] + newTxz[i])
                             + 2. * dyz[index] * (tyz[index] + newTyz[i]));
        delts[i] += vol * (dxx[index] * sxx[index]
                         + dyy[index] * syy[index]
                         + dzz[index] * szz
                         + 2. * dxy[index] * txy[index]
                         + 2. * dxz[index] * txz[index]
                         + 2. * dyz[index] * tyz[index]);
    }
}
/* Split the strain work deltz into a reversible part deltrh (scaled by the
 * shear-modulus derivative) and the remainder deltzh, per zone.
 *
 * Fix: the original computed `nz = zoneset[i]` but then re-read zoneset[i]
 * throughout; nz is now used, and the common volume factor
 * 0.25*(vnewc[i]+vc[i]) is hoisted into `vol`. */
void StressStrainHeat(const real8 *deltz,real8 *deltzh,real8 *deltrh,const real8 *shearMod,const real8 *shearRatio,const real8 *shearDer,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length)
{
    real8 shearr;
    real8 sheari;
    real8 avgMod;
    real8 vol;
    int nz;
    int i;
    /* Quiet the compiler - unused argument */
    deltaTime = deltaTime;
#pragma omp parallel for private (shearr,sheari,avgMod,vol,nz,i) firstprivate (length)
    for (i = 0; i <= length - 1; i += 1) {
        nz = zoneset[i];                 /* hoist the indirection */
        shearr = 0.5 * shearRatio[i];
        vol = 0.25 * (vnewc[i] + vc[i]); /* common volume factor */
        if (shearMod[nz] > 0.) {
            sheari = 0.5 / shearMod[nz];
            deltrh[nz] = vol * ((newSxx[i] * sheari - sxx[nz] * shearr) * (sxx[nz] + newSxx[i])
                + (newSyy[i] * sheari - syy[nz] * shearr) * (syy[nz] + newSyy[i])
                + (newSzz[i] * sheari + (syy[nz] + sxx[nz]) * shearr) * (newSzz[i] - sxx[nz] - syy[nz])
                + 2. * (newTxy[i] * sheari - txy[nz] * shearr) * (txy[nz] + newTxy[i])
                + 2. * (newTxz[i] * sheari - txz[nz] * shearr) * (txz[nz] + newTxz[i])
                + 2. * (newTyz[i] * sheari - tyz[nz] * shearr) * (tyz[nz] + newTyz[i]));
        }
        else {
            /* zero shear modulus: use the old stresses and shearr only */
            deltrh[nz] = -vol * (sxx[nz] * (sxx[nz] + newSxx[i])
                + syy[nz] * (syy[nz] + newSyy[i])
                - (syy[nz] + sxx[nz]) * (newSzz[i] - sxx[nz] - syy[nz])
                + 2. * txy[nz] * (txy[nz] + newTxy[i])
                + 2. * txz[nz] * (txz[nz] + newTxz[i])
                + 2. * tyz[nz] * (tyz[nz] + newTyz[i])) * shearr;
        }
        deltzh[nz] = deltz[nz] - deltrh[nz];
        /* average modulus from shearMod and 1/shearRatio, when available */
        avgMod = 0.5 * shearMod[nz];
        if (shearRatio[i] > 0.)
            avgMod = avgMod + 0.5 / shearRatio[i];
        if (avgMod > 0.)
            deltrh[nz] = shearDer[i] * deltrh[nz] / avgMod;
        else
            deltrh[nz] = 0.0;
    }
}
|
sum.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
enum {
N = 10000000,                  /* number of array elements to sum */
SUM_OMP_ARRAY_MIN_SIZE = 1000  /* ranges smaller than this are summed serially */
};
/* Wall-clock time in seconds since the epoch, microsecond resolution. */
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + 1E-6 * (double)now.tv_usec;
}
/* Recursively sum v[low..high] (inclusive) by halving the range (pairwise
 * summation, which is also more accurate than a left-to-right loop).
 * Requires low <= high on entry.
 *
 * Fix: the midpoint is now computed as low + (high - low) / 2, which
 * cannot overflow int for valid indices, unlike (low + high) / 2. */
double sum(double *v, int low, int high)
{
    if (low >= high)               /* single element (>= also guards bad input) */
        return v[low];
    int mid = low + (high - low) / 2;  /* overflow-safe midpoint */
    return sum(v, low, mid) + sum(v, mid + 1, high);
}
/* Task-parallel recursive sum over v[low..high].  Ranges below the
 * threshold fall back to the serial sum() so task-creation overhead
 * stays bounded.  The left half runs as a child task; the right half is
 * computed by the current task, then taskwait joins the two. */
double sum_omp_tasks_threshold(double *v, int low, int high)
{
    if (low == high)
        return v[low];
    if (high - low < SUM_OMP_ARRAY_MIN_SIZE)
        return sum(v, low, high);
    double left_part, right_part;
    int mid = (low + high) / 2;
#pragma omp task shared(left_part)
    left_part = sum_omp_tasks_threshold(v, low, mid);
    /* right half: no extra task needed, this task does the work */
    right_part = sum_omp_tasks_threshold(v, mid + 1, high);
#pragma omp taskwait
    return left_part + right_part;
}
/* Parallel entry point: start a thread team and let a single thread seed
 * the task tree; the implicit barrier at the end of the parallel region
 * makes the result visible before it is returned. */
double sum_omp(double *v, int low, int high)
{
    double total = 0;
#pragma omp parallel
    {
#pragma omp single nowait
        total = sum_omp_tasks_threshold(v, low, high);
    }
    return total;
}
double run_serial()
{
double *v = malloc(sizeof(*v) * N);
for (int i = 0; i < N; i++)
v[i] = i + 1.0;
double t = wtime();
double res = sum(v, 0, N - 1);
t = wtime() - t;
printf("Result (serial): %.4f; error %.12f\n", res, fabs(res - (1.0 + N) / 2.0 * N));
free(v);
return t;
}
double run_parallel()
{
double *v = malloc(sizeof(*v) * N);
for (int i = 0; i < N; i++)
v[i] = i + 1.0;
double t = wtime();
double res = sum_omp(v, 0, N - 1);
t = wtime() - t;
printf("Result (parallel): %.4f; error %.12f\n", res, fabs(res - (1.0 + N) / 2.0 * N));
free(v);
return t;
}
/* Driver: run the serial and the task-parallel version and report the
 * timings and the resulting speedup. */
int main(int argc, char **argv)
{
    printf("Recursive summation N = %d\n", N);
    double t_serial = run_serial();
    double t_parallel = run_parallel();
    printf("Execution time (serial): %.6f\n", t_serial);
    printf("Execution time (parallel): %.6f\n", t_parallel);
    printf("Speedup: %.2f\n", t_serial / t_parallel);
    return 0;
}
|
HGEMM_v1.h | #include <iostream>
#include <math.h>
#include <float.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <cholUtils.h>
#ifndef HALIDE_ATTRIBUTE_ALIGN
#ifdef _MSC_VER
#define HALIDE_ATTRIBUTE_ALIGN(x) __declspec(align(x))
#else
#define HALIDE_ATTRIBUTE_ALIGN(x) __attribute__((aligned(x)))
#endif
#endif
#ifndef BUFFER_T_DEFINED
#define BUFFER_T_DEFINED
#include <stdbool.h>
#include <stdint.h>
/* Halide-compatible buffer descriptor (the legacy pre-halide_buffer_t
 * layout).  Supports up to 4 dimensions. */
typedef struct buffer_t {
uint64_t dev;       /* opaque device handle (0 when host-only) */
uint8_t* host;      /* pointer to host data */
int32_t extent[4];  /* size in each dimension */
int32_t stride[4];  /* stride, in elements, in each dimension */
int32_t min[4];     /* coordinate of the first element in each dimension */
int32_t elem_size;  /* bytes per element */
HALIDE_ATTRIBUTE_ALIGN(1) bool host_dirty;  /* host copy modified since last sync */
HALIDE_ATTRIBUTE_ALIGN(1) bool dev_dirty;   /* device copy modified since last sync */
HALIDE_ATTRIBUTE_ALIGN(1) uint8_t _padding[10 - sizeof(void *)];  /* keep total size fixed across 32/64-bit */
} buffer_t;
#endif
#define __user_context_ NULL
#define HSS
struct halide_filter_metadata_t;
// C-linkage allocation hooks for the generated kernel.  The context
// pointer is part of the Halide-style signature but unused here.
extern "C" {
void *sympiler_malloc(void *ctx, size_t s) {
    return malloc(s);
}
void sympiler_free(void *ctx, void *ptr) {
    free(ptr);
}
}
#ifdef _WIN32
float roundf(float);
double round(double);
#else
// Fixed-width-named wrappers for the inverse hyperbolic functions, so the
// generated code can pick the float or double overload explicitly.
inline float  asinh_f32(float x)  { return asinhf(x); }
inline float  acosh_f32(float x)  { return acoshf(x); }
inline float  atanh_f32(float x)  { return atanhf(x); }
inline double asinh_f64(double x) { return asinh(x); }
inline double acosh_f64(double x) { return acosh(x); }
inline double atanh_f64(double x) { return atanh(x); }
#endif
// Fixed-width-named wrappers over <math.h>, so generated code can select
// the float (f32) or double (f64) overload explicitly.

// 32-bit float variants
inline float sqrt_f32(float x)  { return sqrtf(x);  }
inline float sin_f32(float x)   { return sinf(x);   }
inline float asin_f32(float x)  { return asinf(x);  }
inline float cos_f32(float x)   { return cosf(x);   }
inline float acos_f32(float x)  { return acosf(x);  }
inline float tan_f32(float x)   { return tanf(x);   }
inline float atan_f32(float x)  { return atanf(x);  }
inline float sinh_f32(float x)  { return sinhf(x);  }
inline float cosh_f32(float x)  { return coshf(x);  }
inline float tanh_f32(float x)  { return tanhf(x);  }
inline float hypot_f32(float x, float y) { return hypotf(x, y); }
inline float exp_f32(float x)   { return expf(x);   }
inline float log_f32(float x)   { return logf(x);   }
inline float pow_f32(float x, float y)   { return powf(x, y);   }
inline float floor_f32(float x) { return floorf(x); }
inline float ceil_f32(float x)  { return ceilf(x);  }
inline float round_f32(float x) { return roundf(x); }

// 64-bit double variants
inline double sqrt_f64(double x)  { return sqrt(x);  }
inline double sin_f64(double x)   { return sin(x);   }
inline double asin_f64(double x)  { return asin(x);  }
inline double cos_f64(double x)   { return cos(x);   }
inline double acos_f64(double x)  { return acos(x);  }
inline double tan_f64(double x)   { return tan(x);   }
inline double atan_f64(double x)  { return atan(x);  }
inline double sinh_f64(double x)  { return sinh(x);  }
inline double cosh_f64(double x)  { return cosh(x);  }
inline double tanh_f64(double x)  { return tanh(x);  }
inline double hypot_f64(double x, double y) { return hypot(x, y); }
inline double exp_f64(double x)   { return exp(x);   }
inline double log_f64(double x)   { return log(x);   }
inline double pow_f64(double x, double y)   { return pow(x, y);   }
inline double floor_f64(double x) { return floor(x); }
inline double ceil_f64(double x)  { return ceil(x);  }
inline double round_f64(double x) { return round(x); }

// special values and NaN tests (x != x is true only for NaN)
inline float nan_f32()     { return NAN;       }
inline float neg_inf_f32() { return -INFINITY; }
inline float inf_f32()     { return INFINITY;  }
inline bool is_nan_f32(float x)  { return x != x; }
inline bool is_nan_f64(double x) { return x != x; }
/* Reinterpret a 32-bit pattern as an IEEE-754 float.
 * Fix: the original read an inactive union member, which is undefined
 * behavior in C++; memcpy is well-defined and compiles to the same
 * single move. */
inline float float_from_bits(uint32_t bits) {
    float result;
    memcpy(&result, &bits, sizeof(result));
    return result;
}
// Assemble a 64-bit integer from its high and low 32-bit halves.  The low
// half is widened as unsigned so sign bits don't leak into the upper word.
inline int64_t make_int64(int32_t hi, int32_t lo) {
    int64_t upper = ((int64_t)hi) << 32;
    return upper | (uint32_t)lo;
}
/* Assemble a double from two 32-bit words stored in memory order (i0
 * first, i1 second).  The result is endianness-dependent, exactly as the
 * original was.
 * Fix: the original read an inactive union member, which is undefined
 * behavior in C++; memcpy of the same byte layout is well-defined. */
inline double make_float64(int32_t i0, int32_t i1) {
    int32_t halves[2] = {i0, i1};
    double result;
    memcpy(&result, halves, sizeof(result));
    return result;
}
/* Generic bit-cast: reinterpret the bytes of a B as an A via memcpy
 * (well-defined, unlike union punning).  Caller must ensure sizeof(A) <=
 * sizeof(B) so no uninitialized bytes are read. */
template<typename A, typename B> A reinterpret(B b) {A a; memcpy(&a, &b, sizeof(a)); return a;}
/* Scratch globals referenced by generated solver code.
 * NOTE(review): `one`/`zero` look like {re,im} complex alpha/beta
 * constants for BLAS calls -- confirm against the code generator. */
double one [2]={1.0,0.}, zero [2]={0.,0.};
int sw = false, lb1 = 0, ub1 = 0;  /* switch flag and loop-bound scratch */
double *cur; int info=0;           /* current pointer and LAPACK-style status */
#ifdef __cplusplus
extern "C" {
#endif
/* Generated HSS (hierarchically semi-separable) operator applied to nrhs
 * right-hand sides.  Phases as emitted by the generator:
 *   1. leaf block-diagonal products            apres = D * mrhs
 *   2. upward sweep over the node tree         wskel = VT * (mrhs | child wskel)
 *   3. sibling coupling through the B blocks   uskel = B * wskel(sibling)
 *   4. downward sweep scattering results       apres += VT^T * uskel
 * The tree shape (512 leaves, 1023 nodes) and the level boundaries
 * (0, 1, 3, 5) are hard-coded by the generator.  Requires an external
 * CBLAS/MKL implementation.
 * NOTE(review): the `levelset` argument is never read in this body. */
int32_t HGEMM(double *D,
double *B, double *VT, uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs,
double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm,
int32_t *slen, int32_t *wpart, int32_t *clevelset) {
/* Phase 1: independent dense leaf multiplies, one per leaf block. */
#pragma omp parallel for
for (int i = 0; i < 512; i++)
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
Ddim[i],nrhs,Ddim[i],
float_from_bits(1065353216 /* 1 */), &D[Dptr[i]],
Ddim[i], &mrhs[wptr[i]], Ddim[i], float_from_bits(0 /* 0 */),
&apres[uptr[i]], Ddim[i]);
} // for i
/* Phase 2, level 0 of the upward sweep.
 * NOTE(review): the inner `int i=0;` shadows the loop variable of this
 * single-iteration loop; both are 0, but the shadowing is fragile. */
for (int i = 0; i < 1; i++)
{
int i=0;
int32_t _0 = i + 1;
#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_0]; k++)
{
int32_t _1 = k + 1;
for (int j = wpart[k]; j < wpart[_1]; j++)
{
/* 0xFFFFFFFF marks a node with no children (a leaf node) */
int32_t _2 = (int32_t)(4294967295);
bool _3 = lchildren[idx[j]] == _2;
if (_3)
{
/* leaf: project the raw right-hand side into skeleton space */
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,Ddim[lm[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
} // if _3
else
{
/* interior: combine the left child's weights, then accumulate the
 * right child's (VT block for the right child starts at offset _5) */
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[lchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _5 = _4 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[rchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[_5],
slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
} // if _3 else
} // for j
} // for k
/* NOTE(review): this trailer comment has no closing brace with it -- the
 * single-iteration loop body opened above appears to remain open until
 * the end of the function.  Verify against the original generated file. */
// for i
/* Upward sweep, levels 1-2: interior nodes only (the leaf branch was
 * folded away by the generator and left here as comments). */
for (int i = 1; i < 3; i++)
{
int32_t _0 = i + 1;
#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_0]; k++)
{
int32_t _1 = k + 1;
for (int j = wpart[k]; j < wpart[_1]; j++)
{
// int32_t _2 = (int32_t)(4294967295);
// bool _3 = lchildren[idx[j]] == _2;
// if (_3)
// {
// cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
// slen[idx[j]],nrhs,Ddim[lm[idx[j]]],
// float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
// slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */),
// &wskel[wskeloffset[idx[j]]], slen[idx[j]]);
// } // if _3
// else
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[lchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _5 = _4 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[rchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[_5],
slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
} // if _3 else
} // for j
} // for k
} // for i
/* Upward sweep, levels 3-4: few large nodes, so the outer loops run
 * serially and MKL threads the individual GEMMs instead. */
mkl_set_num_threads(12);
for (int i = 3; i < 5; i++)
{
int32_t _0 = i + 1;
//#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_0]; k++)
{
int32_t _1 = k + 1;
for (int j = wpart[k]; j < wpart[_1]; j++)
{
// int32_t _2 = (int32_t)(4294967295);
// bool _3 = lchildren[idx[j]] == _2;
// if (_3)
// {
// cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
// slen[idx[j]],nrhs,Ddim[lm[idx[j]]],
// float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
// slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */),
// &wskel[wskeloffset[idx[j]]], slen[idx[j]]);
// } // if _3
// else
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[lchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _5 = _4 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[rchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[_5],
slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
} // if _3 else
} // for j
} // for k
} // for i
//mkl_set_num_threads(1);
/* Phase 3: sibling coupling for nodes 1..1022 (the root is skipped);
 * _14 is the sibling index: i-1 when i is even, i+1 when i is odd.
 * NOTE(review): alpha/beta are passed as the uint32 temporaries _8/_12
 * (implicitly converted to 1.0/0.0, unlike the float_from_bits style used
 * elsewhere), and the B block is addressed with Bptr[_9] (= i-1) for both
 * parities -- confirm both against the generator before changing. */
uint32_t _6 = (uint32_t)(1);
uint32_t _7 = (uint32_t)(1023);
#pragma omp parallel for
for (int i = _6; i < _7; i++)
{
uint32_t _8 = (uint32_t)(1);
int32_t _9 = i - _8;
int32_t _10 = i + _8;
int32_t _11 = i & 1;
uint32_t _12 = (uint32_t)(0);
bool _13 = _11 == _12;
int32_t _14 = (int32_t)(_13 ? _9 : _10);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[i],nrhs,slen[_14],
_8, &B[Bptr[_9]],
slen[i], &wskel[wskeloffset[_14]], slen[_14], _12,
&uskel[uskeloffset[i]], slen[i]);
} // for i
/* Phase 4: downward sweep, top levels 4..3 first (serial outer loops,
 * MKL-threaded GEMMs), pushing uskel down to the children. */
int32_t _15 = 3 - 1;
int32_t _16 = 5 - 1;
mkl_set_num_threads(12);
for (int i = _16; i > _15; i--)
{
int32_t _17 = i + 1;
//#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_17]; k++)
{
int32_t _18 = wpart[k] - 1;
int32_t _19 = k + 1;
int32_t _20 = wpart[_19] - 1;
for (int j = _20; j > _18; j--)
{
// int32_t _21 = (int32_t)(4294967295);
// bool _22 = lchildren[idx[j]] == _21;
// if (_22)
// {
// cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
// Ddim[lm[idx[j]]],nrhs,slen[idx[j]],
// float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
// slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
// &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]);
// } // if _22
// else
{
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[lchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]);
int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _24 = _23 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[rchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[_24],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]);
} // if _22 else
} // for j
} // for k
} // for i
/* Downward sweep, levels 2..1: OpenMP across nodes again. */
_15 = 1 - 1;
_16 = 3 - 1;
mkl_set_dynamic(true);
for (int i = _16; i > _15; i--)
{
int32_t _17 = i + 1;
#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_17]; k++)
{
int32_t _18 = wpart[k] - 1;
int32_t _19 = k + 1;
int32_t _20 = wpart[_19] - 1;
for (int j = _20; j > _18; j--)
{
// int32_t _21 = (int32_t)(4294967295);
// bool _22 = lchildren[idx[j]] == _21;
// if (_22)
// {
// cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
// Ddim[lm[idx[j]]],nrhs,slen[idx[j]],
// float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
// slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
// &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]);
// } // if _22
// else
{
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[lchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]);
int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _24 = _23 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[rchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[_24],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]);
} // if _22 else
} // for j
} // for k
} // for i
/* Downward sweep, level 0: the leaf branch is live here and scatters the
 * final skeleton contributions into the output apres. */
_15 = 0 - 1;
_16 = 1 - 1;
for (int i = _16; i > _15; i--)
{
int32_t _17 = i + 1;
#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_17]; k++)
{
int32_t _18 = wpart[k] - 1;
int32_t _19 = k + 1;
int32_t _20 = wpart[_19] - 1;
for (int j = _20; j > _18; j--)
{
int32_t _21 = (int32_t)(4294967295);
bool _22 = lchildren[idx[j]] == _21;
if (_22)
{
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
Ddim[lm[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]);
} // if _22
else
{
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[lchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]);
int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _24 = _23 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[rchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[_24],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]);
} // if _22 else
} // for j
} // for k
} // for i
return 0;
}
#ifdef __cplusplus
} // extern "C"
#endif
|
jacobi.pluto.c | #include <omp.h>
#include <math.h>
/* Pluto loop-bound helpers: ceiling/floor division (via doubles) and
 * min/max used by the generated tile bounds. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define pmax(x,y) ((x) > (y)? (x) : (y))
#define pmin(x,y) ((x) < (y)? (x) : (y))
#include "smoothers.h"
#include <math.h>
// cannot be 8 or below
/* Default tile sizes, overridable at compile time.
 * NOTE(review): the generated nest in jacobi() below hard-codes the
 * factors 8/16/1000, so TS/T3/T4 appear unused in this translation
 * unit -- confirm before relying on them. */
#ifndef TS
#define TS 16
#endif
#ifndef T3
#define T3 64
#endif
#ifndef T4
#define T4 256
#endif
//void jacobi( GRID &u, GRID &f, GRID &tmp, int nu ) {
/* Run nu weighted-Jacobi sweeps on the 3-D grid u with right-hand side f,
 * using tmp as the ping-pong buffer.  The loop nest is a Pluto-generated
 * time-skewed tiling: t1 tiles time, t2..t4 tile space, t5 is the actual
 * time step and t6..t8 (the vectorized k loop) the spatial points.  Even
 * time steps read a (u) and write w (tmp); odd steps write back, and an
 * odd total sweep count is fixed up by swapping the grid pointers at the
 * end so u.p always holds the latest iterate.
 * NOTE(review): `count` is never incremented (the count++ lines are
 * commented out), so the return value is always 0 as written. */
long long int jacobi( GRID &u, GRID &f, GRID &tmp, int nu ) {
int N = u.n;
int lda = u.lda;           /* row stride */
int lda2 = u.lda * u.lda;  /* plane stride */
double hh = u.h*u.h;
double invhh = 1.0 / hh;
/* inverse-diagonal times damping factor (per the name): h^2/6 * 8/9 */
double DinvXomega = hh/6.0 * 8.0/9.0;
double* w = &(tmp.p[0][0][0]);  /* scratch iterate */
double* a = &(u.p[0][0][0]);    /* current iterate */
double* b = &(f.p[0][0][0]);    /* right-hand side */
long long int count = 0;
#ifdef USE_MM_ALLOC
__assume_aligned(w,64);
__assume_aligned(a,64);
__assume_aligned(b,64);
#endif
if ((N >= 1) && (nu >= 1)) {
for (int t1=-1;t1<=floord(nu-2,8);t1++) {
int lbp=pmax(ceild(t1,2),ceild(16*t1-nu+3,16));
int ubp=pmin(floord(nu+N-2,16),floord(8*t1+N+7,16));
/* parallelize across the t2 wavefront of tiles */
#pragma omp parallel for
for (int t2=lbp;t2<=ubp;t2++) {
for (int t3=pmax(pmax(0,ceild(t1-1,2)),ceild(16*t2-N-14,16));t3<=pmin(pmin(floord(nu+N-2,16),floord(8*t1+N+15,16)),floord(16*t2+N+14,16));t3++) {
for (int t4=pmax(pmax(pmax(0,ceild(t1-124,125)),ceild(16*t2-N-998,1000)),ceild(16*t3-N-998,1000));t4<=pmin(pmin(pmin(pmin(floord(8*t1-8*t2+N+7,500),floord(nu+N-2,1000)),floord(8*t1+N+15,1000)),floord(16*t2+N+14,1000)),floord(16*t3+N+14,1000));t4++) {
for (int t5=pmax(pmax(pmax(pmax(pmax(0,8*t1),16*t2-N),16*t3-N),1000*t4-N),16*t1-16*t2+1);t5<=pmin(pmin(pmin(pmin(pmin(nu-2,8*t1+15),16*t2+14),16*t3+14),1000*t4+998),16*t1-16*t2+N+15);t5++) {
for (int t6=pmax(pmax(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=pmin(pmin(16*t2+15,t5+N),-16*t1+16*t2+2*t5);t6++) {
for (int t7=pmax(16*t3,t5+1);t7<=pmin(16*t3+15,t5+N);t7++) {
/* flattened [plane][row][col] bounds of the innermost run */
int lbv= (-t5+t6)*lda2 + (-t5+t7)*lda -t5+pmax(1000*t4,t5+1);
int ubv= (-t5+t6)*lda2 + (-t5+t7)*lda -t5+pmin(1000*t4+999,t5+N);
/* 6-point-neighbor offsets of the first element */
int ks = lbv-lda;
int kn = lbv+lda;
int ke = lbv-1;
int kw = lbv+1;
int kf = lbv-lda2;
int kb = lbv+lda2;
/* ping-pong: even steps a -> w, odd steps w -> a */
if (t5%2==0) {
#pragma loop_count min(1),max(64),avg(32)
#pragma ivdep
#pragma vector always
for (int k=lbv;k<=ubv;k++) {
w[k] = a[k] - DinvXomega*((6.0*a[k]-a[kw]-a[ke]-a[kn]-a[ks]-a[kf]-a[kb])*invhh - b[k]);
kn++; ks++; ke++; kw++; kf++; kb++;
//count++;
}
} else {
#pragma loop_count min(1),max(64),avg(32)
#pragma ivdep
#pragma vector always
for (int k=lbv;k<=ubv;k++) {
a[k] = w[k] - DinvXomega*((6.0*w[k]-w[kw]-w[ke]-w[kn]-w[ks]-w[kf]-w[kb])*invhh - b[k]);
kn++; ks++; ke++; kw++; kf++; kb++;
// count++;
}
}
/*
lbv=pmax(1000*t4,t5+1);
ubv=pmin(1000*t4+999,t5+N);
#pragma ivdep
#pragma vector always
for (int t8=lbv;t8<=ubv;t8++) {
a[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (a[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] - (c * (((((((((6.0 * a[ t5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]) * invhh) - b[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)])));;
}*/
}
}
}
}
}
}
}
}
/* odd sweep count: the latest data ended up in tmp, so swap the grids */
if (nu%2==1) {
double*** t = u.p;
u.p = tmp.p;
tmp.p = t;
}
return count;
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% snibgo (Alan Gibson) %
% January 2022 %
% %
% %
% %
% Copyright @ 2022 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/* Sizing parameters for the fx expression parser/evaluator. */
#define MaxTokenLen 100        /* maximum characters in one token */
#define RpnInit 100            /* initial RPN element-table capacity */
#define TableExtend 0.1        /* fractional growth when a table fills */
#define InitNumOprStack 50     /* initial operator-stack capacity */
#define MinValStackSize 100    /* minimum value-stack capacity */
#define InitNumUserSymbols 50  /* initial user-symbol table capacity */
/* Working floating-point type for fx expression evaluation. */
typedef long double fxFltType;
/* Operator codes: compound assignment and increment/decrement first, then
 * arithmetic, shifts, comparisons, logical and bitwise operators, power,
 * ternary parts, grouping tokens, assignment, and the oNull end marker. */
typedef enum {
oAddEq,
oSubtractEq,
oMultiplyEq,
oDivideEq,
oPlusPlus,
oSubSub,
oAdd,
oSubtract,
oMultiply,
oDivide,
oModulus,
oUnaryPlus,
oUnaryMinus,
oLshift,
oRshift,
oEq,
oNotEq,
oLtEq,
oGtEq,
oLt,
oGt,
oLogAnd,
oLogOr,
oLogNot,
oBitAnd,
oBitOr,
oBitNot,
oPow,
oQuery,
oColon,
oOpenParen,
oCloseParen,
oOpenBracket,
oCloseBracket,
oOpenBrace,
oCloseBrace,
oAssign,
oNull
} OperatorE;
/* Descriptor for one operator token. */
typedef struct {
OperatorE op;      /* operator code */
const char * str;  /* source-text spelling */
int precedence; /* Higher number is higher precedence */
int nArgs;         /* number of operands consumed */
} OperatorT;
static const OperatorT Operators[] = {
{oAddEq, "+=", 12, 1},
{oSubtractEq, "-=", 12, 1},
{oMultiplyEq, "*=", 13, 1},
{oDivideEq, "/=", 13, 1},
{oPlusPlus, "++", 12, 0},
{oSubSub, "--", 12, 0},
{oAdd, "+", 12, 2},
{oSubtract, "-", 12, 2},
{oMultiply, "*", 13, 2},
{oDivide, "/", 13, 2},
{oModulus, "%", 13, 2},
{oUnaryPlus, "+", 14, 1},
{oUnaryMinus, "-", 14, 1},
{oLshift, "<<", 11, 2},
{oRshift, ">>", 11, 2},
{oEq, "==", 9, 2},
{oNotEq, "!=", 9, 2},
{oLtEq, "<=", 10, 2},
{oGtEq, ">=", 10, 2},
{oLt, "<", 10, 2},
{oGt, ">", 10, 2},
{oLogAnd, "&&", 6, 2},
{oLogOr, "||", 5, 2},
{oLogNot, "!", 16, 1},
{oBitAnd, "&", 8, 2},
{oBitOr, "|", 7, 2},
{oBitNot, "~", 16, 1},
{oPow, "^", 15, 2},
{oQuery, "?", 4, 1},
{oColon, ":", 4, 1},
{oOpenParen, "(", 0, 0},
{oCloseParen, ")", 0, 0},
{oOpenBracket, "[", 0, 0},
{oCloseBracket,"]", 0, 0},
{oOpenBrace, "{", 0, 0},
{oCloseBrace, "}", 0, 0},
{oAssign, "=", 3, 1},
{oNull, "onull", 17, 0}
};
/* Named numeric constants recognised in FX expressions.
   NOTE: ConstantE member order must match the row order of Constants[]. */
typedef enum {
cEpsilon,
cE,
cOpaque,
cPhi,
cPi,
cQuantumRange,
cQuantumScale,
cTransparent,
cMaxRgb,
cNull
} ConstantT_E_unused_comment_removed_do_not_use;
/* Function opcodes occupy the value range immediately after the operators. */
#define FirstFunc ((FunctionE) (oNull+1))
/* NOTE: FunctionE member order must match the row order of Functions[]
   below, including the #if conditionals — enum and table must be compiled
   with identical sets of members or indexing by (op - FirstFunc) breaks. */
typedef enum {
fAbs = oNull+1,
#if defined(MAGICKCORE_HAVE_ACOSH)
fAcosh,
#endif
fAcos,
#if defined(MAGICKCORE_HAVE_J1)
fAiry,
#endif
fAlt,
#if defined(MAGICKCORE_HAVE_ASINH)
fAsinh,
#endif
fAsin,
#if defined(MAGICKCORE_HAVE_ATANH)
fAtanh,
#endif
fAtan2,
fAtan,
fCeil,
fChannel,
fClamp,
fCosh,
fCos,
fDebug,
fDrc,
#if defined(MAGICKCORE_HAVE_ERF)
fErf,
#endif
fExp,
fFloor,
fGauss,
fGcd,
fHypot,
fInt,
fIsnan,
#if defined(MAGICKCORE_HAVE_J0)
fJ0,
#endif
#if defined(MAGICKCORE_HAVE_J1)
fJ1,
#endif
#if defined(MAGICKCORE_HAVE_J1)
fJinc,
#endif
fLn,
fLogtwo,
fLog,
fMax,
fMin,
fMod,
fNot,
fPow,
fRand,
fRound,
fSign,
fSinc,
fSinh,
fSin,
fSqrt,
fSquish,
fTanh,
fTan,
fTrunc,
fDo,
fFor,
fIf,
fWhile,
fU,
fU0,
fUP,
fS,
fV,
fP,
fSP,
fVP,
fNull
} FunctionE;
/* One row per function: source text and fixed argument count. */
typedef struct {
FunctionE func;
const char * str;
int nArgs;
} FunctionT;
static const FunctionT Functions[] = {
{fAbs, "abs" , 1},
#if defined(MAGICKCORE_HAVE_ACOSH)
{fAcosh, "acosh" , 1},
#endif
{fAcos, "acos" , 1},
#if defined(MAGICKCORE_HAVE_J1)
{fAiry, "airy" , 1},
#endif
{fAlt, "alt" , 1},
#if defined(MAGICKCORE_HAVE_ASINH)
{fAsinh, "asinh" , 1},
#endif
{fAsin, "asin" , 1},
#if defined(MAGICKCORE_HAVE_ATANH)
{fAtanh, "atanh" , 1},
#endif
{fAtan2, "atan2" , 2},
{fAtan, "atan" , 1},
{fCeil, "ceil" , 1},
{fChannel, "channel" , 5},
{fClamp, "clamp" , 1},
{fCosh, "cosh" , 1},
{fCos, "cos" , 1},
{fDebug, "debug" , 1},
{fDrc, "drc" , 2},
#if defined(MAGICKCORE_HAVE_ERF)
{fErf, "erf" , 1},
#endif
{fExp, "exp" , 1},
{fFloor, "floor" , 1},
{fGauss, "gauss" , 2},
{fGcd, "gcd" , 2},
{fHypot, "hypot" , 2},
{fInt, "int" , 1},
{fIsnan, "isnan" , 1},
#if defined(MAGICKCORE_HAVE_J0)
{fJ0, "j0" , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
{fJ1, "j1" , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
{fJinc, "jinc" , 1},
#endif
{fLn, "ln" , 1},
{fLogtwo, "logtwo", 1},
{fLog, "log" , 1},
{fMax, "max" , 2},
{fMin, "min" , 2},
{fMod, "mod" , 2},
{fNot, "not" , 1},
{fPow, "pow" , 2},
{fRand, "rand" , 0},
{fRound, "round" , 1},
{fSign, "sign" , 1},
{fSinc, "sinc" , 1},
{fSinh, "sinh" , 1},
{fSin, "sin" , 1},
{fSqrt, "sqrt" , 1},
{fSquish, "squish", 1},
{fTanh, "tanh" , 1},
{fTan, "tan" , 1},
{fTrunc, "trunc" , 1},
{fDo, "do", 2},
{fFor, "for", 3},
{fIf, "if", 3},
{fWhile, "while", 2},
{fU, "u", 1},
{fU0, "u0", 0},
{fUP, "up", 3},
{fS, "s", 0},
{fV, "v", 0},
{fP, "p", 2},
{fSP, "sp", 2},
{fVP, "vp", 2},
{fNull, "fnull" , 0}
};
/* Image-attribute opcodes follow the function opcodes. */
#define FirstImgAttr ((ImgAttrE) (fNull+1))
/* NOTE: ImgAttrE member order must match the row order of ImgAttrs[].
   aPage/aPrintsize/aRes are incomplete on their own; MaybeXYWH() advances
   them to the .x/.y/.width/.height variants, which therefore must
   immediately follow their base member. */
typedef enum {
aDepth = fNull+1,
aExtent,
aKurtosis,
aMaxima,
aMean,
aMedian,
aMinima,
aPage,
aPageX,
aPageY,
aPageWid,
aPageHt,
aPrintsize,
aPrintsizeX,
aPrintsizeY,
aQuality,
aRes,
aResX,
aResY,
aSkewness,
aStdDev,
aH,
aN,
aT,
aW,
aZ,
aNull
} ImgAttrE;
typedef struct {
ImgAttrE attr;
const char * str;
int NeedStats; /* 1 if evaluating this attribute requires channel statistics */
} ImgAttrT;
static const ImgAttrT ImgAttrs[] = {
{aDepth, "depth", 1},
{aExtent, "extent", 0},
{aKurtosis, "kurtosis", 1},
{aMaxima, "maxima", 1},
{aMean, "mean", 1},
{aMedian, "median", 1},
{aMinima, "minima", 1},
{aPage, "page", 0},
{aPageX, "page.x", 0},
{aPageY, "page.y", 0},
{aPageWid, "page.width", 0},
{aPageHt, "page.height", 0},
{aPrintsize, "printsize", 0},
{aPrintsizeX, "printsize.x", 0},
{aPrintsizeY, "printsize.y", 0},
{aQuality, "quality", 0},
{aRes, "resolution", 0},
{aResX, "resolution.x", 0},
{aResY, "resolution.y", 0},
{aSkewness, "skewness", 1},
{aStdDev, "standard_deviation", 1},
{aH, "h", 0},
{aN, "n", 0},
{aT, "t", 0},
{aW, "w", 0},
{aZ, "z", 0},
{aNull, "anull", 0}
};
/* Per-pixel symbol opcodes follow the image-attribute opcodes. */
#define FirstSym ((SymbolE) (aNull+1))
/* NOTE: SymbolE member order must match the row order of Symbols[]. */
typedef enum {
sHue = aNull+1,
sIntensity,
sLightness,
sLuma,
sLuminance,
sSaturation,
sA,
sB,
sC,
sG,
sI,
sJ,
sK,
sM,
sO,
sR,
sY,
sNull
} SymbolE;
typedef struct {
SymbolE sym;
const char * str;
} SymbolT;
static const SymbolT Symbols[] = {
{sHue, "hue"},
{sIntensity, "intensity"},
{sLightness, "lightness"},
{sLuma, "luma"},
{sLuminance, "luminance"},
{sSaturation, "saturation"},
{sA, "a"},
{sB, "b"},
{sC, "c"},
{sG, "g"},
{sI, "i"},
{sJ, "j"},
{sK, "k"},
{sM, "m"},
{sO, "o"},
{sR, "r"},
{sY, "y"},
{sNull, "snull"}
};
/*
There is no way to access new value of pixels. This might be a future enhancement, eg "q".
fP, oU and oV can have channel qualifier such as "u.r".
For meta channels, we might also allow numbered channels eg "u.2" or "u.16".
... or have extra argument to p[].
*/
/* Control opcodes follow the symbol opcodes. */
#define FirstCont (sNull+1)
/* Run-time controls are in the RPN, not explicitly in the input string. */
/* NOTE: ControlE member order must match the row order of Controls[]. */
typedef enum {
rGoto = FirstCont,
rIfZeroGoto,
rIfNotZeroGoto,
rCopyFrom,
rCopyTo,
rZerStk,
rNull
} ControlE;
typedef struct {
ControlE cont;
const char * str;
int nArgs;
} ControlT;
static const ControlT Controls[] = {
{rGoto, "goto", 0},
{rIfZeroGoto, "ifzerogoto", 1},
{rIfNotZeroGoto, "ifnotzerogoto", 1},
{rCopyFrom, "copyfrom", 0},
{rCopyTo, "copyto", 1},
{rZerStk, "zerstk", 0},
{rNull, "rnull", 0}
};
/* Sentinel address used by FindUserSymbol and ternary tracking. */
#define NULL_ADDRESS -2
/* Records the RPN positions of "?" and ":" while parsing a ternary. */
typedef struct {
int addrQuery;
int addrColon;
} TernaryT;
typedef struct {
const char * str;
PixelChannel pixChan;
} ChannelT;
/* Pseudo-channels use negative values so they cannot collide with the
   non-negative PixelChannel enum members. */
#define NO_CHAN_QUAL ((PixelChannel) (-1))
#define THIS_CHANNEL ((PixelChannel) (-2))
#define HUE_CHANNEL ((PixelChannel) (-3))
#define SAT_CHANNEL ((PixelChannel) (-4))
#define LIGHT_CHANNEL ((PixelChannel) (-5))
#define INTENSITY_CHANNEL ((PixelChannel) (-6))
/* Channel qualifiers; the table is terminated by the ""/NO_CHAN_QUAL row. */
static const ChannelT Channels[] = {
{"r", RedPixelChannel},
{"g", GreenPixelChannel},
{"b", BluePixelChannel},
{"c", CyanPixelChannel},
{"m", MagentaPixelChannel},
{"y", YellowPixelChannel},
{"k", BlackPixelChannel},
{"a", AlphaPixelChannel},
{"o", AlphaPixelChannel},
{"hue", HUE_CHANNEL},
{"saturation", SAT_CHANNEL},
{"lightness", LIGHT_CHANNEL},
{"intensity", INTENSITY_CHANNEL},
{"all", CompositePixelChannel},
{"this", THIS_CHANNEL},
{"", NO_CHAN_QUAL}
};
/* The index into UserSymbols is also the index into run-time UserSymVals.
*/
/* A user symbol is a (pointer, length) view into the expression text;
   the text itself is not copied. */
typedef struct {
char * pex;
size_t len;
} UserSymbolT;
/* Category of an RPN element; used to select display/eval behaviour. */
typedef enum {
etOperator,
etConstant,
etFunction,
etImgAttr,
etSymbol,
etColourConstant,
etControl
} ElementTypeE;
/* Display names, indexed by ElementTypeE — keep the two in step. */
static const char * sElementTypes[] = {
"Operator",
"Constant",
"Function",
"ImgAttr",
"Symbol",
"ColConst",
"Control"
};
/* One element of the compiled RPN program. */
typedef struct {
ElementTypeE type;
fxFltType
val, val1, val2; /* val1/val2 are only used by etColourConstant */
int oprNum; /* opcode: OperatorE/FunctionE/ImgAttrE/SymbolE/ControlE value */
int nArgs; /* number of value-stack operands consumed */
MagickBooleanType IsRelative; /* for p[]/u[]/v[]/s[]: "[]" relative vs "{}" absolute */
MagickBooleanType DoPush; /* whether the result is pushed on the value stack */
int EleNdx; /* jump target, or user-symbol index for copyto/copyfrom */
int nDest; /* Number of Elements that "goto" this element */
PixelChannel ChannelQual; /* optional channel qualifier, eg "u.r" */
ImgAttrE ImgAttrQual; /* optional image-attribute qualifier */
char * pExpStart; /* start of source text for this element (for debug) */
int lenExp;
} ElementT;
/* How much of each image must be evaluated when gathering statistics. */
typedef enum {
rtUnknown,
rtEntireImage,
rtCornerOnly
} RunTypeE;
typedef struct {
CacheView *View;
/* Other per-image metadata could go here. */
} ImgT;
/* Per-thread run-time state; one fxRtT per worker thread. */
typedef struct {
RandomInfo * magick_restrict random_info;
int numValStack;
int usedValStack;
fxFltType * ValStack;
fxFltType * UserSymVals; /* indexed in step with FxInfo.UserSymbols */
Quantum * thisPixel;
} fxRtT;
/* The top-level FX state: compiled program plus shared run-time data. */
struct _FxInfo {
Image * image;
size_t ImgListLen;
ssize_t ImgNum;
MagickBooleanType NeedStats;
MagickBooleanType GotStats;
MagickBooleanType NeedHsl;
MagickBooleanType DebugOpt; /* Whether "-debug" option is in effect */
MagickBooleanType ContainsDebug; /* Whether expression contains "debug ()" function */
char * expression;
char * pex; /* current parse position within expression */
char ShortExp[MagickPathExtent]; /* for reporting */
int teDepth;
char token[MagickPathExtent];
size_t lenToken;
int numElements;
int usedElements;
ElementT * Elements; /* Elements is read-only at runtime. */
int numUserSymbols;
int usedUserSymbols;
UserSymbolT * UserSymbols;
int numOprStack;
int usedOprStack;
int maxUsedOprStack; /* high-water mark; sizes the run-time value stack */
OperatorE * OperatorStack;
ChannelStatistics ** statistics;
int precision;
RunTypeE runType;
RandomInfo
**magick_restrict random_infos;
ImgT * Imgs; /* one per image in the list, indexed in list order */
Image ** Images;
ExceptionInfo * exception;
fxRtT * fxrts; /* one per worker thread */
};
/* Forward declarations for recursion.
*/
static MagickBooleanType TranslateStatementList
(FxInfo * pfx, const char * strLimit, char * chLimit);
static MagickBooleanType TranslateExpression
(FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll);
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe);
/* Initialises per-expression state in *pfx for the image list containing
   "img": acquires a virtual cache view for every image in the list and
   an array form of the list.  CalcAllStats selects whether statistics
   runs cover entire images or just the corner pixel.
   Returns MagickFalse (after raising an exception) on allocation failure,
   releasing anything acquired so far. */
static MagickBooleanType InitFx (FxInfo * pfx, const Image * img,
MagickBooleanType CalcAllStats, ExceptionInfo *exception)
{
ssize_t i=0;
const Image * next;
pfx->ImgListLen = GetImageListLength (img);
pfx->ImgNum = GetImageIndexInList (img);
pfx->image = (Image *)img;
pfx->NeedStats = MagickFalse;
pfx->GotStats = MagickFalse;
pfx->NeedHsl = MagickFalse;
pfx->DebugOpt = IsStringTrue (GetImageArtifact (img, "fx:debug"));
pfx->statistics = NULL;
pfx->Imgs = NULL;
pfx->Images = NULL;
pfx->exception = exception;
pfx->precision = GetMagickPrecision ();
pfx->random_infos = AcquireRandomInfoThreadSet ();
pfx->ContainsDebug = MagickFalse;
pfx->runType = (CalcAllStats) ? rtEntireImage : rtCornerOnly;
/* One ImgT (cache view) per image in the list. */
/* NOTE(review): "%lu" assumes size_t is unsigned long — true on common
   LP64 targets; confirm for others. */
pfx->Imgs = (ImgT *)AcquireQuantumMemory (pfx->ImgListLen, sizeof (ImgT));
if (!pfx->Imgs) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Imgs", "%lu",
pfx->ImgListLen);
return MagickFalse;
}
next = GetFirstImageInList (img);
for ( ; next != (Image *) NULL; next=next->next)
{
ImgT * pimg = &pfx->Imgs[i];
pimg->View = AcquireVirtualCacheView (next, pfx->exception);
if (!pimg->View) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"View", "[%li]",
i);
/* dealloc any done so far, and Imgs */
for ( ; i > 0; i--) {
pimg = &pfx->Imgs[i-1];
pimg->View = DestroyCacheView (pimg->View);
}
pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs);
return MagickFalse;
}
i++;
}
pfx->Images = ImageListToArray (img, pfx->exception);
return MagickTrue;
}
/* Releases everything acquired by InitFx (and any statistics gathered
   later), in reverse order of acquisition.  Safe to call with partially
   initialised state: every pointer is checked before release. */
static MagickBooleanType DeInitFx (FxInfo * pfx)
{
ssize_t i;
if (pfx->Images) pfx->Images = (Image**) RelinquishMagickMemory (pfx->Images);
if (pfx->Imgs) {
/* Destroy views back-to-front; Imgs has one slot per image in the list. */
for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) {
ImgT * pimg = &pfx->Imgs[i-1];
pimg->View = DestroyCacheView (pimg->View);
}
pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs);
}
pfx->random_infos = DestroyRandomInfoThreadSet (pfx->random_infos);
if (pfx->statistics) {
for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) {
pfx->statistics[i-1]=(ChannelStatistics *) RelinquishMagickMemory (pfx->statistics[i-1]);
}
pfx->statistics = (ChannelStatistics**) RelinquishMagickMemory(pfx->statistics);
}
return MagickTrue;
}
/* Maps an RPN opcode to its element category.  The opcode space is laid
   out as contiguous ranges: operators, then oNull (the numeric-constant
   marker), functions, image attributes, symbols and finally controls. */
static ElementTypeE TypeOfOpr (int op)
{
  ElementTypeE type;

  if (op < oNull)
    type = etOperator;
  else if (op == oNull)
    type = etConstant;
  else if (op <= fNull)
    type = etFunction;
  else if (op <= aNull)
    type = etImgAttr;
  else if (op <= sNull)
    type = etSymbol;
  else if (op <= rNull)
    type = etControl;
  else
    type = (ElementTypeE) 0; /* out of range: should not happen */
  return type;
}
/* Copies up to len-1 bytes of pExp into pfx->ShortExp for use in error
   messages, truncating with "..." when too long or at the first newline
   or carriage return.  Returns pfx->ShortExp. */
static char * SetPtrShortExp (FxInfo * pfx, char * pExp, size_t len)
{
#define MaxLen 20
size_t slen;
char * p;
*pfx->ShortExp = '\0';
if (pExp && len) {
/* CopyMagickString returns the length of the source string. */
slen = CopyMagickString (pfx->ShortExp, pExp, len);
if (slen > MaxLen) {
(void) CopyMagickString (pfx->ShortExp+MaxLen, "...", 4);
}
/* Keep the report on a single line. */
p = strchr (pfx->ShortExp, '\n');
if (p) (void) CopyMagickString (p, "...", 4);
p = strchr (pfx->ShortExp, '\r');
if (p) (void) CopyMagickString (p, "...", 4);
}
return pfx->ShortExp;
}
/* Convenience wrapper: snapshot of the current parse position for reports. */
static char * SetShortExp (FxInfo * pfx)
{
return SetPtrShortExp (pfx, pfx->pex, MaxTokenLen-1);
}
static int FindUserSymbol (FxInfo * pfx, char * name)
/* returns index into pfx->UserSymbols, and thus into pfxrt->UserSymVals,
or NULL_ADDRESS if not found.
Comparison is case-insensitive (LocaleNCompare) and length-exact.
*/
{
int i;
size_t lenName;
lenName = strlen (name);
for (i=0; i < pfx->usedUserSymbols; i++) {
UserSymbolT *pus = &pfx->UserSymbols[i];
if (lenName == pus->len && LocaleNCompare (name, pus->pex, lenName)==0) break;
}
if (i == pfx->usedUserSymbols) return NULL_ADDRESS;
return i;
}
/* Grows the user-symbol table by TableExtend (10%).  On failure raises
   an exception and returns MagickFalse; ResizeMagickMemory leaves
   pfx->UserSymbols NULL in that case. */
static MagickBooleanType ExtendUserSymbols (FxInfo * pfx)
{
pfx->numUserSymbols = (int) ceil (pfx->numUserSymbols * (1 + TableExtend));
pfx->UserSymbols = (UserSymbolT*) ResizeMagickMemory (pfx->UserSymbols, pfx->numUserSymbols * sizeof(UserSymbolT));
if (!pfx->UserSymbols) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"UserSymbols", "%i",
pfx->numUserSymbols);
return MagickFalse;
}
return MagickTrue;
}
/* Appends a user symbol (a view into the expression text, not a copy)
   and returns its index, or -1 on allocation failure. */
static int AddUserSymbol (FxInfo * pfx, char * pex, size_t len)
{
UserSymbolT *pus;
if (++pfx->usedUserSymbols >= pfx->numUserSymbols) {
if (!ExtendUserSymbols (pfx)) return -1;
}
pus = &pfx->UserSymbols[pfx->usedUserSymbols-1];
pus->pex = pex;
pus->len = len;
return pfx->usedUserSymbols-1;
}
/* Debugging utility: writes the operator, function, image-attribute,
   symbol and control token tables to the stream fh.
   Fix: the section headers were written to stderr while the entries went
   to fh, interleaving the output incorrectly whenever fh != stderr; all
   output now goes to fh. */
static void DumpTables (FILE * fh)
{
  int i;
  for (i=0; i <= rNull; i++) {
    const char * str = "";
    if ( i < oNull) str = Operators[i].str;
    if (i >= FirstFunc && i < fNull) str = Functions[i-FirstFunc].str;
    if (i >= FirstImgAttr && i < aNull) str = ImgAttrs[i-FirstImgAttr].str;
    if (i >= FirstSym && i < sNull) str = Symbols[i-FirstSym].str;
    if (i >= FirstCont && i < rNull) str = Controls[i-FirstCont].str;
    if (i==0 ) fprintf (fh, "Operators:\n ");
    else if (i==oNull) fprintf (fh, "\nFunctions:\n ");
    else if (i==fNull) fprintf (fh, "\nImage attributes:\n ");
    else if (i==aNull) fprintf (fh, "\nSymbols:\n ");
    else if (i==sNull) fprintf (fh, "\nControls:\n ");
    fprintf (fh, " %s", str);
  }
  fprintf (fh, "\n");
}
/* Copies the ndx-th user symbol's text (NUL-terminated) into buf and
   returns buf.  buf must be at least pus->len+1 bytes (callers pass a
   MagickPathExtent buffer). */
static char * NameOfUserSym (FxInfo * pfx, int ndx, char * buf)
{
UserSymbolT * pus;
assert (ndx >= 0 && ndx < pfx->usedUserSymbols);
pus = &pfx->UserSymbols[ndx];
(void) CopyMagickString (buf, pus->pex, pus->len+1);
return buf;
}
/* Debugging utility: lists every user symbol with its index. */
static void DumpUserSymbols (FxInfo * pfx, FILE * fh)
{
char UserSym[MagickPathExtent];
int i;
fprintf (fh, "UserSymbols (%i)\n", pfx->usedUserSymbols);
for (i=0; i < pfx->usedUserSymbols; i++) {
fprintf (fh, " %i: '%s'\n", i, NameOfUserSym (pfx, i, UserSym));
}
}
/* Allocates the parse-time tables: user symbols, RPN elements and the
   operator stack.  On any failure raises an exception and returns
   MagickFalse (earlier allocations are released by DestroyRPN). */
static MagickBooleanType BuildRPN (FxInfo * pfx)
{
pfx->numUserSymbols = InitNumUserSymbols;
pfx->usedUserSymbols = 0;
pfx->UserSymbols = (UserSymbolT*) AcquireMagickMemory (pfx->numUserSymbols * sizeof(UserSymbolT));
if (!pfx->UserSymbols) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"UserSymbols", "%i",
pfx->numUserSymbols);
return MagickFalse;
}
pfx->numElements = RpnInit;
pfx->usedElements = 0;
pfx->Elements = NULL;
pfx->Elements = (ElementT*) AcquireMagickMemory (pfx->numElements * sizeof(ElementT));
if (!pfx->Elements) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Elements", "%i",
pfx->numElements);
return MagickFalse;
}
pfx->usedOprStack = 0;
pfx->maxUsedOprStack = 0;
pfx->numOprStack = InitNumOprStack;
pfx->OperatorStack = (OperatorE*) AcquireMagickMemory (pfx->numOprStack * sizeof(OperatorE));
if (!pfx->OperatorStack) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"OperatorStack", "%i",
pfx->numOprStack);
return MagickFalse;
}
return MagickTrue;
}
/* Allocates the per-thread run-time state: private RNG, value stack
   (sized from the parse-time operator-stack high-water mark) and the
   user-symbol value array (zero-initialised).
   Fix: removed a stray empty statement (";;") left after the RNG
   warm-up loop. */
static MagickBooleanType AllocFxRt (FxInfo * pfx, fxRtT * pfxrt)
{
  int nRnd;
  int i;
  pfxrt->random_info = AcquireRandomInfo ();
  pfxrt->thisPixel = NULL;
  /* Warm up the generator by discarding some values.
     NOTE(review): if GetPseudoRandomValue returns a value in [0,1), the
     (int) cast truncates it to 0 and nRnd is always 20 — confirm intent. */
  nRnd = 20 + 10 * (int) GetPseudoRandomValue (pfxrt->random_info);
  for (i=0; i < nRnd; i++) (void) GetPseudoRandomValue (pfxrt->random_info);
  pfxrt->usedValStack = 0;
  /* Two value-stack slots per operator-stack slot is a safe upper bound. */
  pfxrt->numValStack = 2 * pfx->maxUsedOprStack;
  if (pfxrt->numValStack < MinValStackSize) pfxrt->numValStack = MinValStackSize;
  pfxrt->ValStack = (fxFltType*) AcquireMagickMemory (pfxrt->numValStack * sizeof(fxFltType));
  if (!pfxrt->ValStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "ValStack", "%i",
      pfxrt->numValStack);
    return MagickFalse;
  }
  pfxrt->UserSymVals = NULL;
  if (pfx->usedUserSymbols) {
    pfxrt->UserSymVals = (fxFltType*) AcquireMagickMemory (pfx->usedUserSymbols * sizeof(fxFltType));
    if (!pfxrt->UserSymVals) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "UserSymVals", "%i",
        pfx->usedUserSymbols);
      return MagickFalse;
    }
    for (i = 0; i < pfx->usedUserSymbols; i++) pfxrt->UserSymVals[i] = (fxFltType) 0;
  }
  return MagickTrue;
}
/* Grows the RPN element table by TableExtend (10%).  On failure raises
   an exception and returns MagickFalse. */
static MagickBooleanType ExtendRPN (FxInfo * pfx)
{
pfx->numElements = (int) ceil (pfx->numElements * (1 + TableExtend));
pfx->Elements = (ElementT*) ResizeMagickMemory (pfx->Elements, pfx->numElements * sizeof(ElementT));
if (!pfx->Elements) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Elements", "%i",
pfx->numElements);
return MagickFalse;
}
return MagickTrue;
}
/* True for operators that modify a user symbol in place: the compound
   assignments ("+=", "-=", "*=", "/=") and the "++"/"--" forms. */
static MagickBooleanType inline OprInPlace (int op)
{
  if (op < oAddEq) return MagickFalse;
  if (op > oSubSub) return MagickFalse;
  return MagickTrue;
}
/* Returns the display text for any opcode, selecting the table by the
   contiguous opcode ranges.  Note each range is inclusive of its xNull
   sentinel, whose table row provides a placeholder string. */
static const char * OprStr (int oprNum)
{
const char * str;
if (oprNum < 0) str = "bad OprStr";
else if (oprNum <= oNull) str = Operators[oprNum].str;
else if (oprNum <= fNull) str = Functions[oprNum-FirstFunc].str;
else if (oprNum <= aNull) str = ImgAttrs[oprNum-FirstImgAttr].str;
else if (oprNum <= sNull) str = Symbols[oprNum-FirstSym].str;
else if (oprNum <= rNull) str = Controls[oprNum-FirstCont].str;
else {
str = "bad OprStr";
}
return str;
}
/* Debugging utility: dumps the compiled RPN program to fh, including a
   first pass that counts, for each element, how many goto-style elements
   target it (nDest).
   Fix: four fprintf calls wrote to stderr instead of fh (the run-type
   line and the channel-qualifier reports), interleaving incorrectly
   whenever fh != stderr; all output now goes to fh. */
static MagickBooleanType DumpRPN (FxInfo * pfx, FILE * fh)
{
  int i;
  fprintf (fh, "DumpRPN:");
  fprintf (fh, " numElements=%i", pfx->numElements);
  fprintf (fh, " usedElements=%i", pfx->usedElements);
  fprintf (fh, " maxUsedOprStack=%i", pfx->maxUsedOprStack);
  fprintf (fh, " ImgListLen=%g", (double) pfx->ImgListLen);
  fprintf (fh, " NeedStats=%s", pfx->NeedStats ? "yes" : "no");
  fprintf (fh, " GotStats=%s", pfx->GotStats ? "yes" : "no");
  fprintf (fh, " NeedHsl=%s\n", pfx->NeedHsl ? "yes" : "no");
  if (pfx->runType==rtEntireImage) fprintf (fh, "EntireImage");
  else if (pfx->runType==rtCornerOnly) fprintf (fh, "CornerOnly");
  fprintf (fh, "\n");
  /* First pass: count incoming jumps per element. */
  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    pel->nDest = 0;
  }
  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    if (pel->oprNum == rGoto || pel->oprNum == rIfZeroGoto || pel->oprNum == rIfNotZeroGoto) {
      if (pel->EleNdx >= 0 && pel->EleNdx < pfx->numElements) {
        ElementT * pelDest = &pfx->Elements[pel->EleNdx];
        pelDest->nDest++;
      }
    }
  }
  /* Second pass: one line per element. */
  for (i=0; i < pfx->usedElements; i++) {
    char UserSym[MagickPathExtent];
    ElementT * pel = &pfx->Elements[i];
    const char * str = OprStr (pel->oprNum);
    const char *sRelAbs = "";
    if (pel->oprNum == fP || pel->oprNum == fUP || pel->oprNum == fVP || pel->oprNum == fSP)
      sRelAbs = pel->IsRelative ? "[]" : "{}";
    if (pel->type == etColourConstant)
      fprintf (fh, " %i: %s vals=%.*Lg,%.*Lg,%.*Lg '%s%s' nArgs=%i ndx=%i %s",
        i, sElementTypes[pel->type],
        pfx->precision, pel->val, pfx->precision, pel->val1, pfx->precision, pel->val2,
        str, sRelAbs, pel->nArgs, pel->EleNdx,
        pel->DoPush ? "push" : "NO push");
    else
      fprintf (fh, " %i: %s val=%.*Lg '%s%s' nArgs=%i ndx=%i %s",
        i, sElementTypes[pel->type], pfx->precision, pel->val, str, sRelAbs,
        pel->nArgs, pel->EleNdx,
        pel->DoPush ? "push" : "NO push");
    if (pel->ImgAttrQual != aNull)
      fprintf (fh, " ia=%s", OprStr(pel->ImgAttrQual));
    if (pel->ChannelQual != NO_CHAN_QUAL) {
      if (pel->ChannelQual == THIS_CHANNEL) fprintf (fh, " ch=this");
      else fprintf (fh, " ch=%i", pel->ChannelQual);
    }
    if (pel->oprNum == rCopyTo) {
      fprintf (fh, " CopyTo ==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (pel->oprNum == rCopyFrom) {
      fprintf (fh, " CopyFrom <== %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (OprInPlace (pel->oprNum)) {
      fprintf (fh, " <==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    }
    if (pel->nDest > 0) fprintf (fh, " <==dest(%i)", pel->nDest);
    fprintf (fh, "\n");
  }
  return MagickTrue;
}
/* Releases the parse-time tables (operator stack, RPN elements and user
   symbols) and resets their counters.  Safe on partially built state:
   each pointer is checked before release. */
static void DestroyRPN (FxInfo * pfx)
{
  pfx->numOprStack = 0;
  pfx->usedOprStack = 0;
  if (pfx->OperatorStack != NULL)
    pfx->OperatorStack = (OperatorE*) RelinquishMagickMemory (pfx->OperatorStack);

  pfx->numElements = 0;
  pfx->usedElements = 0;
  if (pfx->Elements != NULL)
    pfx->Elements = (ElementT*) RelinquishMagickMemory (pfx->Elements);

  pfx->usedUserSymbols = 0;
  if (pfx->UserSymbols != NULL)
    pfx->UserSymbols = (UserSymbolT*) RelinquishMagickMemory (pfx->UserSymbols);
}
/* Releases a thread's run-time state: value stack, user-symbol values
   and the private random generator. */
static void DestroyFxRt (fxRtT * pfxrt)
{
  pfxrt->usedValStack = 0;
  if (pfxrt->ValStack != NULL)
    pfxrt->ValStack = (fxFltType*) RelinquishMagickMemory (pfxrt->ValStack);
  if (pfxrt->UserSymVals != NULL)
    pfxrt->UserSymVals = (fxFltType*) RelinquishMagickMemory (pfxrt->UserSymVals);
  pfxrt->random_info = DestroyRandomInfo (pfxrt->random_info);
}
static size_t GetToken (FxInfo * pfx)
/* Returns length of token that starts with an alpha,
   or 0 if it isn't a token that starts with an alpha.
   j0 and j1 have trailing digit.
   Also colours like "gray47" have more trailing digits.
   After intial alpha(s) also allow single "_", eg "standard_deviation".
   Does not advance pfx->pex.
   This splits "mean.r" etc.
   Fix: the ctype calls now cast through unsigned char; passing a plain
   char (which may be negative) to isalpha()/isdigit() is undefined
   behaviour, and the rest of this file already uses that cast pattern.
*/
{
  char * p = pfx->pex;
  size_t len = 0;
  *pfx->token = '\0';
  pfx->lenToken = 0;
  if (!isalpha((int) ((unsigned char) *p))) return 0;

  /* Regard strings that start "icc-" or "device-",
     followed by any number of alphas,
     as a token.
  */
  if (LocaleNCompare (p, "icc-", 4) == 0) {
    len = 4;
    p += 4;
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
  } else if (LocaleNCompare (p, "device-", 7) == 0) {
    len = 7;
    p += 7;
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
  } else {
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
    if (*p == '_') { len++; p++; }
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
    while (isdigit ((int) ((unsigned char) *p))) { len++; p++; }
  }
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetToken: too long", "%g at '%s'",
      (double) len, SetShortExp(pfx));
    len = MaxTokenLen;
  }
  if (len) {
    (void) CopyMagickString (pfx->token, pfx->pex, (len+1<MaxTokenLen)?len+1:MaxTokenLen);
  }
  pfx->lenToken = strlen (pfx->token);
  return len;
}
/* A token can be a user symbol only if it is purely alphabetic and at
   least two characters long (single letters are reserved symbols).
   Fix: isalpha() now receives an unsigned-char value; a plain char may
   be negative, which makes the call undefined behaviour. */
static MagickBooleanType TokenMaybeUserSymbol (FxInfo * pfx)
{
  char * p = pfx->token;
  int i = 0;
  while (*p) {
    if (!isalpha ((int) ((unsigned char) *p++))) return MagickFalse;
    i++;
  }
  if (i < 2) return MagickFalse;
  return MagickTrue;
}
/* Appends an element to the RPN program, growing the table if needed,
   and fills in all fields with defaults.  The argument count is looked
   up in the table that matches the opcode's range (so enum/table order
   must stay aligned).  Returns MagickFalse on allocation failure. */
static MagickBooleanType AddElement (FxInfo * pfx, fxFltType val, int oprNum)
{
ElementT * pel;
assert (oprNum <= rNull);
if (++pfx->usedElements >= pfx->numElements) {
if (!ExtendRPN (pfx)) return MagickFalse;
}
pel = &pfx->Elements[pfx->usedElements-1];
pel->type = TypeOfOpr (oprNum);
pel->val = val;
pel->val1 = (fxFltType) 0;
pel->val2 = (fxFltType) 0;
pel->oprNum = oprNum;
pel->DoPush = MagickTrue;
pel->EleNdx = 0;
pel->ChannelQual = NO_CHAN_QUAL;
pel->ImgAttrQual = aNull;
pel->nDest = 0;
pel->pExpStart = NULL;
pel->lenExp = 0;
/* Argument count comes from the table for the opcode's range;
   attributes and symbols take no arguments. */
if (oprNum <= oNull) pel->nArgs = Operators[oprNum].nArgs;
else if (oprNum <= fNull) pel->nArgs = Functions[oprNum-FirstFunc].nArgs;
else if (oprNum <= aNull) pel->nArgs = 0;
else if (oprNum <= sNull) pel->nArgs = 0;
else pel->nArgs = Controls[oprNum-FirstCont].nArgs;
return MagickTrue;
}
/* Appends a control element whose EleNdx is a jump target or symbol
   index.  Pure control transfers produce no value, so they are marked
   as not pushing onto the value stack. */
static MagickBooleanType AddAddressingElement (FxInfo * pfx, int oprNum, int EleNdx)
{
ElementT * pel;
if (!AddElement (pfx, (fxFltType) 0, oprNum)) return MagickFalse;
pel = &pfx->Elements[pfx->usedElements-1];
pel->EleNdx = EleNdx;
if (oprNum == rGoto || oprNum == rIfZeroGoto || oprNum == rIfNotZeroGoto
|| oprNum == rZerStk)
{
pel->DoPush = MagickFalse;
}
/* Note: for() may or may not need pushing,
depending on whether the value is needed, eg "for(...)+2" or debug(for(...)).
*/
return MagickTrue;
}
/* Appends a colour constant carrying three channel values; built on top
   of a plain numeric-constant element (oNull) retyped to
   etColourConstant with the extra two values filled in. */
static MagickBooleanType AddColourElement (FxInfo * pfx, fxFltType val0, fxFltType val1, fxFltType val2)
{
  ElementT * pel;
  if (!AddElement (pfx, val0, oNull)) return MagickFalse;
  pel = &pfx->Elements[pfx->usedElements-1];
  pel->type = etColourConstant;
  pel->val1 = val1;
  pel->val2 = val2;
  return MagickTrue;
}
/* Advances the parse pointer past any white space.
   Fix: cast through unsigned char — passing a plain (possibly negative)
   char to isspace() is undefined behaviour. */
static void inline SkipSpaces (FxInfo * pfx)
{
  while (isspace ((int) ((unsigned char) *pfx->pex))) pfx->pex++;
}
/* Returns the next non-space character without consuming it
   (but the intervening white space IS consumed). */
static char inline PeekChar (FxInfo * pfx)
{
SkipSpaces (pfx);
return *pfx->pex;
}
/* Case-insensitively tests whether the input (after white space) starts
   with str, without consuming it. */
static MagickBooleanType inline PeekStr (FxInfo * pfx, const char * str)
{
SkipSpaces (pfx);
return (LocaleNCompare (pfx->pex, str, strlen(str))==0 ? MagickTrue : MagickFalse);
}
/* Consumes the next non-space character if it equals c; otherwise raises
   an exception and returns MagickFalse without consuming it. */
static MagickBooleanType ExpectChar (FxInfo * pfx, char c)
{
if (PeekChar (pfx) != c) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected char", "'%c' at '%s'", c, SetShortExp (pfx));
return MagickFalse;
}
pfx->pex++;
return MagickTrue;
}
static int MaybeXYWH (FxInfo * pfx, ImgAttrE * pop)
/* If ".x" or ".y" or ".width" or ".height" increments *pop and returns 1 to 4 .
Otherwise returns 0.
Only aPage, aPrintsize and aRes accept a qualifier; the increment relies
on the .x/.y/(.width/.height) enum members immediately following their
base member.  "width"/"height" are only valid for aPage.
*/
{
int ret=0;
if (*pop != aPage && *pop != aPrintsize && *pop != aRes) return 0;
if (PeekChar (pfx) != '.') return 0;
if (!ExpectChar (pfx, '.')) return 0;
(void) GetToken (pfx);
if (LocaleCompare ("x", pfx->token)==0) ret=1;
else if (LocaleCompare ("y", pfx->token)==0) ret=2;
else if (LocaleCompare ("width", pfx->token)==0) ret=3;
else if (LocaleCompare ("height", pfx->token)==0) ret=4;
if (!ret)
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Invalid 'x' or 'y' or 'width' or 'height' token=", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
if (*pop == aPage) (*pop) = (ImgAttrE) (*pop + ret);
else {
if (ret > 2) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Invalid 'width' or 'height' token=", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
} else {
(*pop) = (ImgAttrE) (*pop + ret);
}
}
pfx->pex+=pfx->lenToken;
return ret;
}
/* Grows the operator stack by TableExtend (10%).  On failure raises an
   exception and returns MagickFalse. */
static MagickBooleanType ExtendOperatorStack (FxInfo * pfx)
{
pfx->numOprStack = (int) ceil (pfx->numOprStack * (1 + TableExtend));
pfx->OperatorStack = (OperatorE*) ResizeMagickMemory (pfx->OperatorStack, pfx->numOprStack * sizeof(OperatorE));
if (!pfx->OperatorStack) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"OprStack", "%i",
pfx->numOprStack);
return MagickFalse;
}
return MagickTrue;
}
/* Pushes op on the operator stack, growing it when full, and records the
   high-water mark (later used to size the run-time value stack). */
static MagickBooleanType PushOperatorStack (FxInfo * pfx, int op)
{
  pfx->usedOprStack++;
  if (pfx->usedOprStack >= pfx->numOprStack &&
      !ExtendOperatorStack (pfx))
    return MagickFalse;
  pfx->OperatorStack[pfx->usedOprStack-1] = (OperatorE) op;
  if (pfx->maxUsedOprStack < pfx->usedOprStack)
    pfx->maxUsedOprStack = pfx->usedOprStack;
  return MagickTrue;
}
/* Classifies the character at the parse pointer as a unary-prefix
   operator or an opening parenthesis; returns oNull for anything else.
   Does not consume the character. */
static OperatorE GetLeadingOp (FxInfo * pfx)
{
  switch (*pfx->pex) {
    case '-': return oUnaryMinus;
    case '+': return oUnaryPlus;
    case '~': return oBitNot;
    case '!': return oLogNot;
    case '(': return oOpenParen;
    default:  return oNull;
  }
}
/* True for the four unary prefix operators. */
static MagickBooleanType inline OprIsUnaryPrefix (OperatorE op)
{
  switch (op) {
    case oUnaryMinus:
    case oUnaryPlus:
    case oBitNot:
    case oLogNot:
      return MagickTrue;
    default:
      return MagickFalse;
  }
}
/* True when the operator stack is non-empty and its top entry is a
   unary prefix operator. */
static MagickBooleanType TopOprIsUnaryPrefix (FxInfo * pfx)
{
  if (pfx->usedOprStack == 0) return MagickFalse;
  return OprIsUnaryPrefix (pfx->OperatorStack[pfx->usedOprStack-1]);
}
/* Pops the top of the operator stack only if it is exactly op (used to
   match an opening paren/bracket/brace); MagickFalse when the stack is
   empty or the top does not match, in which case nothing is popped. */
static MagickBooleanType PopOprOpenParen (FxInfo * pfx, OperatorE op)
{
  MagickBooleanType matched;
  if (pfx->usedOprStack == 0) return MagickFalse;
  matched = (pfx->OperatorStack[pfx->usedOprStack-1] == op) ? MagickTrue : MagickFalse;
  if (matched) pfx->usedOprStack--;
  return matched;
}
static int GetCoordQualifier (FxInfo * pfx, int op)
/* Returns -1 if invalid CoordQualifier, +1 if valid and appropriate.
Only u, v and s accept a coordinate qualifier, which must be a single
'p'/'P' token; on success the p[...] function is parsed via GetFunction.
*/
{
if (op != fU && op != fV && op != fS) return -1;
(void) GetToken (pfx);
if (pfx->lenToken != 1) {
return -1;
}
if (*pfx->token != 'p' && *pfx->token != 'P') return -1;
if (!GetFunction (pfx, fP)) return -1;
return 1;
}
/* Parses a channel qualifier (eg the ".r" of "u.r") for the operators
   that accept one.  Returns the channel (consuming the token), or
   NO_CHAN_QUAL when no recognised qualifier is present or the HLS
   pseudo-channels are combined with an image attribute (an error). */
static PixelChannel GetChannelQualifier (FxInfo * pfx, int op)
{
if (op == fU || op == fV || op == fP ||
op == fUP || op == fVP ||
op == fS || (op >= FirstImgAttr && op <= aNull)
)
{
const ChannelT * pch = &Channels[0];
(void) GetToken (pfx);
/* Channels[] is terminated by a row with an empty string. */
while (*pch->str) {
if (LocaleCompare (pch->str, pfx->token)==0) {
if (op >= FirstImgAttr && op <= (OperatorE)aNull &&
(pch->pixChan == HUE_CHANNEL ||
pch->pixChan == SAT_CHANNEL ||
pch->pixChan == LIGHT_CHANNEL)
)
{
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Can't have image attribute with HLS qualifier at", "'%s'",
SetShortExp(pfx));
return NO_CHAN_QUAL;
}
pfx->pex += pfx->lenToken;
return pch->pixChan;
}
pch++;
}
}
return NO_CHAN_QUAL;
}
/* Looks up the current token in ImgAttrs[]; on a match consumes the
   token, flags NeedStats when required, and lets MaybeXYWH absorb any
   ".x"/".y"/".width"/".height" qualifier.  Returns aNull when the token
   is not an image attribute; raises an exception when a base attribute
   that requires a qualifier (page/printsize/resolution) is left bare. */
static ImgAttrE GetImgAttrToken (FxInfo * pfx)
{
ImgAttrE ia = aNull;
const char * iaStr;
for (ia = FirstImgAttr; ia < aNull; ia=(ImgAttrE) (ia+1)) {
iaStr = ImgAttrs[ia-FirstImgAttr].str;
if (LocaleCompare (iaStr, pfx->token)==0) {
pfx->pex += strlen(pfx->token);
if (ImgAttrs[ia-FirstImgAttr].NeedStats == 1) pfx->NeedStats = MagickTrue;
MaybeXYWH (pfx, &ia);
break;
}
}
/* Still a bare page/printsize/resolution: MaybeXYWH found no qualifier. */
if (ia == aPage || ia == aPrintsize || ia == aRes) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Attribute", "'%s' needs qualifier at '%s'",
iaStr, SetShortExp(pfx));
}
return ia;
}
/* Parses an image-attribute qualifier (eg the "mean" of "u.mean") for
   the functions that accept one; returns aNull when op does not accept
   a qualifier or no token is present. */
static ImgAttrE GetImgAttrQualifier (FxInfo * pfx, int op)
{
  if (op != (OperatorE)fU && op != (OperatorE)fV &&
      op != (OperatorE)fP && op != (OperatorE)fS)
    return aNull;
  (void) GetToken (pfx);
  if (pfx->lenToken == 0)
    return aNull;
  return GetImgAttrToken (pfx);
}
/* Consumes a '.' qualifier separator if one follows (after white space);
   returns whether it was found. */
static MagickBooleanType IsQualifier (FxInfo * pfx)
{
  if (PeekChar (pfx) != '.')
    return MagickFalse;
  pfx->pex++;
  return MagickTrue;
}
static ssize_t GetProperty (FxInfo * pfx, fxFltType *val)
/* Parses a "%[...]" image-property reference, interprets it and stores
   its numeric value in *val.
   returns number of character to swallow.
   "-1" means invalid input
   "0" means no relevant input (don't swallow, but not an error)
   Fix: the "not a number" error path destroyed the interpreted text and
   then passed the (now NULL) pointer to the exception's '%s' — a
   use-after-destroy.  The string is now destroyed after the message is
   formatted.  DestroyString is also no longer called on a NULL text.
*/
{
  if (PeekStr (pfx, "%[")) {
    int level = 0;
    size_t len;
    char sProperty [MagickPathExtent];
    char * p = pfx->pex + 2;
    /* Find the matching ']', allowing nested '[...]'. */
    while (*p) {
      if (*p == '[') level++;
      else if (*p == ']') {
        if (level == 0) break;
        level--;
      }
      p++;
    }
    if (!*p || level != 0) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "After '%[' expected ']' at", "'%s'",
        SetShortExp(pfx));
      return -1;
    }
    len = (size_t) (p - pfx->pex + 1);
    if (len > MaxTokenLen) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Too much text between '%[' and ']' at", "'%s'",
        SetShortExp(pfx));
      return -1;
    }
    (void) CopyMagickString (sProperty, pfx->pex, len+1);
    sProperty[len] = '\0';
    {
      char * tailptr;
      char * text;
      text = InterpretImageProperties (pfx->image->image_info, pfx->image,
        sProperty, pfx->exception);
      if (!text || !*text) {
        if (text) text = DestroyString(text);
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Unknown property", "'%s' at '%s'",
          sProperty, SetShortExp(pfx));
        return -1;
      }
      *val = strtold (text, &tailptr);
      if (text == tailptr) {
        /* Format the message BEFORE destroying the text it reports. */
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Property", "'%s' text '%s' is not a number at '%s'",
          sProperty, text, SetShortExp(pfx));
        text = DestroyString(text);
        return -1;
      }
      text = DestroyString(text);
    }
    return ((ssize_t) len);
  }
  return 0;
}
static ssize_t inline GetConstantColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2)
/* Finds named colour such as "blue" and colorspace function such as "lab(10,20,30)".
Returns number of characters to swallow.
Return -1 means apparantly a constant colour, but with an error.
Return 0 means not a constant colour, but not an error.
On success the red/green/blue components, scaled to 0..1, are stored in
*v0, *v1, *v2.
*/
{
PixelInfo
colour;
ExceptionInfo
*dummy_exception = AcquireExceptionInfo ();
char
*p;
MagickBooleanType
IsGray,
IsIcc,
IsDev;
char ColSp[MagickPathExtent];
(void) CopyMagickString (ColSp, pfx->token, MaxTokenLen);
/* Strip a trailing 'a'/'A' (eg "rgba" -> "rgb") before the colorspace lookup. */
p = ColSp + pfx->lenToken - 1;
if (*p == 'a' || *p == 'A') *p = '\0';
(void) GetPixelInfo (pfx->image, &colour);
/* "gray" is both a colorspace and a named colour. */
IsGray = (LocaleCompare (ColSp, "gray") == 0) ? MagickTrue : MagickFalse;
IsIcc = (LocaleCompare (ColSp, "icc-color") == 0) ? MagickTrue : MagickFalse;
IsDev = (LocaleNCompare (ColSp, "device-", 7) == 0) ? MagickTrue : MagickFalse;
/* QueryColorCompliance will raise a warning if it isn't a colour, so we discard any exceptions.
*/
if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, dummy_exception) || IsGray) {
/* Not a plain named colour; try a colorspace function such as "lab(...)". */
ssize_t type = ParseCommandOption (MagickColorspaceOptions, MagickFalse, ColSp);
if (type >= 0 || IsIcc || IsDev) {
char * q = pfx->pex + pfx->lenToken;
while (isspace((int) ((unsigned char) *q))) q++;
if (*q == '(') {
/* Swallow up to and including the closing ')', then query the
   whole "name(...)" text as a colour. */
size_t lenfun;
char sFunc[MagickPathExtent];
while (*q && *q != ')') q++;
if (!*q) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"constant color missing ')'", "at '%s'",
SetShortExp(pfx));
dummy_exception = DestroyExceptionInfo (dummy_exception);
return -1;
}
lenfun = (size_t) (q - pfx->pex + 1);
if (lenfun > MaxTokenLen) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"lenfun too long", "'%lu' at '%s'",
lenfun, SetShortExp(pfx));
dummy_exception = DestroyExceptionInfo (dummy_exception);
return -1;
}
(void) CopyMagickString (sFunc, pfx->pex, lenfun+1);
if (QueryColorCompliance (sFunc, AllCompliance, &colour, dummy_exception)) {
*v0 = colour.red / QuantumRange;
*v1 = colour.green / QuantumRange;
*v2 = colour.blue / QuantumRange;
dummy_exception = DestroyExceptionInfo (dummy_exception);
return (ssize_t)lenfun;
}
} else {
/* Colorspace name without "(...)": a syntax error. */
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"colorspace but not a valid color with '(...)' at", "'%s'",
SetShortExp(pfx));
dummy_exception = DestroyExceptionInfo (dummy_exception);
return -1;
}
}
if (!IsGray) {
/* Neither a named colour nor a colorspace function: not an error. */
dummy_exception = DestroyExceptionInfo (dummy_exception);
return 0;
}
}
/* Named colour (or bare "gray"): return its components. */
*v0 = colour.red / QuantumRange;
*v1 = colour.green / QuantumRange;
*v2 = colour.blue / QuantumRange;
dummy_exception = DestroyExceptionInfo (dummy_exception);
return (ssize_t)strlen (pfx->token);
}
static ssize_t inline GetHexColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2)
/* Parses a "#rrggbb"-style hex colour at pfx->pex, storing the components
   (scaled to 0..1) in *v0, *v1, *v2.
   Returns number of characters to swallow.
   Negative return means it starts with '#', but invalid hex number.
   Zero means not a hex colour (no '#', or no hex digits).
*/
{
  char * p;
  size_t len;
  PixelInfo colour;

  if (*pfx->pex != '#') return 0;

  /* find end of hex digits. */
  p = pfx->pex + 1;
  /* Cast through unsigned char: passing a plain (possibly negative) char
     to the <ctype.h> classifiers is undefined behaviour. */
  while (isxdigit ((int) ((unsigned char) *p))) p++;
  if (isalpha ((int) ((unsigned char) *p))) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bad hex number at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }
  len = (size_t) (p - pfx->pex);
  if (len < 1) return 0;
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Hex colour too long at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }
  (void) CopyMagickString (pfx->token, pfx->pex, len+1);
  (void) GetPixelInfo (pfx->image, &colour);
  if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, pfx->exception)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "QueryColorCompliance rejected", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return -1;
  }
  *v0 = colour.red / QuantumRange;
  *v1 = colour.green / QuantumRange;
  *v2 = colour.blue / QuantumRange;
  return (ssize_t) len;
}
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe)
/* Translates a function call "fe" (whose name token has already been read)
   into elements, handling its argument list and any qualifiers.
   Control functions (while/do/for/if) emit addressing elements whose jump
   targets are back-patched at the end.  Returns MagickFalse on error
   (an exception has been raised).
*/
{
/* A function, so get open-parens, n args, close-parens
*/
const char * funStr = Functions[fe-FirstFunc].str;
int nArgs = Functions[fe-FirstFunc].nArgs;
char chLimit = ')';
char expChLimit = ')';
const char *strLimit = ",)";
OperatorE pushOp = oOpenParen;
char * pExpStart;
int lenExp = 0;
int FndArgs = 0;
/* ndx0..ndx3 record element indexes for back-patching jump addresses. */
int ndx0 = NULL_ADDRESS, ndx1 = NULL_ADDRESS, ndx2 = NULL_ADDRESS, ndx3 = NULL_ADDRESS;
MagickBooleanType coordQual = MagickFalse;
PixelChannel chQual = NO_CHAN_QUAL;
ImgAttrE iaQual = aNull;
pfx->pex += pfx->lenToken;
/* "p", "u", "v", "s" use '{...}' or '[...]' instead of '(...)';
   ']' as the closing delimiter later marks the reference as relative. */
if (fe == fP) {
char p = PeekChar (pfx);
if (p=='{') {
(void) ExpectChar (pfx, '{');
pushOp = oOpenBrace;
strLimit = ",}";
chLimit = '}';
expChLimit = '}';
} else if (p=='[') {
(void) ExpectChar (pfx, '[');
pushOp = oOpenBracket;
strLimit = ",]";
chLimit = ']';
expChLimit = ']';
} else {
/* Bare "p" with no delimiter takes no arguments. */
nArgs = 0;
chLimit = ']';
expChLimit = ']';
}
} else if (fe == fU) {
char p = PeekChar (pfx);
if (p=='[') {
(void) ExpectChar (pfx, '[');
pushOp = oOpenBracket;
strLimit = ",]";
chLimit = ']';
expChLimit = ']';
} else {
nArgs = 0;
chLimit = ']';
expChLimit = ']';
}
} else if (fe == fV || fe == fS) {
nArgs = 0;
pushOp = oOpenBracket;
chLimit = ']';
expChLimit = ']';
} else {
if (!ExpectChar (pfx, '(')) return MagickFalse;
}
if (!PushOperatorStack (pfx, pushOp)) return MagickFalse;
pExpStart = pfx->pex;
ndx0 = pfx->usedElements;
if (fe==fDo) {
(void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx1+1 */
}
/* Translate each argument expression in turn, inserting the control-flow
   addressing elements between them. */
while (nArgs > 0) {
int FndOne = 0;
if (TranslateStatementList (pfx, strLimit, &chLimit)) {
FndOne = 1;
} else {
/* Maybe don't break because other expressions may be not empty. */
if (!chLimit) break;
if (fe == fP || fe == fS|| fe == fIf) {
/* Empty argument permitted: substitute a zero constant. */
(void) AddElement (pfx, (fxFltType) 0, oNull);
FndOne = 1;
}
}
if (strchr (strLimit, chLimit)==NULL) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s' expected one of '%s' after expression but found '%c' at '%s'",
funStr, strLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
return MagickFalse;
}
if (FndOne) {
FndArgs++;
nArgs--;
}
/* After the n-th argument, emit the control elements for loops and "if". */
switch (FndArgs) {
case 1:
ndx1 = pfx->usedElements;
if (fe==fWhile) {
(void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
} else if (fe==fDo) {
(void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
} else if (fe==fFor) {
pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
} else if (fe==fIf) {
(void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2 + 1 */
pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from if() */
}
break;
case 2:
ndx2 = pfx->usedElements;
if (fe==fWhile) {
pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
(void) AddAddressingElement (pfx, rGoto, ndx0);
} else if (fe==fDo) {
pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
(void) AddAddressingElement (pfx, rGoto, ndx0 + 1);
} else if (fe==fFor) {
(void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx3 */
pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from for() */
(void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
} else if (fe==fIf) {
(void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx3 */
}
break;
case 3:
if (fe==fFor) {
pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
(void) AddAddressingElement (pfx, rGoto, ndx1);
}
ndx3 = pfx->usedElements;
break;
default:
break;
}
if (chLimit == expChLimit) {
lenExp = pfx->pex - pExpStart - 1;
break;
}
} /* end while args of a function */
if (chLimit && chLimit != expChLimit && chLimit != ',' ) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s' expected '%c', found '%c' at '%s'",
funStr, expChLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
return MagickFalse;
}
/* p/s/u accept fewer arguments; pad the rest with zero constants. */
if (fe == fP || fe == fS || fe == fU) {
while (FndArgs < Functions[fe-FirstFunc].nArgs) {
(void) AddElement (pfx, (fxFltType) 0, oNull);
FndArgs++;
}
}
if (FndArgs > Functions[fe-FirstFunc].nArgs) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s' expected %i arguments, found '%i' at '%s'",
funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
return MagickFalse;
}
if (FndArgs < Functions[fe-FirstFunc].nArgs) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s' expected %i arguments, found too few (%i) at '%s'",
funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
return MagickFalse;
}
if (fe != fS && fe != fV && FndArgs == 0 && Functions[fe-FirstFunc].nArgs == 0) {
/* This is for "rand()" and similar. */
chLimit = expChLimit;
if (!ExpectChar (pfx, ')')) return MagickFalse;
}
if (chLimit != expChLimit) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s', arguments don't end with '%c' at '%s'",
funStr, expChLimit, SetShortExp(pfx));
return MagickFalse;
}
if (!PopOprOpenParen (pfx, pushOp)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bug: For function", "'%s' tos not '%s' at '%s'",
funStr, Operators[pushOp].str, SetShortExp(pfx));
return MagickFalse;
}
/* Handle qualifiers: coordinates ("u.p[...]"), channel (".r" etc)
   or image attribute. */
if (IsQualifier (pfx)) {
if (fe == fU || fe == fV || fe == fS) {
coordQual = (GetCoordQualifier (pfx, fe) == 1) ? MagickTrue : MagickFalse;
if (coordQual) {
/* Remove last element, which should be fP */
ElementT * pel = &pfx->Elements[pfx->usedElements-1];
if (pel->oprNum != fP) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bug: For function", "'%s' last element not 'p' at '%s'",
funStr, SetShortExp(pfx));
return MagickFalse;
}
chQual = pel->ChannelQual;
expChLimit = (pel->IsRelative) ? ']' : '}';
pfx->usedElements--;
/* Switch to the "function with p-coordinate" variants. */
if (fe == fU) fe = fUP;
else if (fe == fV) fe = fVP;
else if (fe == fS) fe = fSP;
funStr = Functions[fe-FirstFunc].str;
}
}
if ( chQual == NO_CHAN_QUAL &&
(fe == fP || fe == fS || fe == fSP || fe == fU || fe == fUP || fe == fV || fe == fVP)
)
{
chQual = GetChannelQualifier (pfx, fe);
}
if (chQual == NO_CHAN_QUAL && (fe == fU || fe == fV || fe == fS)) {
/* Note: we don't allow "p.mean" etc. */
iaQual = GetImgAttrQualifier (pfx, fe);
}
if (IsQualifier (pfx) && chQual == NO_CHAN_QUAL && iaQual != aNull) {
chQual = GetChannelQualifier (pfx, fe);
}
if (coordQual && iaQual != aNull) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s', can't have qualifiers 'p' and image attribute '%s' at '%s'",
funStr, pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (!coordQual && chQual == NO_CHAN_QUAL && iaQual == aNull) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s', bad qualifier '%s' at '%s'",
funStr, pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (!coordQual && chQual == CompositePixelChannel && iaQual == aNull) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"For function", "'%s', bad composite qualifier '%s' at '%s'",
funStr, pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (chQual == HUE_CHANNEL || chQual == SAT_CHANNEL || chQual == LIGHT_CHANNEL) {
pfx->NeedHsl = MagickTrue;
if (iaQual >= FirstImgAttr && iaQual < aNull) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Can't have image attribute with HLS qualifier at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
}
}
/* Back-patch the jump addresses recorded above. */
if (fe==fWhile) {
pfx->Elements[ndx1].EleNdx = ndx2+1;
} else if (fe==fDo) {
pfx->Elements[ndx0].EleNdx = ndx1+1;
pfx->Elements[ndx1].EleNdx = ndx2+1;
} else if (fe==fFor) {
pfx->Elements[ndx2].EleNdx = ndx3;
} else if (fe==fIf) {
pfx->Elements[ndx1].EleNdx = ndx2 + 1;
pfx->Elements[ndx2].EleNdx = ndx3;
} else {
/* "u[0]" with a constant zero argument is the common case:
   replace it by the cheaper fU0. */
if (fe == fU && iaQual == aNull) {
ElementT * pel = &pfx->Elements[pfx->usedElements-1];
if (pel->type == etConstant && pel->val == 0.0) {
pfx->usedElements--;
fe = fU0;
}
}
(void) AddElement (pfx, (fxFltType) 0, fe);
if (fe == fP || fe == fU || fe == fU0 || fe == fUP ||
fe == fV || fe == fVP || fe == fS || fe == fSP)
{
ElementT * pel = &pfx->Elements[pfx->usedElements-1];
pel->IsRelative = (expChLimit == ']' ? MagickTrue : MagickFalse);
if (chQual >= 0) pel->ChannelQual = chQual;
if (iaQual != aNull && (fe == fU || fe == fV || fe == fS)) {
/* Note: we don't allow "p[2,3].mean" or "p.mean" etc. */
pel->ImgAttrQual = iaQual;
}
}
}
/* Remember the argument text for diagnostics (eg debug()). */
if (pExpStart && lenExp) {
ElementT * pel = &pfx->Elements[pfx->usedElements-1];
pel->pExpStart = pExpStart;
pel->lenExp = lenExp;
}
if (fe == fDebug)
pfx->ContainsDebug = MagickTrue;
return MagickTrue;
}
static MagickBooleanType IsStealth (int op)
/* Returns MagickTrue for operators that are internal implementation
   details (fU0/fUP/fSP/fVP and the control/addressing range) and must
   not be written literally in a user expression. */
{
  if (op == fU0 || op == fUP || op == fSP || op == fVP)
    return MagickTrue;
  if (op >= FirstCont && op <= rNull)
    return MagickTrue;
  return MagickFalse;
}
static MagickBooleanType GetOperand (
FxInfo * pfx, MagickBooleanType * UserSymbol, MagickBooleanType * NewUserSymbol, int * UserSymNdx,
MagickBooleanType * needPopAll)
/* Translates one operand: parenthesised sub-expression, unary prefix,
   hex colour, numeric constant (with optional SI/binary suffix), named
   constant, function, image attribute, symbol, named colour, image
   artifact, or user symbol.  For a user symbol nothing is emitted here;
   *UserSymbol/*NewUserSymbol/*UserSymNdx report it to the caller.
   Returns MagickFalse on error (an exception has been raised).
*/
{
*NewUserSymbol = *UserSymbol = MagickFalse;
*UserSymNdx = NULL_ADDRESS;
SkipSpaces (pfx);
if (!*pfx->pex) return MagickFalse;
(void) GetToken (pfx);
if (pfx->lenToken==0) {
/* Try '(' or unary prefix
*/
OperatorE op = GetLeadingOp (pfx);
if (op==oOpenParen) {
char chLimit = '\0';
if (!PushOperatorStack (pfx, op)) return MagickFalse;
pfx->pex++;
if (!TranslateExpression (pfx, ")", &chLimit, needPopAll)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Empty expression in parentheses at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (chLimit != ')') {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"'(' but no ')' at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
/* Top of opr stack should be '('. */
if (!PopOprOpenParen (pfx, oOpenParen)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bug: tos not '(' at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
return MagickTrue;
} else if (OprIsUnaryPrefix (op)) {
if (!PushOperatorStack (pfx, op)) return MagickFalse;
pfx->pex++;
SkipSpaces (pfx);
if (!*pfx->pex) return MagickFalse;
/* Recurse for the operand the unary prefix applies to. */
if (!GetOperand (pfx, UserSymbol, NewUserSymbol, UserSymNdx, needPopAll)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"After unary, bad operand at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (*NewUserSymbol) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"After unary, NewUserSymbol at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (*UserSymbol) {
/* Materialise the symbol's value so the unary operator can act on it. */
(void) AddAddressingElement (pfx, rCopyFrom, *UserSymNdx);
*UserSymNdx = NULL_ADDRESS;
*UserSymbol = MagickFalse;
*NewUserSymbol = MagickFalse;
}
(void) GetToken (pfx);
return MagickTrue;
} else if (*pfx->pex == '#') {
fxFltType v0=0, v1=0, v2=0;
ssize_t lenToken = GetHexColour (pfx, &v0, &v1, &v2);
if (lenToken < 0) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bad hex number at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
} else if (lenToken > 0) {
(void) AddColourElement (pfx, v0, v1, v2);
pfx->pex+=lenToken;
}
return MagickTrue;
}
/* Try a constant number.
*/
{
char * tailptr;
ssize_t lenOptArt;
fxFltType val = strtold (pfx->pex, &tailptr);
if (pfx->pex != tailptr) {
pfx->pex = tailptr;
if (*tailptr) {
/* Could have "prefix" K, Ki, M etc.
See https://en.wikipedia.org/wiki/Metric_prefix
and https://en.wikipedia.org/wiki/Binary_prefix
*/
double Pow = 0.0;
/* '.' marks the centre of the table (no prefix); offsets from it
   give the decimal exponent in steps of 3. */
const char Prefices[] = "yzafpnum.kMGTPEZY";
const char * pSi = strchr (Prefices, *tailptr);
if (pSi && *pSi != '.') Pow = (pSi - Prefices) * 3 - 24;
else if (*tailptr == 'c') Pow = -2;
else if (*tailptr == 'h') Pow = 2;
else if (*tailptr == 'k') Pow = 3;
if (Pow != 0.0) {
/* Trailing 'i' selects the binary prefix (eg "Ki" = 1024). */
if (*(++pfx->pex) == 'i') {
val *= pow (2.0, Pow/0.3);
pfx->pex++;
} else {
val *= pow (10.0, Pow);
}
}
}
(void) AddElement (pfx, val, oNull);
return MagickTrue;
}
/* Try a "%[...]" property. */
val = (fxFltType) 0;
lenOptArt = GetProperty (pfx, &val);
if (lenOptArt < 0) return MagickFalse;
if (lenOptArt > 0) {
(void) AddElement (pfx, val, oNull);
pfx->pex += lenOptArt;
return MagickTrue;
}
}
} /* end of lenToken==0 */
if (pfx->lenToken > 0) {
/* Try a constant
*/
{
ConstantE ce;
for (ce = (ConstantE)0; ce < cNull; ce=(ConstantE) (ce+1)) {
const char * ceStr = Constants[ce].str;
if (LocaleCompare (ceStr, pfx->token)==0) {
break;
}
}
if (ce != cNull) {
(void) AddElement (pfx, Constants[ce].val, oNull);
pfx->pex += pfx->lenToken;
return MagickTrue;
}
}
/* Try a function
*/
{
FunctionE fe;
for (fe = FirstFunc; fe < fNull; fe=(FunctionE) (fe+1)) {
const char * feStr = Functions[fe-FirstFunc].str;
if (LocaleCompare (feStr, pfx->token)==0) {
break;
}
}
if (fe == fV && pfx->ImgListLen < 2) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Symbol 'v' but fewer than two images at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
/* NOTE(review): stealth functions raise an exception but do not return
   MagickFalse here; translation continues and the caller relies on the
   recorded exception severity. */
if (IsStealth (fe)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Function", "'%s' not permitted at '%s'",
pfx->token, SetShortExp(pfx));
}
if (fe == fDo || fe == fFor || fe == fIf || fe == fWhile) {
*needPopAll = MagickTrue;
}
if (fe != fNull) return (GetFunction (pfx, fe));
}
/* Try image attribute
*/
{
ImgAttrE ia = GetImgAttrToken (pfx);
if (ia != aNull) {
fxFltType val = 0;
(void) AddElement (pfx, val, ia);
if (ImgAttrs[ia-FirstImgAttr].NeedStats==1) {
if (IsQualifier (pfx)) {
PixelChannel chQual = GetChannelQualifier (pfx, ia);
ElementT * pel;
if (chQual == NO_CHAN_QUAL) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bad channel qualifier at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
/* Adjust the element */
pel = &pfx->Elements[pfx->usedElements-1];
pel->ChannelQual = chQual;
}
}
return MagickTrue;
}
}
/* Try symbol
*/
{
SymbolE se;
for (se = FirstSym; se < sNull; se=(SymbolE) (se+1)) {
const char * seStr = Symbols[se-FirstSym].str;
if (LocaleCompare (seStr, pfx->token)==0) {
break;
}
}
if (se != sNull) {
fxFltType val = 0;
(void) AddElement (pfx, val, se);
pfx->pex += pfx->lenToken;
if (se==sHue || se==sSaturation || se==sLightness) pfx->NeedHsl = MagickTrue;
return MagickTrue;
}
}
/* Try constant colour.
*/
{
fxFltType v0, v1, v2;
ssize_t ColLen = GetConstantColour (pfx, &v0, &v1, &v2);
if (ColLen < 0) return MagickFalse;
if (ColLen > 0) {
(void) AddColourElement (pfx, v0, v1, v2);
pfx->pex+=ColLen;
return MagickTrue;
}
}
/* Try image artifact.
*/
{
const char *artifact;
artifact = GetImageArtifact (pfx->image, pfx->token);
if (artifact != (const char *) NULL) {
char * tailptr;
fxFltType val = strtold (artifact, &tailptr);
if (pfx->token == tailptr) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Artifact", "'%s' has value '%s', not a number, at '%s'",
pfx->token, artifact, SetShortExp(pfx));
return MagickFalse;
}
(void) AddElement (pfx, val, oNull);
pfx->pex+=pfx->lenToken;
return MagickTrue;
}
}
/* Try user symbols. If it is, don't AddElement yet.
*/
if (TokenMaybeUserSymbol (pfx)) {
*UserSymbol = MagickTrue;
*UserSymNdx = FindUserSymbol (pfx, pfx->token);
if (*UserSymNdx == NULL_ADDRESS) {
*UserSymNdx = AddUserSymbol (pfx, pfx->pex, pfx->lenToken);
*NewUserSymbol = MagickTrue;
} else {
/* Existing symbol: nothing further to do here. */
}
pfx->pex += pfx->lenToken;
return MagickTrue;
}
}
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected operand at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
static MagickBooleanType inline IsRealOperator (OperatorE op)
/* Grouping tokens (the oOpenParen..oCloseBrace range) are not
   computational operators; everything else is. */
{
  if (op >= oOpenParen && op <= oCloseBrace)
    return MagickFalse;
  return MagickTrue;
}
static MagickBooleanType inline ProcessTernaryOpr (FxInfo * pfx, TernaryT * ptern)
/* Ternary operator "... ? ... : ..."
returns false iff we have exception
If the operator on top of the stack is '?' or ':', pops it and emits the
corresponding addressing element, recording its index in *ptern for
later back-patching by ResolveTernaryAddresses().
*/
{
if (pfx->usedOprStack == 0)
return MagickFalse;
if (pfx->OperatorStack[pfx->usedOprStack-1] == oQuery) {
/* Only one '?' (and no preceding ':') is allowed per sub-expression. */
if (ptern->addrQuery != NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Already have '?' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (ptern->addrColon != NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Already have ':' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
pfx->usedOprStack--;
ptern->addrQuery = pfx->usedElements;
(void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS);
/* address will be one after the Colon address. */
}
else if (pfx->OperatorStack[pfx->usedOprStack-1] == oColon) {
/* ':' must follow a matching '?', and only one per sub-expression. */
if (ptern->addrQuery == NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Need '?' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (ptern->addrColon != NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Already have ':' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
pfx->usedOprStack--;
ptern->addrColon = pfx->usedElements;
/* Keep the true-branch result on the stack before jumping past the
   false branch. */
pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue;
(void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS);
/* address will be after the subexpression */
}
return MagickTrue;
}
static MagickBooleanType GetOperator (
FxInfo * pfx,
MagickBooleanType * Assign, MagickBooleanType * Update, MagickBooleanType * IncrDecr)
/* Reads the next operator, reporting whether it is an assignment ('='),
   in-place update ("+=" etc) or increment/decrement ("++"/"--").
   Implements shunting-yard: higher-precedence operators already on the
   stack are emitted before the new operator is pushed.
   Returns MagickFalse on error (an exception has been raised).
*/
{
OperatorE op;
size_t len = 0;
MagickBooleanType DoneIt = MagickFalse;
SkipSpaces (pfx);
/* Longest-match search of the operator table against the input text. */
for (op = (OperatorE)0; op != oNull; op=(OperatorE) (op+1)) {
const char * opStr = Operators[op].str;
len = strlen(opStr);
if (LocaleNCompare (opStr, pfx->pex, len)==0) {
break;
}
}
if (!IsRealOperator (op)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Not a real operator at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (op==oNull) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected operator at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
*Assign = (op==oAssign) ? MagickTrue : MagickFalse;
*Update = OprInPlace (op);
*IncrDecr = (op == oPlusPlus || op == oSubSub) ? MagickTrue : MagickFalse;
/* while top of OperatorStack is not empty and is not open-parens or assign,
and top of OperatorStack is higher precedence than new op,
then move top of OperatorStack to Element list.
*/
while (pfx->usedOprStack > 0) {
OperatorE top = pfx->OperatorStack[pfx->usedOprStack-1];
int precTop, precNew;
if (top == oOpenParen || top == oAssign || OprInPlace (top)) break;
precTop = Operators[top].precedence;
precNew = Operators[op].precedence;
/* Assume left associativity.
If right assoc, this would be "<=".
*/
if (precTop < precNew) break;
(void) AddElement (pfx, (fxFltType) 0, top);
pfx->usedOprStack--;
}
/* If new op is close paren, and stack top is open paren,
remove stack top.
*/
if (op==oCloseParen) {
if (pfx->usedOprStack == 0) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Found ')' but nothing on stack at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (pfx->OperatorStack[pfx->usedOprStack-1] != oOpenParen) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Found ')' but no '(' on stack at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
pfx->usedOprStack--;
DoneIt = MagickTrue;
}
if (!DoneIt) {
if (!PushOperatorStack (pfx, op)) return MagickFalse;
}
pfx->pex += len;
return MagickTrue;
}
static MagickBooleanType ResolveTernaryAddresses (FxInfo * pfx, TernaryT * ptern)
/* Back-patches the jump targets recorded for a "? :" pair: the '?'
   element jumps just past the ':' element, and the ':' element jumps past
   the end of the sub-expression.  A lone '?' or lone ':' is an error. */
{
  MagickBooleanType haveQuery = (ptern->addrQuery != NULL_ADDRESS) ? MagickTrue : MagickFalse;
  MagickBooleanType haveColon = (ptern->addrColon != NULL_ADDRESS) ? MagickTrue : MagickFalse;

  /* No ternary in this sub-expression: nothing to do. */
  if (!haveQuery && !haveColon)
    return MagickTrue;

  if (haveQuery && haveColon) {
    pfx->Elements[ptern->addrQuery].EleNdx = ptern->addrColon + 1;
    pfx->Elements[ptern->addrColon].EleNdx = pfx->usedElements;
    ptern->addrQuery = NULL_ADDRESS;
    ptern->addrColon = NULL_ADDRESS;
    return MagickTrue;
  }

  if (haveQuery) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "'?' with no corresponding ':'", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return MagickFalse;
  }

  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "':' with no corresponding '?'", "'%s' at '%s'",
    pfx->token, SetShortExp(pfx));
  return MagickFalse;
}
static MagickBooleanType TranslateExpression (
FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll)
/* Translates one expression, stopping at any character in strLimit
   (the character found is returned in *chLimit).
   Alternates GetOperand / GetOperator until a limit character or end of
   input, then flushes the operator stack and resolves any ternary.
   Returns MagickFalse at end of input, on a limit character with no
   expression, or on error (then an exception has been raised).
*/
{
/* There should be only one New per expression (oAssign), but can be many Old.
*/
MagickBooleanType UserSymbol, NewUserSymbol;
int UserSymNdx0, UserSymNdx1;
MagickBooleanType
Assign = MagickFalse,
Update = MagickFalse,
IncrDecr = MagickFalse;
int StartEleNdx;
TernaryT ternary;
ternary.addrQuery = NULL_ADDRESS;
ternary.addrColon = NULL_ADDRESS;
pfx->teDepth++;
*chLimit = '\0';
StartEleNdx = pfx->usedElements-1;
if (StartEleNdx < 0) StartEleNdx = 0;
SkipSpaces (pfx);
if (!*pfx->pex) {
pfx->teDepth--;
return MagickFalse;
}
/* An immediate limit character means an empty expression. */
if (strchr(strLimit,*pfx->pex)!=NULL) {
*chLimit = *pfx->pex;
pfx->pex++;
pfx->teDepth--;
return MagickFalse;
}
if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx0, needPopAll)) return MagickFalse;
SkipSpaces (pfx);
/* Loop through Operator, Operand, Operator, Operand, ...
*/
while (*pfx->pex && (!*strLimit || (strchr(strLimit,*pfx->pex)==NULL))) {
if (!GetOperator (pfx, &Assign, &Update, &IncrDecr)) return MagickFalse;
SkipSpaces (pfx);
if (NewUserSymbol && !Assign) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected assignment after new UserSymbol", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (!UserSymbol && Assign) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Attempted assignment to non-UserSymbol", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (!UserSymbol && Update) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Attempted update to non-UserSymbol", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (UserSymbol && (Assign || Update) && !IncrDecr) {
/* Right-hand side of an assignment: recurse for the value. */
if (!TranslateExpression (pfx, strLimit, chLimit, needPopAll)) return MagickFalse;
if (!*pfx->pex) break;
if (!*strLimit) break;
if (strchr(strLimit,*chLimit)!=NULL) break;
}
if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
/* Plain read of a user symbol: emit the copy now. */
ElementT * pel;
(void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
UserSymNdx0 = NULL_ADDRESS;
pel = &pfx->Elements[pfx->usedElements-1];
pel->DoPush = MagickTrue;
}
if (UserSymbol) {
/* Flush any pending unary prefixes now the operand is emitted. */
while (TopOprIsUnaryPrefix (pfx)) {
OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
(void) AddElement (pfx, (fxFltType) 0, op);
pfx->usedOprStack--;
}
}
if (!ProcessTernaryOpr (pfx, &ternary)) return MagickFalse;
if (ternary.addrColon != NULL_ADDRESS) {
/* False branch of "?:": translate it, then stop this level. */
if (!TranslateExpression (pfx, ",);", chLimit, needPopAll)) return MagickFalse;
break;
}
UserSymbol = NewUserSymbol = MagickFalse;
if ( (!*pfx->pex) || (*strLimit && (strchr(strLimit,*pfx->pex)!=NULL) ) )
{
if (IncrDecr) break;
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected operand after operator", "at '%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (IncrDecr) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"'++' and '--' must be the final operators in an expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx1, needPopAll)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected operand at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
SkipSpaces (pfx);
if (NewUserSymbol && !Assign) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"NewUserSymbol", "'%s' after non-assignment operator at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (UserSymbol && !NewUserSymbol) {
(void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx1);
UserSymNdx1 = NULL_ADDRESS;
}
UserSymNdx0 = UserSymNdx1;
}
/* Trailing bare user-symbol read (expression ended with the symbol). */
if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
ElementT * pel;
if (NewUserSymbol) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"NewUserSymbol", "'%s' needs assignment operator at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
(void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
pel = &pfx->Elements[pfx->usedElements-1];
pel->DoPush = MagickTrue;
}
/* Swallow the limit character that terminated the loop. */
if (*pfx->pex && !*chLimit && (strchr(strLimit,*pfx->pex)!=NULL)) {
*chLimit = *pfx->pex;
pfx->pex++;
}
/* Flush remaining operators; '=' and "+=" etc become copy-to/update
   elements addressed at the target user symbol. */
while (pfx->usedOprStack) {
OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
if (op == oOpenParen || op == oOpenBracket || op == oOpenBrace) {
break;
}
if ( (op==oAssign && !Assign) || (OprInPlace(op) && !Update) ) {
break;
}
pfx->usedOprStack--;
(void) AddElement (pfx, (fxFltType) 0, op);
if (op == oAssign) {
/* Adjust last element, by deletion and add.
*/
pfx->usedElements--;
(void) AddAddressingElement (pfx, rCopyTo, UserSymNdx0);
break;
} else if (OprInPlace (op)) {
/* Modify latest element.
*/
pfx->Elements[pfx->usedElements-1].EleNdx = UserSymNdx0;
break;
}
}
(void) ResolveTernaryAddresses (pfx, &ternary);
pfx->teDepth--;
if (!pfx->teDepth && *needPopAll) {
(void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
*needPopAll = MagickFalse;
}
if (pfx->exception->severity != UndefinedException)
return MagickFalse;
return MagickTrue;
}
static MagickBooleanType TranslateStatement (FxInfo * pfx, char * strLimit, char * chLimit)
/* Translates one statement.  When the statement ends with ';' its result
   is discarded by clearing the DoPush flag on the last element.
   Returns MagickFalse at end of input or on error. */
{
  MagickBooleanType popAll = MagickFalse;

  SkipSpaces (pfx);
  if (*pfx->pex == '\0')
    return MagickFalse;
  if (TranslateExpression (pfx, strLimit, chLimit, &popAll) == MagickFalse)
    return MagickFalse;
  if (*chLimit == ';' && pfx->usedElements) {
    /* FIXME: not necessarily the last element,
       but the last _executed_ element, eg "goto" in a "for()".,
       Pending a fix, we will use rZerStk.
    */
    ElementT * lastEl = &pfx->Elements[pfx->usedElements-1];
    if (lastEl->DoPush)
      lastEl->DoPush = MagickFalse;
  }
  return MagickTrue;
}
static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit)
/* Translates a ';'-separated list of statements, stopping at any
   character in strLimit (reported via *chLimit) or end of input.
   Returns MagickFalse at end of input or when an exception was raised. */
{
#define MAX_SLIMIT 10
  char sLimits[MAX_SLIMIT];

  SkipSpaces (pfx);
  if (*pfx->pex == '\0')
    return MagickFalse;

  /* Statement delimiters are the caller's limit set plus ';'. */
  (void) CopyMagickString (sLimits, strLimit, MAX_SLIMIT-1);
  if (strchr (strLimit, ';') == NULL)
    (void) ConcatenateMagickString (sLimits, ";", MAX_SLIMIT);

  do {
    if (TranslateStatement (pfx, sLimits, chLimit) == MagickFalse)
      return MagickFalse;
  } while (*pfx->pex && *chLimit == ';');

  if (pfx->exception->severity != UndefinedException)
    return MagickFalse;
  return MagickTrue;
}
/*--------------------------------------------------------------------
Run-time
*/
static ChannelStatistics *CollectOneImgStats (FxInfo * pfx, Image * img)
/* Gathers per-channel statistics for one image and scales each statistic
   by QuantumScale.  The caller owns the returned array.
   (Use RelinquishMagickMemory() somewhere.) */
{
  int chan;
  ChannelStatistics * stats = GetImageStatistics (img, pfx->exception);

  for (chan = 0; chan <= (int) MaxPixelChannels; chan++) {
    ChannelStatistics * cs = stats + chan;
    cs->mean *= QuantumScale;
    cs->median *= QuantumScale;
    cs->maxima *= QuantumScale;
    cs->minima *= QuantumScale;
    cs->standard_deviation *= QuantumScale;
    cs->kurtosis *= QuantumScale;
    cs->skewness *= QuantumScale;
    cs->entropy *= QuantumScale;
  }
  return stats;
}
static MagickBooleanType CollectStatistics (FxInfo * pfx)
/* Collects statistics for every image in the list into pfx->statistics
   and sets pfx->GotStats.  Assumes pfx->ImgListLen >= 1.
   Returns MagickFalse (with an exception) if allocation fails. */
{
  Image * img = GetFirstImageInList (pfx->image);
  size_t imgNum = 0;

  pfx->statistics = (ChannelStatistics**) AcquireMagickMemory (pfx->ImgListLen * sizeof (ChannelStatistics *));
  if (pfx->statistics == (ChannelStatistics **) NULL) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Statistics", "%lu",
      pfx->ImgListLen);
    return MagickFalse;
  }

  while (MagickTrue) {
    pfx->statistics[imgNum] = CollectOneImgStats (pfx, img);
    imgNum++;
    if (imgNum == pfx->ImgListLen)
      break;
    img = GetNextImageInList (img);
    assert (img != (Image *) NULL);
  }

  pfx->GotStats = MagickTrue;
  return MagickTrue;
}
static MagickBooleanType inline PushVal (FxInfo * pfx, fxRtT * pfxrt, fxFltType val, int addr)
/* Pushes "val" onto the per-thread run-time value stack.
   "addr" is the element index, used only in the overflow diagnostic.
   Returns MagickFalse (and raises an exception) on overflow. */
{
  if (pfxrt->usedValStack < pfxrt->numValStack) {
    pfxrt->ValStack[pfxrt->usedValStack] = val;
    pfxrt->usedValStack++;
    return MagickTrue;
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "ValStack overflow at addr=", "%i",
    addr);
  return MagickFalse;
}
static inline fxFltType PopVal (FxInfo * pfx, fxRtT * pfxrt, int addr)
/* Pops and returns the top of the per-thread run-time value stack.
   "addr" is the element index, used only in the underflow diagnostic.
   On underflow raises an exception and returns 0. */
{
  if (pfxrt->usedValStack > 0) {
    pfxrt->usedValStack--;
    return pfxrt->ValStack[pfxrt->usedValStack];
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "ValStack underflow at addr=", "%i",
    addr);
  return (fxFltType) 0;
}
/* Returns the value of image attribute "ia" (eg mean, maxima, page width)
   of image ImgNum, for the given channel.  Statistic attributes read the
   pre-collected per-image statistics when pfx->GotStats is set; otherwise,
   when pfx->NeedStats is set, statistics are collected (and freed) here on
   demand.
   NOTE(review): if neither GotStats nor NeedStats is set, "cs" stays NULL
   and the statistic cases below would dereference it — presumably the
   translator always sets NeedStats when a statistic attribute occurs;
   confirm before relying on that.
*/
static inline fxFltType ImageStat (
  FxInfo * pfx, ssize_t ImgNum, PixelChannel channel, ImgAttrE ia)
{
  ChannelStatistics * cs = NULL;
  fxFltType ret = 0;
  MagickBooleanType NeedRelinq = MagickFalse;
  assert (channel >= 0 && channel <= MaxPixelChannels);
  if (pfx->GotStats) {
    cs = pfx->statistics[ImgNum];
  } else if (pfx->NeedStats) {
    /* If we need more than one statistic per pixel, this is inefficient. */
    cs = CollectOneImgStats (pfx, pfx->Images[ImgNum]);
    NeedRelinq = MagickTrue;
  }
  switch (ia) {
    case aDepth:
      ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
      break;
    case aExtent:
      /* NOTE(review): uses pfx->image (the first/current image), not
         pfx->Images[ImgNum] — confirm that is intended. */
      ret = (fxFltType) GetBlobSize (pfx->image);
      break;
    case aKurtosis:
      ret = cs[channel].kurtosis;
      break;
    case aMaxima:
      ret = cs[channel].maxima;
      break;
    case aMean:
      ret = cs[channel].mean;
      break;
    case aMedian:
      ret = cs[channel].median;
      break;
    case aMinima:
      ret = cs[channel].minima;
      break;
    case aPage:
      /* Do nothing: "page" alone has no numeric value; only its
         qualified forms (x, y, width, height) below do. */
      break;
    case aPageX:
      ret = (fxFltType) pfx->Images[ImgNum]->page.x;
      break;
    case aPageY:
      ret = (fxFltType) pfx->Images[ImgNum]->page.y;
      break;
    case aPageWid:
      ret = (fxFltType) pfx->Images[ImgNum]->page.width;
      break;
    case aPageHt:
      ret = (fxFltType) pfx->Images[ImgNum]->page.height;
      break;
    case aPrintsize:
      /* Do nothing */
      break;
    case aPrintsizeX:
      /* columns / resolution, guarded against zero resolution. */
      ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.x)
                        * pfx->Images[ImgNum]->columns;
      break;
    case aPrintsizeY:
      ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.y)
                        * pfx->Images[ImgNum]->rows;
      break;
    case aQuality:
      ret = (fxFltType) pfx->Images[ImgNum]->quality;
      break;
    case aRes:
      /* Do nothing */
      break;
    case aResX:
      ret = pfx->Images[ImgNum]->resolution.x;
      break;
    case aResY:
      ret = pfx->Images[ImgNum]->resolution.y;
      break;
    case aSkewness:
      ret = cs[channel].skewness;
      break;
    case aStdDev:
      ret = cs[channel].standard_deviation;
      break;
    case aH:
      ret = (fxFltType) pfx->Images[ImgNum]->rows;
      break;
    case aN:
      ret = (fxFltType) pfx->ImgListLen;
      break;
    case aT: /* image index in list */
      ret = (fxFltType) ImgNum;
      break;
    case aW:
      ret = (fxFltType) pfx->Images[ImgNum]->columns;
      break;
    case aZ:
      ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
      break;
    default:
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Unknown ia=", "%i",
        ia);
  }
  /* Free the on-demand statistics collected above, if any. */
  if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs);
  return ret;
}
static fxFltType inline FxGcd (fxFltType x, fxFltType y, const size_t depth)
{
#define FxMaxFunctionDepth 200
  /* Greatest common divisor by the Euclidean method, written iteratively
     (the original expressed the same step sequence recursively).  "depth"
     counts steps already taken so the work is capped at
     FxMaxFunctionDepth; a |divisor| below 0.001 is treated as zero.
  */
  fxFltType a = x;
  fxFltType b = y;
  size_t steps = depth;
  while (steps < FxMaxFunctionDepth) {
    if (a < b) {
      /* Keep the larger value in "a". */
      fxFltType tmp = a;
      a = b;
      b = tmp;
      steps++;
      continue;
    }
    if (fabs ((double) b) < 0.001)
      break;
    {
      /* Euclid step: (a, b) -> (b, a mod b). */
      fxFltType r = a - b*floor ((double) (a/b));
      a = b;
      b = r;
      steps++;
    }
  }
  return a;
}
static ssize_t inline ChkImgNum (FxInfo * pfx, fxFltType f)
/* Rounds f to the nearest integer image index.  Negative indices count
   back from the end of the list.  Returns -1 (after raising an exception)
   if the result is outside the image list.
*/
{
  ssize_t i = (ssize_t) floor ((double) f + 0.5);
  if (i < 0) i += (ssize_t) pfx->ImgListLen;
  if (i < 0 || i >= (ssize_t)pfx->ImgListLen) {
    /* Bug fix: the index is signed and can be negative here; the previous
       "%lu" printed such values as huge unsigned numbers. */
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ImgNum", "%ld bad for ImgListLen %lu",
      (long) i, (unsigned long) pfx->ImgListLen);
    i = -1;
  }
  return i;
}
/* Channel to use when the element carries an image-attribute qualifier:
   no qualifier -> the composite channel; "this channel" -> the channel the
   expression is currently being evaluated for; otherwise the explicitly
   requested channel.  Expects "pel" and "channel" in scope. */
#define WHICH_ATTR_CHAN \
  (pel->ChannelQual == NO_CHAN_QUAL) ? CompositePixelChannel : \
  (pel->ChannelQual == THIS_CHANNEL) ? channel : pel->ChannelQual
/* Channel to use for reading pixel data (non-attribute access): as above,
   but the composite channel is not a readable plane, so it falls back to
   red (or the current channel when that is itself not composite). */
#define WHICH_NON_ATTR_CHAN \
  (pel->ChannelQual == NO_CHAN_QUAL || \
   pel->ChannelQual == THIS_CHANNEL || \
   pel->ChannelQual == CompositePixelChannel \
  ) ? (channel == CompositePixelChannel ? RedPixelChannel: channel) \
    : pel->ChannelQual
static fxFltType GetHslFlt (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy,
  int channel)
{
  /* Returns the requested HSL component (HUE_CHANNEL, SAT_CHANNEL or
     LIGHT_CHANNEL) of image ImgNum, interpolated at the floating-point
     coordinates (fx, fy).  Returns 0.0 for any other channel or when
     interpolation fails (after raising an exception).
  */
  Image * img = pfx->Images[ImgNum];
  double red, green, blue;
  double hue=0, saturation=0, lightness=0;
  MagickBooleanType okay = MagickTrue;
  /* Robustness fix: previously, interpolation failure still fed the
     (uninitialized) red/green/blue values into ConvertRGBToHSL.  Zero
     them up front and bail out after reporting the failure. */
  red = green = blue = 0.0;
  if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, RedPixelChannel, img->interpolate,
                               (double) fx, (double) fy, &red, pfx->exception)) okay = MagickFalse;
  if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, GreenPixelChannel, img->interpolate,
                               (double) fx, (double) fy, &green, pfx->exception)) okay = MagickFalse;
  if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, BluePixelChannel, img->interpolate,
                               (double) fx, (double) fy, &blue, pfx->exception)) okay = MagickFalse;
  if (!okay) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetHslFlt failure", "%lu %Lg,%Lg %i", ImgNum, fx, fy, channel);
    return 0.0;
  }
  ConvertRGBToHSL (
    red, green, blue,
    &hue, &saturation, &lightness);
  if (channel == HUE_CHANNEL)   return hue;
  if (channel == SAT_CHANNEL)   return saturation;
  if (channel == LIGHT_CHANNEL) return lightness;
  return 0.0;
}
static fxFltType GetHslInt (FxInfo * pfx, ssize_t ImgNum, const ssize_t imgx, const ssize_t imgy, int channel)
{
  /* Returns the requested HSL component (HUE_CHANNEL, SAT_CHANNEL or
     LIGHT_CHANNEL) of the pixel at integer coordinates (imgx, imgy) of
     image ImgNum.  Returns 0.0 for any other channel or when the pixel
     cannot be read (after raising an exception).
  */
  Image * img = pfx->Images[ImgNum];
  double hue=0, saturation=0, lightness=0;
  const Quantum * p = GetCacheViewVirtualPixels (pfx->Imgs[ImgNum].View, imgx, imgy, 1, 1, pfx->exception);
  if (!p) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetHslInt failure", "%lu %li,%li %i", ImgNum, imgx, imgy, channel);
    /* Bug fix: the previous code fell through after the throw and
       dereferenced the NULL pixel pointer below. */
    return 0.0;
  }
  ConvertRGBToHSL (
    GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p),
    &hue, &saturation, &lightness);
  if (channel == HUE_CHANNEL)   return hue;
  if (channel == SAT_CHANNEL)   return saturation;
  if (channel == LIGHT_CHANNEL) return lightness;
  return 0.0;
}
static fxFltType inline GetIntensity (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy)
{
  /* Returns the intensity of image ImgNum, interpolated at (fx, fy) and
     scaled to [0,1].  On interpolation failure an exception is raised and
     whatever GetPixelInfo initialised is converted (matching the previous
     error behavior).
  */
  Quantum
    quantum_pixel[MaxPixelChannels];
  PixelInfo
    pixelinf;
  Image * img = pfx->Images[ImgNum];
  (void) GetPixelInfo (img, &pixelinf);
  /* Bug fix: interpolate through the cache view of ImgNum, not of the
     current image (pfx->ImgNum); the two differ whenever the caller asks
     about another image in the list, and the view must match "img". */
  if (!InterpolatePixelInfo (img, pfx->Imgs[ImgNum].View, img->interpolate,
              (double) fx, (double) fy, &pixelinf, pfx->exception))
  {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetIntensity failure", "%lu %Lg,%Lg", ImgNum, fx, fy);
  }
  SetPixelViaPixelInfo (img, &pixelinf, quantum_pixel);
  return QuantumScale * GetPixelIntensity (img, quantum_pixel);
}
/* Executes the compiled RPN program in pfx->Elements for one pixel at
   (imgx, imgy) of the current image, for the given channel, using the
   per-thread runtime state pfxrt.  The computed value is stored in
   *result.  Returns MagickFalse if an exception was raised or the value
   stack is not empty at the end.
*/
static MagickBooleanType ExecuteRPN (FxInfo * pfx, fxRtT * pfxrt, fxFltType *result,
  const PixelChannel channel, const ssize_t imgx, const ssize_t imgy)
{
  const Quantum * p = pfxrt->thisPixel;
  /* Up to five operand registers; operators pop their arguments into
     regA..regE and leave their result in regA. */
  fxFltType regA=0, regB=0, regC=0, regD=0, regE=0;
  Image * img = pfx->image;
  ChannelStatistics * cs = NULL;
  MagickBooleanType NeedRelinq = MagickFalse;
  double hue=0, saturation=0, lightness=0;
  int i;
  /* For -fx, this sets p to ImgNum 0.
     for %[fx:...], this sets p to the currrent image.
     Similarly img.
  */
  if (!p) p = GetCacheViewVirtualPixels (
    pfx->Imgs[pfx->ImgNum].View, imgx, imgy, 1, 1, pfx->exception);
  /* NOTE(review): "cs" stays NULL when neither GotStats nor NeedStats is
     set; the aKurtosis/aMean/... cases below would then dereference NULL.
     Presumably the translator sets NeedStats whenever a statistic symbol
     occurs — confirm. */
  if (pfx->GotStats) {
    cs = pfx->statistics[pfx->ImgNum];
  } else if (pfx->NeedStats) {
    cs = CollectOneImgStats (pfx, pfx->Images[pfx->ImgNum]);
    NeedRelinq = MagickTrue;
  }
  /* Folllowing is only for expressions like "saturation", with no image specifier.
  */
  if (pfx->NeedHsl) {
    ConvertRGBToHSL (
      GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p),
      &hue, &saturation, &lightness);
  }
  for (i=0; i < pfx->usedElements; i++) {
    ElementT *pel = &pfx->Elements[i];
      /* Pop this element's operands off the value stack (last pushed is
         the last argument). */
      switch (pel->nArgs) {
        case 0:
          break;
        case 1:
          regA = PopVal (pfx, pfxrt, i);
          break;
        case 2:
          regB = PopVal (pfx, pfxrt, i);
          regA = PopVal (pfx, pfxrt, i);
          break;
        case 3:
          regC = PopVal (pfx, pfxrt, i);
          regB = PopVal (pfx, pfxrt, i);
          regA = PopVal (pfx, pfxrt, i);
          break;
        case 4:
          regD = PopVal (pfx, pfxrt, i);
          regC = PopVal (pfx, pfxrt, i);
          regB = PopVal (pfx, pfxrt, i);
          regA = PopVal (pfx, pfxrt, i);
          break;
        case 5:
          regE = PopVal (pfx, pfxrt, i);
          regD = PopVal (pfx, pfxrt, i);
          regC = PopVal (pfx, pfxrt, i);
          regB = PopVal (pfx, pfxrt, i);
          regA = PopVal (pfx, pfxrt, i);
          break;
        default:
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Too many args:", "%i", pel->nArgs);
          break;
      }
      switch (pel->oprNum) {
        /* Compound-assignment operators update the user symbol and leave
           the new value in regA. */
        case oAddEq:
          regA = (pfxrt->UserSymVals[pel->EleNdx] += regA);
          break;
        case oSubtractEq:
          regA = (pfxrt->UserSymVals[pel->EleNdx] -= regA);
          break;
        case oMultiplyEq:
          regA = (pfxrt->UserSymVals[pel->EleNdx] *= regA);
          break;
        case oDivideEq:
          /* Division via guarded reciprocal (avoids divide-by-zero). */
          regA = (pfxrt->UserSymVals[pel->EleNdx] *= PerceptibleReciprocal((double)regA));
          break;
        case oPlusPlus:
          regA = pfxrt->UserSymVals[pel->EleNdx]++;
          break;
        case oSubSub:
          regA = pfxrt->UserSymVals[pel->EleNdx]--;
          break;
        case oAdd:
          regA += regB;
          break;
        case oSubtract:
          regA -= regB;
          break;
        case oMultiply:
          regA *= regB;
          break;
        case oDivide:
          regA *= PerceptibleReciprocal((double)regB);
          break;
        case oModulus:
          regA = fmod ((double) regA, fabs(floor((double) regB+0.5)));
          break;
        case oUnaryPlus:
          /* Do nothing. */
          break;
        case oUnaryMinus:
          regA = -regA;
          break;
        case oLshift:
          /* Shifting by >= the bit width is undefined in C; report it. */
          if ((size_t) (regB+0.5) >= (8*sizeof(size_t)))
            {
              (void) ThrowMagickException ( pfx->exception, GetMagickModule(),
                OptionError, "undefined shift", "%g", (double) regB);
              regA = (fxFltType) 0.0;
              break;
            }
          regA = (fxFltType) ((size_t)(regA+0.5) << (size_t)(regB+0.5));
          break;
        case oRshift:
          if ((size_t) (regB+0.5) >= (8*sizeof(size_t)))
            {
              (void) ThrowMagickException ( pfx->exception, GetMagickModule(),
                OptionError, "undefined shift", "%g", (double) regB);
              regA = (fxFltType) 0.0;
              break;
            }
          regA = (fxFltType) ((size_t)(regA+0.5) >> (size_t)(regB+0.5));
          break;
        case oEq:
          /* Float (in)equality uses an epsilon band, not exact compare. */
          regA = fabs((double) (regA-regB)) < MagickEpsilon ? 1.0 : 0.0;
          break;
        case oNotEq:
          regA = fabs((double) (regA-regB)) >= MagickEpsilon ? 1.0 : 0.0;
          break;
        case oLtEq:
          regA = (regA <= regB) ? 1.0 : 0.0;
          break;
        case oGtEq:
          regA = (regA >= regB) ? 1.0 : 0.0;
          break;
        case oLt:
          regA = (regA < regB) ? 1.0 : 0.0;
          break;
        case oGt:
          regA = (regA > regB) ? 1.0 : 0.0;
          break;
        case oLogAnd:
          regA = (regA<=0) ? 0.0 : (regB > 0) ? 1.0 : 0.0;
          break;
        case oLogOr:
          regA = (regA>0) ? 1.0 : (regB > 0.0) ? 1.0 : 0.0;
          break;
        case oLogNot:
          regA = (regA==0) ? 1.0 : 0.0;
          break;
        case oBitAnd:
          regA = (fxFltType) ((size_t)(regA+0.5) & (size_t)(regB+0.5));
          break;
        case oBitOr:
          regA = (fxFltType) ((size_t)(regA+0.5) | (size_t)(regB+0.5));
          break;
        case oBitNot:
          /* Old fx doesn't add 0.5. */
          regA = (fxFltType) (~(size_t)(regA+0.5));
          break;
        case oPow:
          regA = pow ((double) regA, (double) regB);
          break;
        case oQuery:
        case oColon:
          break;
        case oOpenParen:
        case oCloseParen:
        case oOpenBracket:
        case oCloseBracket:
        case oOpenBrace:
        case oCloseBrace:
          /* Grouping tokens carry no run-time action. */
          break;
        case oAssign:
          pel->val = regA;
          break;
        case oNull: {
          /* A literal: colour constants hold one value per channel. */
          if (pel->type == etColourConstant) {
            switch (channel) {
              default:
              case 0:
                regA = pel->val;
                break;
              case 1:
                regA = pel->val1;
                break;
              case 2:
                regA = pel->val2;
                break;
            }
          } else {
            regA = pel->val;
          }
          break;
        }
        case fAbs:
          regA = fabs ((double) regA);
          break;
#if defined(MAGICKCORE_HAVE_ACOSH)
        case fAcosh:
          regA = acosh ((double) regA);
          break;
#endif
        case fAcos:
          regA = acos ((double) regA);
          break;
#if defined(MAGICKCORE_HAVE_J1)
        case fAiry:
          /* Airy: squared jinc; defined as 1 at the origin. */
          if (regA==0) regA = 1.0;
          else {
            fxFltType gamma = 2.0 * j1 ((MagickPI*regA)) / (MagickPI*regA);
            regA = gamma * gamma;
          }
          break;
#endif
        case fAlt:
          /* Alternating sign: -1 for odd integers, +1 for even. */
          regA = (fxFltType) (((ssize_t) regA) & 0x01 ? -1.0 : 1.0);
          break;
#if defined(MAGICKCORE_HAVE_ASINH)
        case fAsinh:
          regA = asinh ((double) regA);
          break;
#endif
        case fAsin:
          regA = asin ((double) regA);
          break;
#if defined(MAGICKCORE_HAVE_ATANH)
        case fAtanh:
          regA = atanh ((double) regA);
          break;
#endif
        case fAtan2:
          regA = atan2 ((double) regA, (double) regB);
          break;
        case fAtan:
          regA = atan ((double) regA);
          break;
        case fCeil:
          regA = ceil ((double) regA);
          break;
        case fChannel:
          /* channel(r,g,b,k,a): select the argument for this channel. */
          switch (channel) {
            case 0: break;
            case 1: regA = regB; break;
            case 2: regA = regC; break;
            case 3: regA = regD; break;
            case 4: regA = regE; break;
            default: regA = 0.0;
          }
          break;
        case fClamp:
          if (regA < 0) regA = 0.0;
          else if (regA > 1.0) regA = 1.0;
          break;
        case fCosh:
          regA = cosh ((double) regA);
          break;
        case fCos:
          regA = cos ((double) regA);
          break;
        case fDebug:
          /* FIXME: debug() should give channel name. */
          (void) fprintf (stderr, "%s[%g,%g].[%i]: %s=%.*Lg\n",
                   img->filename, (double) imgx, (double) imgy,
                   channel, SetPtrShortExp (pfx, pel->pExpStart, (size_t) (pel->lenExp+1)),
                   pfx->precision, regA);
          break;
        case fDrc:
          regA = regA / (regB*(regA-1.0) + 1.0);
          break;
#if defined(MAGICKCORE_HAVE_ERF)
        case fErf:
          regA = erf ((double) regA);
          break;
#endif
        case fExp:
          regA = exp ((double) regA);
          break;
        case fFloor:
          regA = floor ((double) regA);
          break;
        case fGauss:
          regA = exp((double) (-regA*regA/2.0))/sqrt(2.0*MagickPI);
          break;
        case fGcd:
          if (!IsNaN(regA))
            regA = FxGcd (regA, regB, 0);
          break;
        case fHypot:
          regA = hypot ((double) regA, (double) regB);
          break;
        case fInt:
          regA = floor ((double) regA);
          break;
        case fIsnan:
          regA = (fxFltType) (!!IsNaN (regA));
          break;
#if defined(MAGICKCORE_HAVE_J0)
        case fJ0:
          regA = j0 ((double) regA);
          break;
#endif
#if defined(MAGICKCORE_HAVE_J1)
        case fJ1:
          regA = j1 ((double) regA);
          break;
#endif
#if defined(MAGICKCORE_HAVE_J1)
        case fJinc:
          if (regA==0) regA = 1.0;
          else regA = 2.0 * j1 ((MagickPI*regA))/(MagickPI*regA);
          break;
#endif
        case fLn:
          regA = log ((double) regA);
          break;
        case fLogtwo:
          regA = log10((double) regA) / log10(2.0);
          break;
        case fLog:
          regA = log10 ((double) regA);
          break;
        case fMax:
          regA = (regA > regB) ? regA : regB;
          break;
        case fMin:
          regA = (regA < regB) ? regA : regB;
          break;
        case fMod:
          /* Floored modulo (result has the sign of regB), unlike fmod. */
          regA = regA - floor((double) (regA*PerceptibleReciprocal((double) regB)))*regB;
          break;
        case fNot:
          regA = (fxFltType) (regA < MagickEpsilon);
          break;
        case fPow:
          regA = pow ((double) regA, (double) regB);
          break;
        case fRand: {
          /* Serialised: the random generator state is shared state. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_ExecuteRPN)
#endif
          regA = GetPseudoRandomValue (pfxrt->random_info);
          break;
        }
        case fRound:
          regA = floor ((double) regA + 0.5);
          break;
        case fSign:
          regA = (regA < 0) ? -1.0 : 1.0;
          break;
        case fSinc:
          regA = sin ((double) (MagickPI*regA)) / (MagickPI*regA);
          break;
        case fSinh:
          regA = sinh ((double) regA);
          break;
        case fSin:
          regA = sin ((double) regA);
          break;
        case fSqrt:
          regA = sqrt ((double) regA);
          break;
        case fSquish:
          /* Logistic sigmoid. */
          regA = 1.0 / (1.0 + exp ((double) -regA));
          break;
        case fTanh:
          regA = tanh ((double) regA);
          break;
        case fTan:
          regA = tan ((double) regA);
          break;
        case fTrunc:
          /* Truncate toward zero. */
          if (regA >= 0) regA = floor ((double) regA);
          else regA = ceil ((double) regA);
          break;
        case fDo:
        case fFor:
        case fIf:
        case fWhile:
          /* Control constructs are compiled into rGoto/rIfZeroGoto
             elements; the keywords themselves are no-ops here. */
          break;
        case fU: {
          /* Note: 1 value is available, index into image list.
             May have ImgAttr qualifier or channel qualifier or both.
          */
          ssize_t ImgNum = ChkImgNum (pfx, regA);
          if (ImgNum < 0) break;
          regA = (fxFltType) 0;
          if (ImgNum == 0) {
            Image * pimg = pfx->Images[0];
            int pech = (int)pel->ChannelQual;
            if (pel->ImgAttrQual == aNull) {
              /* Negative ChannelQual values encode pseudo-channels
                 (hue/sat/lightness/intensity) and the "no qualifier"
                 sentinels. */
              if (pech < 0) {
                if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) {
                  if (pfx->ImgNum==0) {
                    regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
                  } else {
                    const Quantum * pv = GetCacheViewVirtualPixels (
                                   pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
                    if (!pv) {
                      (void) ThrowMagickException (
                        pfx->exception, GetMagickModule(), OptionError,
                        "fU can't get cache", "%lu", ImgNum);
                      break;
                    }
                    regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
                  }
                } else if (pech == HUE_CHANNEL || pech == SAT_CHANNEL ||
                           pech == LIGHT_CHANNEL) {
                  regA = GetHslInt (pfx, ImgNum, imgx, imgy, pech);
                  break;
                } else if (pech == INTENSITY_CHANNEL) {
                  regA = GetIntensity (pfx, 0, (double) imgx, (double) imgy);
                  break;
                }
              } else {
                if (pfx->ImgNum==0) {
                  regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
                } else {
                  const Quantum * pv = GetCacheViewVirtualPixels (
                                 pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
                  if (!pv) {
                    (void) ThrowMagickException (
                      pfx->exception, GetMagickModule(), OptionError,
                      "fU can't get cache", "%lu", ImgNum);
                    break;
                  }
                  regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
                }
              }
            } else {
              /* we have an image atttribute */
              regA = ImageStat (pfx, 0, WHICH_ATTR_CHAN, pel->ImgAttrQual);
            }
          } else {
            /* We have non-zero ImgNum. */
            if (pel->ImgAttrQual == aNull) {
              const Quantum * pv;
              if ((int)pel->ChannelQual < 0) {
                if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
                    pel->ChannelQual == LIGHT_CHANNEL)
                {
                  regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual);
                  break;
                } else if (pel->ChannelQual == INTENSITY_CHANNEL)
                {
                  regA = GetIntensity (pfx, ImgNum, (fxFltType) imgx, (fxFltType) imgy);
                  break;
                }
              }
              pv = GetCacheViewVirtualPixels (
                             pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception);
              if (!pv) {
                (void) ThrowMagickException (
                  pfx->exception, GetMagickModule(), OptionError,
                  "fU can't get cache", "%lu", ImgNum);
                break;
              }
              regA = QuantumScale *
                pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset];
            } else {
              regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual);
            }
          }
          break;
        }
        case fU0: {
          /* No args. No image attribute. We may have a ChannelQual.
             If called from %[fx:...], ChannelQual will be CompositePixelChannel.
          */
          Image * pimg = pfx->Images[0];
          int pech = (int)pel->ChannelQual;
          if (pech < 0) {
            if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) {
              if (pfx->ImgNum==0) {
                regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
              } else {
                const Quantum * pv = GetCacheViewVirtualPixels (
                               pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
                if (!pv) {
                  (void) ThrowMagickException (
                    pfx->exception, GetMagickModule(), OptionError,
                    "fU0 can't get cache", "%i", 0);
                  break;
                }
                regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
              }
            } else if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
                       pel->ChannelQual == LIGHT_CHANNEL) {
              regA = GetHslInt (pfx, 0, imgx, imgy, pel->ChannelQual);
              break;
            } else if (pel->ChannelQual == INTENSITY_CHANNEL) {
              regA = GetIntensity (pfx, 0, (fxFltType) imgx, (fxFltType) imgy);
            }
          } else {
            if (pfx->ImgNum==0) {
              regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
            } else {
              const Quantum * pv = GetCacheViewVirtualPixels (
                             pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
              if (!pv) {
                (void) ThrowMagickException (
                  pfx->exception, GetMagickModule(), OptionError,
                  "fU0 can't get cache", "%i", 0);
                break;
              }
              regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
            }
          }
          break;
        }
        case fUP: {
          /* 3 args are: ImgNum, x, y */
          ssize_t ImgNum = ChkImgNum (pfx, regA);
          fxFltType fx, fy;
          if (ImgNum < 0) break;
          if (pel->IsRelative) {
            fx = imgx + regB;
            fy = imgy + regC;
          } else {
            fx = regB;
            fy = regC;
          }
          if ((int)pel->ChannelQual < 0) {
            if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL
             || pel->ChannelQual == LIGHT_CHANNEL) {
              regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual);
              break;
            } else if (pel->ChannelQual == INTENSITY_CHANNEL) {
              regA = GetIntensity (pfx, ImgNum, fx, fy);
              break;
            }
          }
          {
            double v;
            Image * imUP = pfx->Images[ImgNum];
            if (! InterpolatePixelChannel (imUP, pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN,
                    imUP->interpolate, (double) fx, (double) fy, &v, pfx->exception))
            {
              (void) ThrowMagickException (
                pfx->exception, GetMagickModule(), OptionError,
                "fUP can't get interpolate", "%lu", ImgNum);
              break;
            }
            regA = v * QuantumScale;
          }
          break;
        }
        case fS:
        case fV: {
          /* No args.  fS reads the current image, fV the second image in
             the list. */
          ssize_t ImgNum = 1;
          if (pel->oprNum == fS) ImgNum = pfx->ImgNum;
          if (pel->ImgAttrQual == aNull) {
            const Quantum * pv = GetCacheViewVirtualPixels (
                                   pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception);
            if (!pv) {
              (void) ThrowMagickException (
                pfx->exception, GetMagickModule(), OptionError,
                "fV can't get cache", "%lu", ImgNum);
              break;
            }
            if ((int)pel->ChannelQual < 0) {
              if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
                  pel->ChannelQual == LIGHT_CHANNEL) {
                regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual);
                break;
              } else if (pel->ChannelQual == INTENSITY_CHANNEL) {
                regA = GetIntensity (pfx, ImgNum, (double) imgx, (double) imgy);
                break;
              }
            }
            regA = QuantumScale *
              pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset];
          } else {
            regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual);
          }
          break;
        }
        case fP:
        case fSP:
        case fVP: {
          /* 2 args are: x, y */
          fxFltType fx, fy;
          ssize_t ImgNum = pfx->ImgNum;
          if (pel->oprNum == fVP) ImgNum = 1;
          if (pel->IsRelative) {
            fx = imgx + regA;
            fy = imgy + regB;
          } else {
            fx = regA;
            fy = regB;
          }
          if ((int)pel->ChannelQual < 0) {
            if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
                pel->ChannelQual == LIGHT_CHANNEL) {
              regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual);
              break;
            } else if (pel->ChannelQual == INTENSITY_CHANNEL) {
              /* NOTE(review): unlike the fUP case above, this branch does
                 not "break", so regA is immediately overwritten by the
                 interpolation below — looks like a missing break; confirm
                 intended behavior. */
              regA = GetIntensity (pfx, ImgNum, fx, fy);
            }
          }
          {
            double v;
            if (! InterpolatePixelChannel (pfx->Images[ImgNum], pfx->Imgs[ImgNum].View,
                                           WHICH_NON_ATTR_CHAN, pfx->Images[ImgNum]->interpolate,
                                           (double) fx, (double) fy, &v, pfx->exception)
               )
            {
              (void) ThrowMagickException (
                pfx->exception, GetMagickModule(), OptionError,
                "fSP or fVP can't get interp", "%lu", ImgNum);
              break;
            }
            regA = v * (fxFltType)QuantumScale;
          }
          break;
        }
        case fNull:
          break;
        /* Image attributes of the current image.  The statistic cases
           read "cs" (see NULL note at the top of this function). */
        case aDepth:
          regA = (fxFltType) GetImageDepth (img, pfx->exception);
          break;
        case aExtent:
          regA = (fxFltType) img->extent;
          break;
        case aKurtosis:
          regA = cs[WHICH_ATTR_CHAN].kurtosis;
          break;
        case aMaxima:
          regA = cs[WHICH_ATTR_CHAN].maxima;
          break;
        case aMean:
          regA = cs[WHICH_ATTR_CHAN].mean;
          break;
        case aMedian:
          regA = cs[WHICH_ATTR_CHAN].median;
          break;
        case aMinima:
          regA = cs[WHICH_ATTR_CHAN].minima;
          break;
        case aPage:
          break;
        case aPageX:
          regA = (fxFltType) img->page.x;
          break;
        case aPageY:
          regA = (fxFltType) img->page.y;
          break;
        case aPageWid:
          regA = (fxFltType) img->page.width;
          break;
        case aPageHt:
          regA = (fxFltType) img->page.height;
          break;
        case aPrintsize:
          break;
        case aPrintsizeX:
          regA = (fxFltType) PerceptibleReciprocal (img->resolution.x) * img->columns;
          break;
        case aPrintsizeY:
          regA = (fxFltType) PerceptibleReciprocal (img->resolution.y) * img->rows;
          break;
        case aQuality:
          regA = (fxFltType) img->quality;
          break;
        case aRes:
          break;
        case aResX:
          regA = (fxFltType) img->resolution.x;
          break;
        case aResY:
          regA = (fxFltType) img->resolution.y;
          break;
        case aSkewness:
          regA = cs[WHICH_ATTR_CHAN].skewness;
          break;
        case aStdDev:
          regA = cs[WHICH_ATTR_CHAN].standard_deviation;
          break;
        case aH: /* image->rows */
          regA = (fxFltType) img->rows;
          break;
        case aN: /* image list length */
          regA = (fxFltType) pfx->ImgListLen;
          break;
        case aT: /* image index in list */
          regA = (fxFltType) pfx->ImgNum;
          break;
        case aW: /* image->columns */
          regA = (fxFltType) img->columns;
          break;
        case aZ: /* image depth */
          regA = (fxFltType) GetImageDepth (img, pfx->exception);
          break;
        case aNull:
          break;
        /* Per-pixel symbols of the current pixel "p". */
        case sHue: /* of conversion to HSL */
          regA = hue;
          break;
        case sIntensity:
          regA = GetIntensity (pfx, pfx->ImgNum, (double) imgx, (double) imgy);
          break;
        case sLightness: /* of conversion to HSL */
          regA = lightness;
          break;
        case sLuma: /* calculation */
        case sLuminance: /* as Luma */
          regA = QuantumScale * (0.212656 * GetPixelRed (img,p) +
                                 0.715158 * GetPixelGreen (img,p) +
                                 0.072186 * GetPixelBlue (img,p));
          break;
        case sSaturation: /* from conversion to HSL */
          regA = saturation;
          break;
        case sA: /* alpha */
          regA = QuantumScale * GetPixelAlpha (img, p);
          break;
        case sB: /* blue */
          regA = QuantumScale * GetPixelBlue (img, p);
          break;
        case sC: /* red (ie cyan) */
          regA = QuantumScale * GetPixelCyan (img, p);
          break;
        case sG: /* green */
          regA = QuantumScale * GetPixelGreen (img, p);
          break;
        case sI: /* current x-coordinate */
          regA = (fxFltType) imgx;
          break;
        case sJ: /* current y-coordinate */
          regA = (fxFltType) imgy;
          break;
        case sK: /* black of CMYK */
          regA = QuantumScale * GetPixelBlack (img, p);
          break;
        case sM: /* green (ie magenta) */
          regA = QuantumScale * GetPixelGreen (img, p);
          break;
        case sO: /* alpha */
          regA = QuantumScale * GetPixelAlpha (img, p);
          break;
        case sR:
          regA = QuantumScale * GetPixelRed (img, p);
          break;
        case sY:
          regA = QuantumScale * GetPixelYellow (img, p);
          break;
        case sNull:
          break;
        /* Control-flow pseudo-ops emitted by the translator. */
        case rGoto:
          i = pel->EleNdx-1; /* -1 because 'for' loop will increment. */
          break;
        case rIfZeroGoto:
          if (fabs((double) regA) < MagickEpsilon) i = pel->EleNdx-1;
          break;
        case rIfNotZeroGoto:
          if (fabs((double) regA) > MagickEpsilon) i = pel->EleNdx-1;
          break;
        case rCopyFrom:
          regA = pfxrt->UserSymVals[pel->EleNdx];
          break;
        case rCopyTo:
          pfxrt->UserSymVals[pel->EleNdx] = regA;
          break;
        case rZerStk:
          pfxrt->usedValStack = 0;
          break;
        case rNull:
          break;
        default:
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "pel->oprNum", "%i '%s' not yet implemented",
            (int)pel->oprNum, OprStr(pel->oprNum));
          break;
      }
      /* A goto target below zero means a corrupt program. */
      if (i < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Bad run-time address", "%i", i);
      }
      if (pel->DoPush)
        if (!PushVal (pfx, pfxrt, regA, i)) break;
  }
  /* The final expression value is the last thing pushed. */
  if (pfxrt->usedValStack > 0) regA = PopVal (pfx, pfxrt, 9999);
  *result = regA;
  if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs);
  if (pfx->exception->severity != UndefinedException) {
    return MagickFalse;
  }
  if (pfxrt->usedValStack != 0) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ValStack not empty", "(%i)", pfxrt->usedValStack);
    return MagickFalse;
  }
  return MagickTrue;
}
/* Following is substitute for FxEvaluateChannelExpression().
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression (
  FxInfo *pfx,
  const PixelChannel channel, const ssize_t x, const ssize_t y,
  double *result, ExceptionInfo *exception)
{
  /* Evaluates the compiled expression for one (channel, x, y) of the
     current image, storing the value in *result.  Runs on the calling
     thread's private runtime state.  Returns MagickFalse (and raises an
     exception) on failure.
  */
  const int
    id = GetOpenMPThreadId();
  fxFltType ret;
  assert (pfx != NULL);
  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Imgs != NULL);
  assert (pfx->fxrts != NULL);
  /* No pre-fetched pixel: ExecuteRPN will read it from the cache view. */
  pfx->fxrts[id].thisPixel = NULL;
  if (!ExecuteRPN (pfx, &pfx->fxrts[id], &ret, channel, x, y)) {
    /* Bug fix: error message previously read "ExcuteRPN failed". */
    (void) ThrowMagickException (
      exception, GetMagickModule(), OptionError,
      "ExecuteRPN failed", " ");
    return MagickFalse;
  }
  *result = (double) ret;
  return MagickTrue;
}
static FxInfo *AcquireFxInfoPrivate (const Image * images, const char * expression,
  MagickBooleanType CalcAllStats, ExceptionInfo *exception)
{
  /* Allocates an FxInfo, compiles "expression" (or, when it starts with
     '@', the contents of the named file) to RPN, optionally collects
     whole-image statistics, and allocates one runtime state per thread.
     Returns NULL on any failure, after unwinding whatever was built.
  */
  char chLimit;
  FxInfo * pfx = (FxInfo*) AcquireCriticalMemory (sizeof (*pfx));
  memset (pfx, 0, sizeof (*pfx));
  if (!InitFx (pfx, images, CalcAllStats, exception)) {
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }
  if (!BuildRPN (pfx)) {
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }
  if (*expression == '@')
    pfx->expression = FileToString (expression+1, ~0UL, exception);
  else
    pfx->expression = ConstantString (expression);
  pfx->pex = (char *)pfx->expression;
  pfx->teDepth = 0;
  if (!TranslateStatementList (pfx, ";", &chLimit)) {
    (void) DestroyRPN (pfx);
    pfx->expression = DestroyString (pfx->expression);
    pfx->pex = NULL;
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }
  /* Unbalanced nesting left over after translation is a parse error. */
  if (pfx->teDepth) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Translate expression depth", "(%i) not 0",
      pfx->teDepth);
    (void) DestroyRPN (pfx);
    pfx->expression = DestroyString (pfx->expression);
    pfx->pex = NULL;
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }
  if (chLimit != '\0' && chLimit != ';') {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "AcquireFxInfo: TranslateExpression did not exhaust input", "(chLimit=%i) at'%s'",
      (int)chLimit, pfx->pex);
    (void) DestroyRPN (pfx);
    pfx->expression = DestroyString (pfx->expression);
    pfx->pex = NULL;
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }
  if (pfx->NeedStats && pfx->runType == rtEntireImage && !pfx->statistics) {
    if (!CollectStatistics (pfx)) {
      (void) DestroyRPN (pfx);
      pfx->expression = DestroyString (pfx->expression);
      pfx->pex = NULL;
      (void) DeInitFx (pfx);
      pfx = (FxInfo*) RelinquishMagickMemory(pfx);
      return NULL;
    }
  }
  if (pfx->DebugOpt) {
    DumpTables (stderr);
    DumpUserSymbols (pfx, stderr);
    (void) DumpRPN (pfx, stderr);
  }
  {
    size_t number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
    ssize_t t;
    pfx->fxrts = (fxRtT *)AcquireQuantumMemory (number_threads, sizeof(fxRtT));
    if (!pfx->fxrts) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "fxrts", "%lu",
        number_threads);
      (void) DestroyRPN (pfx);
      pfx->expression = DestroyString (pfx->expression);
      pfx->pex = NULL;
      (void) DeInitFx (pfx);
      pfx = (FxInfo*) RelinquishMagickMemory(pfx);
      return NULL;
    }
    for (t=0; t < (ssize_t) number_threads; t++) {
      if (!AllocFxRt (pfx, &pfx->fxrts[t])) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), ResourceLimitFatalError,
          "AllocFxRt t=", "%g",
          (double) t);
        {
          /* Unwind the runtimes already allocated.
             Bug fix: the loop previously destroyed &pfx->fxrts[t] (the
             one that failed to allocate) on every iteration instead of
             &pfx->fxrts[t2], leaking runtimes 0..t-1 and double-freeing
             the failed slot. */
          ssize_t t2;
          for (t2 = t-1; t2 >= 0; t2--) {
            DestroyFxRt (&pfx->fxrts[t2]);
          }
        }
        pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts);
        (void) DestroyRPN (pfx);
        pfx->expression = DestroyString (pfx->expression);
        pfx->pex = NULL;
        (void) DeInitFx (pfx);
        pfx = (FxInfo*) RelinquishMagickMemory(pfx);
        return NULL;
      }
    }
  }
  return pfx;
}
FxInfo *AcquireFxInfo (const Image * images, const char * expression, ExceptionInfo *exception)
{
  /* Public entry point: same as AcquireFxInfoPrivate but never asks for
     whole-list statistics up front (CalcAllStats = MagickFalse). */
  FxInfo * pfx = AcquireFxInfoPrivate (images, expression, MagickFalse, exception);
  return pfx;
}
FxInfo *DestroyFxInfo (FxInfo * pfx)
{
  /* Releases every per-thread runtime, the RPN program, the expression
     text and finally the FxInfo itself.  Always returns NULL so callers
     can write "pfx = DestroyFxInfo (pfx);". */
  ssize_t nThreads;
  ssize_t i;
  assert (pfx != NULL);
  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Imgs != NULL);
  assert (pfx->fxrts != NULL);
  nThreads = (ssize_t) GetMagickResourceLimit (ThreadResource);
  for (i = 0; i < nThreads; i++)
    DestroyFxRt (&pfx->fxrts[i]);
  pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts);
  DestroyRPN (pfx);
  pfx->expression = DestroyString (pfx->expression);
  pfx->pex = NULL;
  (void) DeInitFx (pfx);
  pfx = (FxInfo*) RelinquishMagickMemory (pfx);
  return NULL;
}
/* Following is substitute for FxImage().
*/
MagickExport Image *FxImage (const Image *image, const char *expression,
  ExceptionInfo *exception)
{
/* Returns a clone of "image" with every authentic channel value replaced
   by the result of "expression" evaluated at that pixel/channel.  Rows
   are processed in parallel (unless the expression contains debug(),
   which forces a single thread).  Returns NULL on failure.
*/
#define FxImageTag  "FxNew/Image"
  CacheView
    *fx_view,
    *image_view;
  Image
    *fx_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  FxInfo
    *pfx;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL expression degenerates to a plain clone. */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (!fx_image) return NULL;
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) {
    fx_image=DestroyImage(fx_image);
    return NULL;
  }
  /* CalcAllStats=MagickTrue: statistics for the whole list are collected
     up front so worker threads can read them without locking. */
  pfx = AcquireFxInfoPrivate (image, expression, MagickTrue, exception);
  if (!pfx) {
    fx_image=DestroyImage(fx_image);
    return NULL;
  }
  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Imgs != NULL);
  assert (pfx->fxrts != NULL);
  status=MagickTrue;
  progress=0;
  image_view = AcquireVirtualCacheView (image, pfx->exception);
  fx_view = AcquireAuthenticCacheView (fx_image, pfx->exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows, \
      pfx->ContainsDebug ? 0 : 1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    fxFltType
      result = 0.0;
    if (status == MagickFalse)
      continue;
    p = GetCacheViewVirtualPixels (image_view, 0, y, image->columns, 1, pfx->exception);
    q = QueueCacheViewAuthenticPixels (fx_view, 0, y, fx_image->columns, 1, pfx->exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) {
      status=MagickFalse;
      continue;
    }
    for (x=0; x < (ssize_t) fx_image->columns; x++) {
      ssize_t i;
      /* Hand the already-fetched source pixel to ExecuteRPN so it does
         not re-read the cache for the common "u" accesses. */
      pfx->fxrts[id].thisPixel = (Quantum *)p;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel (image, i);
        PixelTrait traits = GetPixelChannelTraits (image, channel);
        PixelTrait fx_traits = GetPixelChannelTraits (fx_image, channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Copy-trait channels (e.g. unselected ones) pass through. */
        if ((fx_traits & CopyPixelTrait) != 0) {
          SetPixelChannel (fx_image, channel, p[i], q);
          continue;
        }
        if (!ExecuteRPN (pfx, &pfx->fxrts[id], &result, channel, x, y)) {
          status=MagickFalse;
          continue;
        }
        /* Expression results are in [0,1]; scale back to quantum range. */
        q[i] = ClampToQuantum ((MagickRealType) (QuantumRange*result));
      }
      p+=GetPixelChannels (image);
      q+=GetPixelChannels (fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view, pfx->exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed = SetImageProgress (image, FxImageTag, progress, image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view = DestroyCacheView (fx_view);
  image_view = DestroyCacheView (image_view);
  /* Before destroying the user symbol values, dump them to stderr.
  */
  if (pfx->DebugOpt && pfx->usedUserSymbols) {
    int t, i;
    char UserSym[MagickPathExtent];
    fprintf (stderr, "User symbols (%i):\n", pfx->usedUserSymbols);
    for (t=0; t < (int) GetMagickResourceLimit(ThreadResource); t++) {
      for (i = 0; i < (int) pfx->usedUserSymbols; i++) {
        fprintf (stderr, "th=%i us=%i '%s': %.*Lg\n",
                 t, i, NameOfUserSym (pfx, i, UserSym), pfx->precision, pfx->fxrts[t].UserSymVals[i]);
      }
    }
  }
  if (pfx->exception->severity != UndefinedException) {
    status = MagickFalse;
  }
  if (status == MagickFalse)
    fx_image = DestroyImage (fx_image);
  pfx = DestroyFxInfo (pfx);
  return(fx_image);
}
|
c-decl.c | /* Process declarations and variables for C compiler.
Copyright (C) 1988-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Process declarations and symbol lookup for C front end.
Also constructs types; the standard scalar types at initialization,
and structure, union, array and enum types when they are declared. */
/* ??? not all decl nodes are given the most useful possible
line numbers. For example, the CONST_DECLs for enum values. */
#include "config.h"
#define INCLUDE_UNIQUE_PTR
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "c-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "intl.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "attribs.h"
#include "toplev.h"
#include "debug.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "c-family/c-ubsan.h"
#include "c-lang.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "dumpfile.h"
#include "plugin.h"
#include "c-family/c-ada-spec.h"
#include "builtins.h"
#include "spellcheck-tree.h"
#include "gcc-rich-location.h"
#include "asan.h"
#include "c-family/name-hint.h"
#include "c-family/known-headers.h"
#include "c-family/c-spellcheck.h"
/* In grokdeclarator, distinguish syntactic contexts of declarators.  */
enum decl_context
{ NORMAL,			/* Ordinary declaration */
  FUNCDEF,			/* Function definition */
  PARM,				/* Declaration of parm before function body */
  FIELD,			/* Declaration inside struct or union */
  TYPENAME};			/* Typename (inside cast or sizeof) */

/* States indicating how grokdeclarator() should handle declspecs marked
   with __attribute__((deprecated)).  An object declared as
   __attribute__((deprecated)) suppresses warnings of uses of other
   deprecated items.  */
enum deprecated_states {
  DEPRECATED_NORMAL,		/* Warn about uses of deprecated items.  */
  DEPRECATED_SUPPRESS		/* The declaration itself is deprecated;
				   suppress deprecation warnings within it.  */
};
/* Nonzero if we have seen an invalid cross reference
to a struct, union, or enum, but not yet printed the message. */
tree pending_invalid_xref;
/* File and line to appear in the eventual error message. */
location_t pending_invalid_xref_location;
/* The file and line that the prototype came from if this is an
old-style definition; used for diagnostics in
store_parm_decls_oldstyle. */
static location_t current_function_prototype_locus;
/* Whether this prototype was built-in. */
static bool current_function_prototype_built_in;
/* The argument type information of this prototype. */
static tree current_function_prototype_arg_types;
/* The argument information structure for the function currently being
defined. */
static struct c_arg_info *current_function_arg_info;
/* The obstack on which parser and related data structures, which are
not live beyond their top-level declaration or definition, are
allocated. */
struct obstack parser_obstack;
/* The current statement tree. */
static GTY(()) struct stmt_tree_s c_stmt_tree;
/* State saving variables. */
tree c_break_label;
tree c_cont_label;
/* A list of decls to be made automatically visible in each file scope. */
static GTY(()) tree visible_builtins;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
int current_function_returns_abnormally;
/* Set to nonzero by `grokdeclarator' for a function
whose return type is defaulted, if warnings for this are desired. */
static int warn_about_return_type;
/* Nonzero when the current toplevel function contains a declaration
of a nested function which is never defined. */
static bool undef_nested_function;
/* If non-zero, implicit "omp declare target" attribute is added into the
attribute lists. */
int current_omp_declare_target_attribute;
/* Each c_binding structure describes one binding of an identifier to
a decl. All the decls in a scope - irrespective of namespace - are
chained together by the ->prev field, which (as the name implies)
runs in reverse order. All the decls in a given namespace bound to
a given identifier are chained by the ->shadowed field, which runs
from inner to outer scopes.
The ->decl field usually points to a DECL node, but there are two
exceptions. In the namespace of type tags, the bound entity is a
RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared
identifier is encountered, it is bound to error_mark_node to
suppress further errors about that identifier in the current
function.
The ->u.type field stores the type of the declaration in this scope;
if NULL, the type is the type of the ->decl field. This is only of
relevance for objects with external or internal linkage which may
be redeclared in inner scopes, forming composite types that only
persist for the duration of those scopes. In the external scope,
this stores the composite of all the types declared for this
object, visible or not. The ->inner_comp field (used only at file
scope) stores whether an incomplete array type at file scope was
completed at an inner scope to an array size other than 1.
The ->u.label field is used for labels. It points to a structure
which stores additional information used for warnings.
The depth field is copied from the scope structure that holds this
decl. It is used to preserve the proper ordering of the ->shadowed
field (see bind()) and also for a handful of special-case checks.
Finally, the invisible bit is true for a decl which should be
ignored for purposes of normal name lookup, and the nested bit is
true for a decl that's been bound a second time in an inner scope;
in all such cases, the binding in the outer scope will have its
invisible bit true. */
struct GTY((chain_next ("%h.prev"))) c_binding {
  union GTY(()) {		/* first so GTY desc can use decl */
    tree GTY((tag ("0"))) type; /* the type in this scope */
    struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */
  } GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u;
  tree decl;			/* the decl bound */
  tree id;			/* the identifier it's bound to */
  struct c_binding *prev;	/* the previous decl in this scope */
  struct c_binding *shadowed;	/* the innermost decl shadowed by this one */
  unsigned int depth : 28;	/* depth of this scope */
  BOOL_BITFIELD invisible : 1;	/* normal lookup should ignore this binding */
  BOOL_BITFIELD nested : 1;	/* do not set DECL_CONTEXT when popping */
  BOOL_BITFIELD inner_comp : 1;	/* incomplete array completed in inner scope */
  BOOL_BITFIELD in_struct : 1;	/* currently defined as struct field */
  location_t locus;		/* location for nested bindings */
};

/* Scope tests on bindings: two bindings are in the same scope exactly
   when their depths are equal.  Depth 0 is the external scope and
   depth 1 the file scope (see push_scope, which starts at 0).  */
#define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth)
#define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth)
#define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/)
#define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/)
/* Each C symbol points to three linked lists of c_binding structures.
These describe the values of the identifier in the three different
namespaces defined by the language. */
struct GTY(()) lang_identifier {
  struct c_common_identifier common_id;
  struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */
  struct c_binding *tag_binding;    /* struct/union/enum tags */
  struct c_binding *label_binding;  /* labels */
};

/* Validate c-lang.c's assumptions.  The array size becomes -1 (a
   compile-time error) if the size constant is out of date.  */
extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate
[(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1];
/* The binding oracle; see c-tree.h. */
void (*c_binding_oracle) (enum c_oracle_request, tree identifier);
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's symbol binding. */
#define I_SYMBOL_CHECKED(node) \
(TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's symbol-binding chain, consulting the
   binding oracle (at most once per identifier) if the chain is still
   empty.  */
static inline struct c_binding **
i_symbol_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  if (c_binding_oracle != NULL
      && id->symbol_binding == NULL
      && !I_SYMBOL_CHECKED (node))
    {
      /* Set the "checked" flag first, to avoid infinite recursion
	 when the binding oracle calls back into gcc.  */
      I_SYMBOL_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_SYMBOL, node);
    }

  return &id->symbol_binding;
}
#define I_SYMBOL_BINDING(node) (*i_symbol_binding (node))
#define I_SYMBOL_DECL(node) \
(I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's tag binding. */
#define I_TAG_CHECKED(node) \
(TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's tag-binding chain, consulting the
   binding oracle (at most once per identifier) if the chain is still
   empty.  */
static inline struct c_binding **
i_tag_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  if (c_binding_oracle != NULL
      && id->tag_binding == NULL
      && !I_TAG_CHECKED (node))
    {
      /* Set the "checked" flag first, to avoid infinite recursion
	 when the binding oracle calls back into gcc.  */
      I_TAG_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_TAG, node);
    }

  return &id->tag_binding;
}
#define I_TAG_BINDING(node) (*i_tag_binding (node))
#define I_TAG_DECL(node) \
(I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's label binding. */
#define I_LABEL_CHECKED(node) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's label-binding chain, consulting the
   binding oracle (at most once per identifier) if the chain is still
   empty.  */
static inline struct c_binding **
i_label_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  if (c_binding_oracle != NULL
      && id->label_binding == NULL
      && !I_LABEL_CHECKED (node))
    {
      /* Set the "checked" flag first, to avoid infinite recursion
	 when the binding oracle calls back into gcc.  */
      I_LABEL_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_LABEL, node);
    }

  return &id->label_binding;
}
#define I_LABEL_BINDING(node) (*i_label_binding (node))
#define I_LABEL_DECL(node) \
(I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0)
/* The resulting tree type.  A C identifier is a lang_identifier (with
   the three binding chains above); every other node uses the generic
   layout.  The GTY desc selects between the two by tree code.  */
union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"),
	   chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node
{
  union tree_node GTY ((tag ("0"),
			desc ("tree_node_structure (&%h)")))
    generic;
  struct lang_identifier GTY ((tag ("1"))) identifier;
};
/* Track bindings and other things that matter for goto warnings. For
efficiency, we do not gather all the decls at the point of
definition. Instead, we point into the bindings structure. As
scopes are popped, we update these structures and gather the decls
that matter at that time. */
/* A snapshot of the binding state at one "spot" (a label definition or
   a goto); kept current as scopes pop — see update_spot_bindings.  */
struct GTY(()) c_spot_bindings {
  /* The currently open scope which holds bindings defined when the
     label was defined or the goto statement was found.  */
  struct c_scope *scope;
  /* The bindings in the scope field which were defined at the point
     of the label or goto.  This lets us look at older or newer
     bindings in the scope, as appropriate.  */
  struct c_binding *bindings_in_scope;
  /* The number of statement expressions that have started since this
     label or goto statement was defined.  This is zero if we are at
     the same statement expression level.  It is positive if we are in
     a statement expression started since this spot.  It is negative
     if this spot was in a statement expression and we have left
     it.  */
  int stmt_exprs;
  /* Whether we started in a statement expression but are no longer in
     it.  This is set to true if stmt_exprs ever goes negative.  */
  bool left_stmt_expr;
};
/* This structure is used to keep track of bindings seen when a goto
   statement is defined.  This is only used if we see the goto
   statement before we see the label.  */
struct GTY(()) c_goto_bindings {
  /* The location of the goto statement, for diagnostics.  */
  location_t loc;
  /* The bindings of the goto statement.  */
  struct c_spot_bindings goto_bindings;
};
/* Pointer typedef so the structure can be stored in GC-managed vecs
   (see c_label_vars::gotos).  */
typedef struct c_goto_bindings *c_goto_bindings_p;
/* The additional information we keep track of for a label binding.
   These fields are updated as scopes are popped.  Hung off a
   c_binding's u.label field (see bind_label).  */
struct GTY(()) c_label_vars {
  /* The shadowed c_label_vars, when one label shadows another (which
     can only happen using a __label__ declaration).  */
  struct c_label_vars *shadowed;
  /* The bindings when the label was defined.  */
  struct c_spot_bindings label_bindings;
  /* A list of decls that we care about: decls about which we should
     warn if a goto branches to this label from later in the function.
     Decls are added to this list as scopes are popped.  We only add
     the decls that matter.  */
  vec<tree, va_gc> *decls_in_scope;
  /* A list of goto statements to this label.  This is only used for
     goto statements seen before the label was defined, so that we can
     issue appropriate warnings for them.  */
  vec<c_goto_bindings_p, va_gc> *gotos;
};
/* Each c_scope structure describes the complete contents of one
scope. Four scopes are distinguished specially: the innermost or
current scope, the innermost function scope, the file scope (always
the second to outermost) and the outermost or external scope.
Most declarations are recorded in the current scope.
All normal label declarations are recorded in the innermost
function scope, as are bindings of undeclared identifiers to
error_mark_node. (GCC permits nested functions as an extension,
hence the 'innermost' qualifier.) Explicitly declared labels
(using the __label__ extension) appear in the current scope.
Being in the file scope (current_scope == file_scope) causes
special behavior in several places below. Also, under some
conditions the Objective-C front end records declarations in the
file scope even though that isn't the current scope.
All declarations with external linkage are recorded in the external
scope, even if they aren't visible there; this models the fact that
such declarations are visible to the entire program, and (with a
bit of cleverness, see pushdecl) allows diagnosis of some violations
of C99 6.2.2p7 and 6.2.7p2:
If, within the same translation unit, the same identifier appears
with both internal and external linkage, the behavior is
undefined.
All declarations that refer to the same object or function shall
have compatible type; otherwise, the behavior is undefined.
Initially only the built-in declarations, which describe compiler
intrinsic functions plus a subset of the standard library, are in
this scope.
The order of the blocks list matters, and it is frequently appended
to. To avoid having to walk all the way to the end of the list on
each insertion, or reverse the list later, we maintain a pointer to
the last list entry. (FIXME: It should be feasible to use a reversed
list here.)
The bindings list is strictly in reverse order of declarations;
pop_scope relies on this. */
struct GTY((chain_next ("%h.outer"))) c_scope {
  /* The scope containing this one.  */
  struct c_scope *outer;

  /* The next outermost function scope.  */
  struct c_scope *outer_function;

  /* All bindings in this scope, in reverse declaration order.  */
  struct c_binding *bindings;

  /* For each scope (except the global one), a chain of BLOCK nodes
     for all the scopes that were entered and exited one level down.
     blocks_last caches the tail so appends are O(1)
     (see SCOPE_LIST_APPEND).  */
  tree blocks;
  tree blocks_last;

  /* The depth of this scope.  Used to keep the ->shadowed chain of
     bindings sorted innermost to outermost.  */
  unsigned int depth : 28;

  /* True if we are currently filling this scope with parameter
     declarations.  */
  BOOL_BITFIELD parm_flag : 1;

  /* True if we saw [*] in this scope.  Used to give an error message
     if it appears in a function definition.  */
  BOOL_BITFIELD had_vla_unspec : 1;

  /* True if we already complained about forward parameter decls
     in this scope.  This prevents double warnings on
     foo (int a; int b; ...) */
  BOOL_BITFIELD warned_forward_parm_decls : 1;

  /* True if this is the outermost block scope of a function body.
     This scope contains the parameters, the local variables declared
     in the outermost block, and all the labels (except those in
     nested functions, or declared at block scope with __label__).  */
  BOOL_BITFIELD function_body : 1;

  /* True means make a BLOCK for this scope no matter what.  */
  BOOL_BITFIELD keep : 1;

  /* True means that an unsuffixed float constant is _Decimal64.  */
  BOOL_BITFIELD float_const_decimal64 : 1;

  /* True if this scope has any label bindings.  This is used to speed
     up searching for labels when popping scopes, particularly since
     labels are normally only found at function scope.  */
  BOOL_BITFIELD has_label_bindings : 1;

  /* True if we should issue a warning if a goto statement crosses any
     of the bindings.  We still need to check the list of bindings to
     find the specific ones we need to warn about.  This is true if
     decl_jump_unsafe would return true for any of the bindings.  This
     is used to avoid looping over all the bindings unnecessarily.  */
  BOOL_BITFIELD has_jump_unsafe_decl : 1;
};
/* The scope currently in effect. */
static GTY(()) struct c_scope *current_scope;
/* The innermost function scope. Ordinary (not explicitly declared)
labels, bindings to error_mark_node, and the lazily-created
bindings of __func__ and its friends get this scope. */
static GTY(()) struct c_scope *current_function_scope;
/* The C file scope. This is reset for each input translation unit. */
static GTY(()) struct c_scope *file_scope;
/* The outermost scope. This is used for all declarations with
external linkage, and only these, hence the name. */
static GTY(()) struct c_scope *external_scope;
/* A chain of c_scope structures awaiting reuse. */
static GTY((deletable)) struct c_scope *scope_freelist;
/* A chain of c_binding structures awaiting reuse. */
static GTY((deletable)) struct c_binding *binding_freelist;
/* Append DECL to LIST in scope SCOPE.  Uses the cached LIST##_last
   tail pointer so each append is O(1) rather than walking the
   BLOCK_CHAIN.  */
#define SCOPE_LIST_APPEND(scope, list, decl) do {	\
  struct c_scope *s_ = (scope);				\
  tree d_ = (decl);					\
  if (s_->list##_last)					\
    BLOCK_CHAIN (s_->list##_last) = d_;			\
  else							\
    s_->list = d_;					\
  s_->list##_last = d_;					\
} while (0)

/* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE, again in
   O(1) via the cached tail pointers.  */
#define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do {	\
  struct c_scope *t_ = (tscope);				\
  struct c_scope *f_ = (fscope);				\
  if (t_->to##_last)						\
    BLOCK_CHAIN (t_->to##_last) = f_->from;			\
  else								\
    t_->to = f_->from;						\
  t_->to##_last = f_->from##_last;				\
} while (0)
/* A c_inline_static structure stores details of a static identifier
   referenced in a definition of a function that may be an inline
   definition if no subsequent declaration of that function uses
   "extern" or does not use "inline".  Diagnosed at end of translation
   unit by check_inline_statics.  */
struct GTY((chain_next ("%h.next"))) c_inline_static {
  /* The location for a diagnostic.  */
  location_t location;
  /* The function that may be an inline definition.  */
  tree function;
  /* The object or function referenced.  */
  tree static_decl;
  /* What sort of reference this is.  */
  enum c_inline_static_type type;
  /* The next such structure or NULL.  */
  struct c_inline_static *next;
};
/* List of static identifiers used or referenced in functions that may
be inline definitions. */
static GTY(()) struct c_inline_static *c_inline_statics;
/* True means unconditionally make a BLOCK for the next scope pushed. */
static bool keep_next_level_flag;
/* True means the next call to push_scope will be the outermost scope
of a function body, so do not push a new scope, merely cease
expecting parameter decls. */
static bool next_is_function_body;
/* A vector of pointers to c_binding structures. */
typedef struct c_binding *c_binding_ptr;
/* Information that we keep for a struct or union while it is being
   parsed.  All three vectors are only populated when -Wc++-compat is
   in effect.  */
struct c_struct_parse_info
{
  /* If warn_cxx_compat, a list of types defined within this
     struct.  */
  auto_vec<tree> struct_types;
  /* If warn_cxx_compat, a list of field names which have bindings,
     and which are defined in this struct, but which are not defined
     in any enclosing struct.  This is used to clear the in_struct
     field of the c_bindings structure.  */
  auto_vec<c_binding_ptr> fields;
  /* If warn_cxx_compat, a list of typedef names used when defining
     fields in this struct.  */
  auto_vec<tree> typedefs_seen;
};
/* Information for the struct or union currently being parsed, or
NULL if not parsing a struct or union. */
static struct c_struct_parse_info *struct_parse_info;
/* Forward declarations. */
static tree lookup_name_in_scope (tree, struct c_scope *);
static tree c_make_fname_decl (location_t, tree, int);
static tree grokdeclarator (const struct c_declarator *,
struct c_declspecs *,
enum decl_context, bool, tree *, tree *, tree *,
bool *, enum deprecated_states);
static tree grokparms (struct c_arg_info *, bool);
static void layout_array_type (tree);
static void warn_defaults_to (location_t, int, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
/* T is a statement.  Add it to the statement-tree and return it.
   This is the C/ObjC version--C++ has a slightly different version of
   this function.  */
tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  /* Give the statement a source location if it does not already have
     one; labels are excluded.  */
  if (code != LABEL_EXPR
      && CAN_HAVE_LOCATION_P (t)
      && !EXPR_HAS_LOCATION (t))
    SET_EXPR_LOCATION (t, input_location);

  if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Add T to the statement-tree.  Non-side-effect statements need to
     be recorded during statement expressions, hence the "force".  */
  if (!building_stmt_list_p ())
    push_stmt_list ();
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}
/* Build a pointer type to TO_TYPE using the default pointer mode.
   A non-generic address space, or the absence of an override, defers
   to the target's pointer mode for that space.  */
static tree
c_build_pointer_type (tree to_type)
{
  /* An erroneous pointee is treated as living in the generic space.  */
  addr_space_t as = (to_type == error_mark_node
		     ? ADDR_SPACE_GENERIC
		     : TYPE_ADDR_SPACE (to_type));

  machine_mode pointer_mode;
  if (as == ADDR_SPACE_GENERIC && c_default_pointer_mode != VOIDmode)
    pointer_mode = c_default_pointer_mode;
  else
    pointer_mode = targetm.addr_space.pointer_mode (as);

  return build_pointer_type_for_mode (to_type, pointer_mode, false);
}
/* Return true if we will want to say something if a goto statement
   crosses DECL.  */
static bool
decl_jump_unsafe (tree decl)
{
  if (error_operand_p (decl))
    return false;

  bool is_var = VAR_P (decl);

  /* Always warn about crossing variably modified types.  */
  if ((is_var || TREE_CODE (decl) == TYPE_DECL)
      && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
    return true;

  /* Otherwise, only warn if -Wjump-misses-init and this is an
     initialized automatic decl.  */
  return (warn_jump_misses_init
	  && is_var
	  && !TREE_STATIC (decl)
	  && DECL_INITIAL (decl) != NULL_TREE);
}
/* Print to FILE the symbol, tag, and label bindings of identifier
   NODE, indented by INDENT, plus its RID info if it is a reserved
   word.  Used by debug_tree.  */
void
c_print_identifier (FILE *file, tree node, int indent)
{
  void (*save) (enum c_oracle_request, tree identifier);

  /* Temporarily hide any binding oracle.  Without this, calls to
     debug_tree from the debugger will end up calling into the oracle,
     making for a confusing debug session.  As the oracle isn't needed
     here for normal operation, it's simplest to suppress it.  */
  save = c_binding_oracle;
  c_binding_oracle = NULL;

  print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4);
  print_node (file, "tag", I_TAG_DECL (node), indent + 4);
  print_node (file, "label", I_LABEL_DECL (node), indent + 4);

  if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN)
    {
      tree rid = ridpointers[C_RID_CODE (node)];
      indent_to (file, indent + 4);
      fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"",
	       (void *) rid, IDENTIFIER_POINTER (rid));
    }

  /* Restore the oracle before returning.  */
  c_binding_oracle = save;
}
/* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL,
   which may be any of several kinds of DECL or TYPE or error_mark_node,
   in the scope SCOPE.  INVISIBLE bindings are skipped by normal name
   lookup; NESTED marks a decl bound a second time in an inner scope;
   LOCUS is the location recorded for the binding.  */
static void
bind (tree name, tree decl, struct c_scope *scope, bool invisible,
      bool nested, location_t locus)
{
  struct c_binding *b, **here;

  /* Reuse a binding from the freelist when possible, else allocate a
     fresh GC-managed one.  */
  if (binding_freelist)
    {
      b = binding_freelist;
      binding_freelist = b->prev;
    }
  else
    b = ggc_alloc<c_binding> ();

  b->shadowed = 0;
  b->decl = decl;
  b->id = name;
  b->depth = scope->depth;
  b->invisible = invisible;
  b->nested = nested;
  b->inner_comp = 0;
  b->in_struct = 0;
  b->locus = locus;

  b->u.type = NULL;

  /* Push onto the head of the scope's binding chain; the chain stays
     in reverse declaration order, which pop_scope relies on.  */
  b->prev = scope->bindings;
  scope->bindings = b;

  /* Note on the scope when a goto crossing this decl would deserve a
     warning, so popping can skip the bindings walk otherwise.  */
  if (decl_jump_unsafe (decl))
    scope->has_jump_unsafe_decl = 1;

  if (!name)
    return;

  /* Choose the identifier namespace (label, tag, or ordinary symbol)
     this decl is bound in.  */
  switch (TREE_CODE (decl))
    {
    case LABEL_DECL:     here = &I_LABEL_BINDING (name);   break;
    case ENUMERAL_TYPE:
    case UNION_TYPE:
    case RECORD_TYPE:    here = &I_TAG_BINDING (name);     break;
    case VAR_DECL:
    case FUNCTION_DECL:
    case TYPE_DECL:
    case CONST_DECL:
    case PARM_DECL:
    case ERROR_MARK:     here = &I_SYMBOL_BINDING (name);  break;

    default:
      gcc_unreachable ();
    }

  /* Locate the appropriate place in the chain of shadowed decls
     to insert this binding.  Normally, scope == current_scope and
     this does nothing.  The ->shadowed chain runs from inner to
     outer scope, sorted by depth.  */
  while (*here && (*here)->depth > scope->depth)
    here = &(*here)->shadowed;

  b->shadowed = *here;
  *here = b;
}
/* Clear the binding structure B, stick it on the binding_freelist,
   and return the former value of b->prev.  This is used by pop_scope
   and get_parm_info to iterate destructively over all the bindings
   from a given scope.  */
static struct c_binding *
free_binding_and_advance (struct c_binding *b)
{
  struct c_binding *next_in_scope = b->prev;

  /* Scrub the binding, then push it on the freelist for reuse.  */
  memset (b, 0, sizeof (struct c_binding));
  b->prev = binding_freelist;
  binding_freelist = b;

  return next_in_scope;
}
/* Bind a label.  Like bind, but skip fields which aren't used for
   labels, and add the LABEL_VARS value.  */
static void
bind_label (tree name, tree label, struct c_scope *scope,
	    struct c_label_vars *label_vars)
{
  struct c_binding *b;

  bind (name, label, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);

  scope->has_label_bindings = true;

  /* bind pushed the new binding onto the head of SCOPE's chain, so
     scope->bindings is the binding just created; hang LABEL_VARS off
     its u.label, saving any shadowed value (possible only via a
     __label__ declaration).  */
  b = scope->bindings;
  gcc_assert (b->decl == label);
  label_vars->shadowed = b->u.label;
  b->u.label = label_vars;
}
/* Hook called at end of compilation to assume 1 elt
   for a file-scope tentative array defn that wasn't complete before.  */
void
c_finish_incomplete_decl (tree decl)
{
  if (!VAR_P (decl))
    return;

  tree type = TREE_TYPE (decl);

  /* Only a non-external array variable whose domain was never
     completed needs fixing up.  */
  if (type == error_mark_node
      || TREE_CODE (type) != ARRAY_TYPE
      || DECL_EXTERNAL (decl)
      || TYPE_DOMAIN (type) != NULL_TREE)
    return;

  warning_at (DECL_SOURCE_LOCATION (decl),
	      0, "array %q+D assumed to have one element", decl);

  complete_array_type (&TREE_TYPE (decl), NULL_TREE, true);
  relayout_decl (decl);
}
/* Record that inline function FUNC contains a reference (location
   LOC) to static DECL (file-scope or function-local according to
   TYPE).  The record is pushed onto the c_inline_statics list,
   which check_inline_statics diagnoses at end of translation unit.  */
void
record_inline_static (location_t loc, tree func, tree decl,
		      enum c_inline_static_type type)
{
  c_inline_static *entry = ggc_alloc<c_inline_static> ();

  entry->location = loc;
  entry->function = func;
  entry->static_decl = decl;
  entry->type = type;

  /* Link at the head of the global list.  */
  entry->next = c_inline_statics;
  c_inline_statics = entry;
}
/* Check for references to static declarations in inline functions at
   the end of the translation unit and diagnose them if the functions
   are still inline definitions.  */
static void
check_inline_statics (void)
{
  struct c_inline_static *csi;
  for (csi = c_inline_statics; csi; csi = csi->next)
    {
      /* DECL_EXTERNAL on the function means no out-of-line definition
	 superseded it, i.e. it is still an inline definition.  */
      if (DECL_EXTERNAL (csi->function))
	switch (csi->type)
	  {
	  case csi_internal:
	    pedwarn (csi->location, 0,
		     "%qD is static but used in inline function %qD "
		     "which is not static", csi->static_decl, csi->function);
	    break;
	  case csi_modifiable:
	    pedwarn (csi->location, 0,
		     "%q+D is static but declared in inline function %qD "
		     "which is not static", csi->static_decl, csi->function);
	    break;
	  default:
	    gcc_unreachable ();
	  }
    }
  /* The list has served its purpose; let the GC reclaim it.  */
  c_inline_statics = NULL;
}
/* Fill in a c_spot_bindings structure. If DEFINING is true, set it
for the current state, otherwise set it to uninitialized. */
static void
set_spot_bindings (struct c_spot_bindings *p, bool defining)
{
if (defining)
{
p->scope = current_scope;
p->bindings_in_scope = current_scope->bindings;
}
else
{
p->scope = NULL;
p->bindings_in_scope = NULL;
}
p->stmt_exprs = 0;
p->left_stmt_expr = false;
}
/* Update spot bindings P as we pop out of SCOPE.  Return true if we
   should push decls for a label.  */
static bool
update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p)
{
  /* If this label or goto is defined in some other scope, or it is a
     label which is not yet defined, there is nothing to update.  */
  bool defined_here = (p->scope == scope);

  if (defined_here)
    {
      /* Adjust the spot bindings to refer to the bindings already
	 defined in the enclosing scope.  */
      p->scope = scope->outer;
      p->bindings_in_scope = p->scope->bindings;
    }

  return defined_here;
}
/* The Objective-C front-end often needs to determine the current scope.  */
void *
objc_get_current_scope (void)
{
  /* Returned as void * since struct c_scope is private to this file.  */
  return current_scope;
}
/* The following function is used only by Objective-C. It needs to live here
because it accesses the innards of c_scope. */
void
objc_mark_locals_volatile (void *enclosing_blk)
{
struct c_scope *scope;
struct c_binding *b;
for (scope = current_scope;
scope && scope != enclosing_blk;
scope = scope->outer)
{
for (b = scope->bindings; b; b = b->prev)
objc_volatilize_decl (b->decl);
/* Do not climb up past the current function. */
if (scope->function_body)
break;
}
}
/* Return true if we are in the global binding level.  */
bool
global_bindings_p (void)
{
  /* The global level is the file scope, not the outer external scope.  */
  return current_scope == file_scope;
}
/* Request that the next scope pushed get a BLOCK node no matter what
   (consumed by push_scope, which clears the flag).  */
void
keep_next_level (void)
{
  keep_next_level_flag = true;
}
/* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON.  */
void
set_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = true;
}
/* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma.  */
void
clear_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = false;
}
/* Return nonzero if an unsuffixed float constant is _Decimal64 in the
   current scope (inherited from the enclosing scope at push_scope).  */
bool
float_const_decimal64_p (void)
{
  return current_scope->float_const_decimal64;
}
/* Identify this scope as currently being filled with parameters.  */
void
declare_parm_level (void)
{
  current_scope->parm_flag = true;
}
/* Enter a new scope.  If the parameter list of a function definition
   was just parsed (next_is_function_body), no new scope is pushed;
   the parameter scope becomes the function-body scope instead.  */
void
push_scope (void)
{
  if (next_is_function_body)
    {
      /* This is the transition from the parameters to the top level
	 of the function body.  These are the same scope
	 (C99 6.2.1p4,6) so we do not push another scope structure.
	 next_is_function_body is set only by store_parm_decls, which
	 in turn is called when and only when we are about to
	 encounter the opening curly brace for the function body.

	 The outermost block of a function always gets a BLOCK node,
	 because the debugging output routines expect that each
	 function has at least one BLOCK.  */
      current_scope->parm_flag = false;
      current_scope->function_body = true;
      current_scope->keep = true;
      current_scope->outer_function = current_function_scope;
      current_function_scope = current_scope;

      keep_next_level_flag = false;
      next_is_function_body = false;

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope->outer)
	current_scope->float_const_decimal64
	  = current_scope->outer->float_const_decimal64;
      else
	current_scope->float_const_decimal64 = false;
    }
  else
    {
      struct c_scope *scope;

      /* Reuse a previously popped scope structure when available.  */
      if (scope_freelist)
	{
	  scope = scope_freelist;
	  scope_freelist = scope->outer;
	}
      else
	scope = ggc_cleared_alloc<c_scope> ();

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope)
	scope->float_const_decimal64 = current_scope->float_const_decimal64;
      else
	scope->float_const_decimal64 = false;

      scope->keep = keep_next_level_flag;
      scope->outer = current_scope;
      scope->depth = current_scope ? (current_scope->depth + 1) : 0;

      /* Check for scope depth overflow.  Unlikely (2^28 == 268,435,456) but
	 possible.  The 28-bit depth bitfield wrapped to zero; decrement
	 to report the maximum representable depth.  */
      if (current_scope && scope->depth == 0)
	{
	  scope->depth--;
	  sorry ("GCC supports only %u nested scopes", scope->depth);
	}

      current_scope        = scope;
      keep_next_level_flag = false;
    }
}
/* This is called when we are leaving SCOPE.  For each label defined
   in SCOPE, add any appropriate decls to its decls_in_scope fields.
   These are the decls whose initialization will be skipped by a goto
   later in the function.  */
static void
update_label_decls (struct c_scope *scope)
{
  struct c_scope *s;

  /* Walk from SCOPE outward, stopping at the current function's
     outermost scope (labels are function-local).  */
  s = scope;
  while (s != NULL)
    {
      if (s->has_label_bindings)
        {
          struct c_binding *b;

          for (b = s->bindings; b != NULL; b = b->prev)
            {
              struct c_label_vars *label_vars;
              struct c_binding *b1;
              bool hjud;
              unsigned int ix;
              struct c_goto_bindings *g;

              /* Only label bindings are of interest here.  */
              if (TREE_CODE (b->decl) != LABEL_DECL)
                continue;
              label_vars = b->u.label;

              b1 = label_vars->label_bindings.bindings_in_scope;
              /* hjud: whether the scope holding the label's bindings
                 contains any decl that is unsafe to jump over.  Cached
                 before update_spot_bindings may change the scope.  */
              if (label_vars->label_bindings.scope == NULL)
                hjud = false;
              else
                hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl;
              if (update_spot_bindings (scope, &label_vars->label_bindings))
                {
                  /* This label is defined in this scope.  */
                  if (hjud)
                    {
                      for (; b1 != NULL; b1 = b1->prev)
                        {
                          /* A goto from later in the function to this
                             label will never see the initialization
                             of B1, if any.  Save it to issue a
                             warning if needed.  */
                          if (decl_jump_unsafe (b1->decl))
                            vec_safe_push(label_vars->decls_in_scope, b1->decl);
                        }
                    }
                }

              /* Update the bindings of any goto statements associated
                 with this label.  */
              FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
                update_spot_bindings (scope, &g->goto_bindings);
            }
        }

      /* Don't search beyond the current function.  */
      if (s == current_function_scope)
        break;
      s = s->outer;
    }
}
/* Set the TYPE_CONTEXT of TYPE and every one of its variants to
   CONTEXT, starting from the main variant.  */
static void
set_type_context (tree type, tree context)
{
  tree variant = TYPE_MAIN_VARIANT (type);
  while (variant)
    {
      TYPE_CONTEXT (variant) = context;
      variant = TYPE_NEXT_VARIANT (variant);
    }
}
/* Exit a scope.  Restore the state of the identifier-decl mappings
   that were in effect when this scope was entered.  Return a BLOCK
   node containing all the DECLs in this scope that are of interest
   to debug info generation.  */
tree
pop_scope (void)
{
  struct c_scope *scope = current_scope;
  tree block, context, p;
  struct c_binding *b;

  bool functionbody = scope->function_body;
  /* A BLOCK is built when this is a function body, the scope was
     explicitly kept (keep_next_level), or anything was bound here.  */
  bool keep = functionbody || scope->keep || scope->bindings;

  update_label_decls (scope);

  /* If appropriate, create a BLOCK to record the decls for the life
     of this function.  */
  block = NULL_TREE;
  if (keep)
    {
      block = make_node (BLOCK);
      BLOCK_SUBBLOCKS (block) = scope->blocks;
      TREE_USED (block) = 1;

      /* In each subblock, record that this is its superior.  */
      for (p = scope->blocks; p; p = BLOCK_CHAIN (p))
        BLOCK_SUPERCONTEXT (p) = block;

      BLOCK_VARS (block) = NULL_TREE;
    }

  /* The TYPE_CONTEXTs for all of the tagged types belonging to this
     scope must be set so that they point to the appropriate
     construct, i.e. either to the current FUNCTION_DECL node, or
     else to the BLOCK node we just constructed.

     Note that for tagged types whose scope is just the formal
     parameter list for some function type specification, we can't
     properly set their TYPE_CONTEXTs here, because we don't have a
     pointer to the appropriate FUNCTION_TYPE node readily available
     to us.  For those cases, the TYPE_CONTEXTs of the relevant tagged
     type nodes get set in `grokdeclarator' as soon as we have created
     the FUNCTION_TYPE node which will represent the "scope" for these
     "parameter list local" tagged types.  */
  if (scope->function_body)
    context = current_function_decl;
  else if (scope == file_scope)
    {
      tree file_decl
        = build_translation_unit_decl (get_identifier (main_input_filename));
      context = file_decl;
      debug_hooks->register_main_translation_unit (file_decl);
    }
  else
    context = block;

  /* Clear all bindings in this scope.  */
  for (b = scope->bindings; b; b = free_binding_and_advance (b))
    {
      p = b->decl;
      switch (TREE_CODE (p))
        {
        case LABEL_DECL:
          /* Warnings for unused labels, errors for undefined labels.  */
          if (TREE_USED (p) && !DECL_INITIAL (p))
            {
              error ("label %q+D used but not defined", p);
              DECL_INITIAL (p) = error_mark_node;
            }
          else
            warn_for_unused_label (p);

          /* Labels go in BLOCK_VARS.  */
          DECL_CHAIN (p) = BLOCK_VARS (block);
          BLOCK_VARS (block) = p;
          gcc_assert (I_LABEL_BINDING (b->id) == b);
          I_LABEL_BINDING (b->id) = b->shadowed;

          /* Also pop back to the shadowed label_vars.  */
          release_tree_vector (b->u.label->decls_in_scope);
          b->u.label = b->u.label->shadowed;
          break;

        case ENUMERAL_TYPE:
        case UNION_TYPE:
        case RECORD_TYPE:
          set_type_context (p, context);

          /* Types may not have tag-names, in which case the type
             appears in the bindings list with b->id NULL.  */
          if (b->id)
            {
              gcc_assert (I_TAG_BINDING (b->id) == b);
              I_TAG_BINDING (b->id) = b->shadowed;
            }
          break;

        case FUNCTION_DECL:
          /* Propagate TREE_ADDRESSABLE from nested functions to their
             containing functions.  */
          if (!TREE_ASM_WRITTEN (p)
              && DECL_INITIAL (p) != NULL_TREE
              && TREE_ADDRESSABLE (p)
              && DECL_ABSTRACT_ORIGIN (p) != NULL_TREE
              && DECL_ABSTRACT_ORIGIN (p) != p)
            TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1;
          if (!DECL_EXTERNAL (p)
              && !DECL_INITIAL (p)
              && scope != file_scope
              && scope != external_scope)
            {
              error ("nested function %q+D declared but never defined", p);
              undef_nested_function = true;
            }
          else if (DECL_DECLARED_INLINE_P (p)
                   && TREE_PUBLIC (p)
                   && !DECL_INITIAL (p))
            {
              /* C99 6.7.4p6: "a function with external linkage... declared
                 with an inline function specifier ... shall also be defined
                 in the same translation unit."  */
              if (!flag_gnu89_inline
                  && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p))
                  && scope != external_scope)
                pedwarn (input_location, 0,
                         "inline function %q+D declared but never defined", p);
              DECL_EXTERNAL (p) = 1;
            }
          goto common_symbol;

        case VAR_DECL:
          /* Warnings for unused variables.  */
          if ((!TREE_USED (p) || !DECL_READ_P (p))
              && !TREE_NO_WARNING (p)
              && !DECL_IN_SYSTEM_HEADER (p)
              && DECL_NAME (p)
              && !DECL_ARTIFICIAL (p)
              && scope != file_scope
              && scope != external_scope)
            {
              if (!TREE_USED (p))
                warning (OPT_Wunused_variable, "unused variable %q+D", p);
              else if (DECL_CONTEXT (p) == current_function_decl)
                warning_at (DECL_SOURCE_LOCATION (p),
                            OPT_Wunused_but_set_variable,
                            "variable %qD set but not used", p);
            }

          if (b->inner_comp)
            {
              error ("type of array %q+D completed incompatibly with"
                     " implicit initialization", p);
            }

          /* Fall through.  */
        case TYPE_DECL:
        case CONST_DECL:
        common_symbol:
          /* All of these go in BLOCK_VARS, but only if this is the
             binding in the home scope.  */
          if (!b->nested)
            {
              DECL_CHAIN (p) = BLOCK_VARS (block);
              BLOCK_VARS (block) = p;
            }
          else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope)
            {
              /* For block local externs add a special
                 DECL_EXTERNAL decl for debug info generation.  */
              tree extp = copy_node (p);

              DECL_EXTERNAL (extp) = 1;
              TREE_STATIC (extp) = 0;
              TREE_PUBLIC (extp) = 1;
              DECL_INITIAL (extp) = NULL_TREE;
              DECL_LANG_SPECIFIC (extp) = NULL;
              DECL_CONTEXT (extp) = current_function_decl;
              if (TREE_CODE (p) == FUNCTION_DECL)
                {
                  DECL_RESULT (extp) = NULL_TREE;
                  DECL_SAVED_TREE (extp) = NULL_TREE;
                  DECL_STRUCT_FUNCTION (extp) = NULL;
                }
              if (b->locus != UNKNOWN_LOCATION)
                DECL_SOURCE_LOCATION (extp) = b->locus;
              DECL_CHAIN (extp) = BLOCK_VARS (block);
              BLOCK_VARS (block) = extp;
            }
          /* If this is the file scope set DECL_CONTEXT of each decl to
             the TRANSLATION_UNIT_DECL.  This makes same_translation_unit_p
             work.  */
          if (scope == file_scope)
            {
              DECL_CONTEXT (p) = context;
              if (TREE_CODE (p) == TYPE_DECL
                  && TREE_TYPE (p) != error_mark_node)
                set_type_context (TREE_TYPE (p), context);
            }

          gcc_fallthrough ();
          /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have
             already been put there by store_parm_decls.  Unused-
             parameter warnings are handled by function.c.
             error_mark_node obviously does not go in BLOCK_VARS and
             does not get unused-variable warnings.  */
        case PARM_DECL:
        case ERROR_MARK:
          /* It is possible for a decl not to have a name.  We get
             here with b->id NULL in this case.  */
          if (b->id)
            {
              gcc_assert (I_SYMBOL_BINDING (b->id) == b);
              I_SYMBOL_BINDING (b->id) = b->shadowed;
              /* Restore the type saved in the shadowed binding,
                 if any.  */
              if (b->shadowed && b->shadowed->u.type)
                TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Dispose of the block that we just made inside some higher level.  */
  if ((scope->function_body || scope == file_scope) && context)
    {
      DECL_INITIAL (context) = block;
      BLOCK_SUPERCONTEXT (block) = context;
    }
  else if (scope->outer)
    {
      if (block)
        SCOPE_LIST_APPEND (scope->outer, blocks, block);
      /* If we did not make a block for the scope just exited, any
         blocks made for inner scopes must be carried forward so they
         will later become subblocks of something else.  */
      else if (scope->blocks)
        SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks);
    }

  /* Pop the current scope, and free the structure for reuse.  */
  current_scope = scope->outer;
  if (scope->function_body)
    current_function_scope = scope->outer_function;

  memset (scope, 0, sizeof (struct c_scope));
  scope->outer = scope_freelist;
  scope_freelist = scope;

  return block;
}
/* Enter the outermost (file) scope, creating it if it does not yet
   exist, and make all pre-declared builtins visible in it.  */
void
push_file_scope (void)
{
  /* There is only one file scope; entering it twice is a no-op.  */
  if (file_scope)
    return;

  push_scope ();
  file_scope = current_scope;

  start_fname_decls ();

  tree decl = visible_builtins;
  while (decl)
    {
      bind (DECL_NAME (decl), decl, file_scope,
            /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl));
      decl = DECL_CHAIN (decl);
    }
}
/* Leave the outermost (file) scope and close out the translation unit,
   first recovering from any unbalanced braces.  */
void
pop_file_scope (void)
{
  /* In case there were missing closebraces, get us back to the global
     binding level.  */
  while (current_scope != file_scope)
    pop_scope ();

  /* __FUNCTION__ is defined at file scope ("").  This
     call may not be necessary as my tests indicate it
     still works without it.  */
  finish_fname_decls ();

  check_inline_statics ();

  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      c_common_write_pch ();
      /* Ensure even the callers don't try to finalize the CU.  */
      flag_syntax_only = 1;
      return;
    }

  /* Pop off the file scope and close this translation unit.  */
  pop_scope ();
  file_scope = 0;

  maybe_apply_pending_pragma_weaks ();
}
/* Adjust the bindings for the start of a statement expression: bump
   the statement-expression counter on every label binding (and every
   goto targeting a label) visible from the current scope outward, and
   on SWITCH_BINDINGS if given.  */
void
c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings)
{
  for (struct c_scope *scope = current_scope; scope; scope = scope->outer)
    {
      if (!scope->has_label_bindings)
        continue;
      for (struct c_binding *b = scope->bindings; b; b = b->prev)
        {
          if (TREE_CODE (b->decl) != LABEL_DECL)
            continue;
          struct c_label_vars *label_vars = b->u.label;
          label_vars->label_bindings.stmt_exprs++;
          unsigned int ix;
          struct c_goto_bindings *g;
          FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
            g->goto_bindings.stmt_exprs++;
        }
    }
  if (switch_bindings)
    switch_bindings->stmt_exprs++;
}
/* Adjust the bindings for the end of a statement expression: undo the
   counting done by c_bindings_start_stmt_expr.  A counter that would
   go negative means the spot was recorded inside a statement
   expression we have now left; note that and clamp to zero.  */
void
c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings)
{
  for (struct c_scope *scope = current_scope; scope; scope = scope->outer)
    {
      if (!scope->has_label_bindings)
        continue;
      for (struct c_binding *b = scope->bindings; b; b = b->prev)
        {
          if (TREE_CODE (b->decl) != LABEL_DECL)
            continue;
          struct c_label_vars *label_vars = b->u.label;
          if (--label_vars->label_bindings.stmt_exprs < 0)
            {
              label_vars->label_bindings.left_stmt_expr = true;
              label_vars->label_bindings.stmt_exprs = 0;
            }
          unsigned int ix;
          struct c_goto_bindings *g;
          FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
            if (--g->goto_bindings.stmt_exprs < 0)
              {
                g->goto_bindings.left_stmt_expr = true;
                g->goto_bindings.stmt_exprs = 0;
              }
        }
    }
  if (switch_bindings != NULL)
    {
      --switch_bindings->stmt_exprs;
      gcc_assert (switch_bindings->stmt_exprs >= 0);
    }
}
/* Push a definition or a declaration of struct, union or enum tag "name".
   "type" should be the type node.
   We assume that the tag "name" is not already defined, and has a location
   of LOC.

   Note that the definition may really be just a forward reference.
   In that case, the TYPE_SIZE will be zero.  */
static void
pushtag (location_t loc, tree name, tree type)
{
  /* Record the identifier as the type's name if it has none.  */
  if (name && !TYPE_NAME (type))
    TYPE_NAME (type) = name;
  bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc);

  /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
     tagged type we just added to the current scope.  This fake
     NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
     to output a representation of a tagged type, and it also gives
     us a convenient place to record the "scope start" address for the
     tagged type.  */
  TYPE_STUB_DECL (type) = pushdecl (build_decl (loc,
                                                TYPE_DECL, NULL_TREE, type));

  /* An approximation for now, so we can tell this is a function-scope tag.
     This will be updated in pop_scope.  */
  TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));

  /* For C++ compatibility, warn when NAME is also visible as a typedef
     of a different type in this scope.  */
  if (warn_cxx_compat && name != NULL_TREE)
    {
      struct c_binding *b = I_SYMBOL_BINDING (name);

      if (b != NULL
          && b->decl != NULL_TREE
          && TREE_CODE (b->decl) == TYPE_DECL
          && (B_IN_CURRENT_SCOPE (b)
              || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
          && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl))
              != TYPE_MAIN_VARIANT (type)))
        {
          if (warning_at (loc, OPT_Wc___compat,
                          ("using %qD as both a typedef and a tag is "
                           "invalid in C++"), b->decl)
              && b->locus != UNKNOWN_LOCATION)
            inform (b->locus, "originally defined here");
        }
    }
}
/* An exported interface to pushtag.  This is used by the gdb plugin's
   binding oracle to introduce a new tag binding.  LOC, NAME and TYPE
   are forwarded to pushtag unchanged.  */
void
c_pushtag (location_t loc, tree name, tree type)
{
  pushtag (loc, name, type);
}
/* An exported interface to bind a declaration.  LOC is the location
   to use.  DECL is the declaration to bind.  The decl's name is used
   to determine how it is bound.  If DECL is a VAR_DECL, then
   IS_GLOBAL determines whether the decl is put into the global (file
   and external) scope or the current function's scope; if DECL is not
   a VAR_DECL then it is always put into the file scope.  */
void
c_bind (location_t loc, tree decl, bool is_global)
{
  struct c_scope *scope;
  bool nested = false;

  if (VAR_P (decl) && current_function_scope != NULL)
    {
      if (is_global)
        {
          /* Also bind it into the external scope.  */
          bind (DECL_NAME (decl), decl, external_scope, true, false, loc);
          nested = true;
          scope = file_scope;
          DECL_EXTERNAL (decl) = 1;
          TREE_PUBLIC (decl) = 1;
        }
      else
        {
          /* A function-local variable.  */
          DECL_CONTEXT (decl) = current_function_decl;
          TREE_PUBLIC (decl) = 0;
          scope = current_function_scope;
        }
    }
  else
    {
      /* Types and functions are always considered to be global.  */
      scope = file_scope;
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  bind (DECL_NAME (decl), decl, scope, false, nested, loc);
}
/* Subroutine of compare_decls.  Allow harmless mismatches in return
   and argument types provided that the type modes match.  This function
   return a unified type given a suitable match, and 0 otherwise.  */
static tree
match_builtin_function_types (tree newtype, tree oldtype)
{
  /* Accept the return type of the new declaration if it has the same
     mode as the old one.  */
  tree oldret = TREE_TYPE (oldtype);
  tree newret = TREE_TYPE (newtype);
  if (TYPE_MODE (oldret) != TYPE_MODE (newret))
    return NULL_TREE;

  tree oldargs = TYPE_ARG_TYPES (oldtype);
  tree newargs = TYPE_ARG_TYPES (newtype);
  tree tryargs = newargs;

  for (; oldargs || newargs;
       oldargs = TREE_CHAIN (oldargs), newargs = TREE_CHAIN (newargs))
    {
      /* Both lists must still have an entry, with a value whose mode
         matches its counterpart.  */
      if (!oldargs || !newargs)
        return NULL_TREE;
      if (!TREE_VALUE (oldargs) || !TREE_VALUE (newargs))
        return NULL_TREE;
      if (TYPE_MODE (TREE_VALUE (oldargs))
          != TYPE_MODE (TREE_VALUE (newargs)))
        return NULL_TREE;
    }

  tree trytype = build_function_type (newret, tryargs);

  /* Allow declaration to change transaction_safe attribute.  */
  tree oldattrs = TYPE_ATTRIBUTES (oldtype);
  tree oldtsafe = lookup_attribute ("transaction_safe", oldattrs);
  tree newattrs = TYPE_ATTRIBUTES (newtype);
  tree newtsafe = lookup_attribute ("transaction_safe", newattrs);
  if (oldtsafe && !newtsafe)
    oldattrs = remove_attribute ("transaction_safe", oldattrs);
  else if (newtsafe && !oldtsafe)
    oldattrs = tree_cons (get_identifier ("transaction_safe"),
                          NULL_TREE, oldattrs);

  return build_type_attribute_variant (trytype, oldattrs);
}
/* Subroutine of diagnose_mismatched_decls.  Check for function type
   mismatch involving an empty arglist vs a nonempty one and give clearer
   diagnostics.  */
static void
diagnose_arglist_conflict (tree newdecl, tree olddecl,
                           tree newtype, tree oldtype)
{
  /* Only applicable to functions with compatible return types.  */
  if (TREE_CODE (olddecl) != FUNCTION_DECL)
    return;
  if (!comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype)))
    return;

  /* One side must be an unprototyped declaration (no parameter list
     and no definition).  */
  bool old_unprototyped
    = !prototype_p (oldtype) && DECL_INITIAL (olddecl) == NULL_TREE;
  bool new_unprototyped
    = !prototype_p (newtype) && DECL_INITIAL (newdecl) == NULL_TREE;
  if (!old_unprototyped && !new_unprototyped)
    return;

  /* Walk whichever declaration actually carries argument types.  */
  tree args = TYPE_ARG_TYPES (oldtype);
  if (args == NULL_TREE)
    args = TYPE_ARG_TYPES (newtype);
  for (; args; args = TREE_CHAIN (args))
    {
      tree type = TREE_VALUE (args);

      if (TREE_CHAIN (args) == NULL_TREE
          && TYPE_MAIN_VARIANT (type) != void_type_node)
        {
          inform (input_location, "a parameter list with an ellipsis can%'t match "
                  "an empty parameter name list declaration");
          break;
        }

      if (c_type_promotes_to (type) != type)
        {
          inform (input_location, "an argument type that has a default promotion can%'t match "
                  "an empty parameter name list declaration");
          break;
        }
    }
}
/* Another subroutine of diagnose_mismatched_decls.  OLDDECL is an
   old-style function definition, NEWDECL is a prototype declaration.
   Diagnose inconsistencies in the argument list.  Returns TRUE if
   the prototype is compatible, FALSE if not.  */
static bool
validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
{
  tree newargs, oldargs;
  int i;

  /* An argument list ends with a void_type_node sentinel entry.  */
#define END_OF_ARGLIST(t) ((t) == void_type_node)

  /* The old-style definition's comparison types come from its actual
     parameters (TYPE_ACTUAL_ARG_TYPES), the prototype's from its
     declared parameter list.  */
  oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype);
  newargs = TYPE_ARG_TYPES (newtype);
  i = 1;

  for (;;)
    {
      tree oldargtype = TREE_VALUE (oldargs);
      tree newargtype = TREE_VALUE (newargs);

      if (oldargtype == error_mark_node || newargtype == error_mark_node)
        return false;

      /* Compare main variants, but keep the _Atomic qualifier when it
         is present on the argument type.  */
      oldargtype = (TYPE_ATOMIC (oldargtype)
                    ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype),
                                              TYPE_QUAL_ATOMIC)
                    : TYPE_MAIN_VARIANT (oldargtype));
      newargtype = (TYPE_ATOMIC (newargtype)
                    ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype),
                                              TYPE_QUAL_ATOMIC)
                    : TYPE_MAIN_VARIANT (newargtype));

      if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype))
        break;

      /* Reaching the end of just one list means the two decls don't
         agree on the number of arguments.  */
      if (END_OF_ARGLIST (oldargtype))
        {
          error ("prototype for %q+D declares more arguments "
                 "than previous old-style definition", newdecl);
          return false;
        }
      else if (END_OF_ARGLIST (newargtype))
        {
          error ("prototype for %q+D declares fewer arguments "
                 "than previous old-style definition", newdecl);
          return false;
        }

      /* Type for passing arg must be consistent with that declared
         for the arg.  */
      else if (!comptypes (oldargtype, newargtype))
        {
          error ("prototype for %q+D declares argument %d"
                 " with incompatible type",
                 newdecl, i);
          return false;
        }

      oldargs = TREE_CHAIN (oldargs);
      newargs = TREE_CHAIN (newargs);
      i++;
    }

  /* If we get here, no errors were found, but do issue a warning
     for this poor-style construct.  */
  warning (0, "prototype for %q+D follows non-prototype definition",
           newdecl);
  return true;
#undef END_OF_ARGLIST
}
/* Subroutine of diagnose_mismatched_decls.  Report the location of DECL,
   first in a pair of mismatched declarations, using the diagnostic
   function DIAG.  */
static void
locate_old_decl (tree decl)
{
  /* A built-in the user never declared has no useful location to
     report; stay silent.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)
      && !C_DECL_DECLARED_BUILTIN (decl))
    return;

  if (DECL_INITIAL (decl))
    inform (input_location, "previous definition of %q+D was here", decl);
  else if (C_DECL_IMPLICIT (decl))
    inform (input_location, "previous implicit declaration of %q+D was here", decl);
  else
    inform (input_location, "previous declaration of %q+D was here", decl);
}
/* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL.
Returns true if the caller should proceed to merge the two, false
if OLDDECL should simply be discarded. As a side effect, issues
all necessary diagnostics for invalid or poor-style combinations.
If it returns true, writes the types of NEWDECL and OLDDECL to
*NEWTYPEP and *OLDTYPEP - these may have been adjusted from
TREE_TYPE (NEWDECL, OLDDECL) respectively. */
static bool
diagnose_mismatched_decls (tree newdecl, tree olddecl,
tree *newtypep, tree *oldtypep)
{
tree newtype, oldtype;
bool pedwarned = false;
bool warned = false;
bool retval = true;
#define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL) \
&& DECL_EXTERNAL (DECL))
/* If we have error_mark_node for either decl or type, just discard
the previous decl - we're in an error cascade already. */
if (olddecl == error_mark_node || newdecl == error_mark_node)
return false;
*oldtypep = oldtype = TREE_TYPE (olddecl);
*newtypep = newtype = TREE_TYPE (newdecl);
if (oldtype == error_mark_node || newtype == error_mark_node)
return false;
/* Two different categories of symbol altogether. This is an error
unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */
if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
{
if (!(TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)))
{
error ("%q+D redeclared as different kind of symbol", newdecl);
locate_old_decl (olddecl);
}
else if (TREE_PUBLIC (newdecl))
warning (OPT_Wbuiltin_declaration_mismatch,
"built-in function %q+D declared as non-function",
newdecl);
else
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", newdecl);
return false;
}
/* Enumerators have no linkage, so may only be declared once in a
given scope. */
if (TREE_CODE (olddecl) == CONST_DECL)
{
error ("redeclaration of enumerator %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
if (!comptypes (oldtype, newtype))
{
if (TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))
{
/* Accept harmless mismatch in function types.
This is for the ffs and fprintf builtins. */
tree trytype = match_builtin_function_types (newtype, oldtype);
if (trytype && comptypes (newtype, trytype))
*oldtypep = oldtype = trytype;
else
{
/* If types don't match for a built-in, throw away the
built-in. No point in calling locate_old_decl here, it
won't print anything. */
warning (OPT_Wbuiltin_declaration_mismatch,
"conflicting types for built-in function %q+D",
newdecl);
return false;
}
}
else if (TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_IS_BUILTIN (olddecl))
{
/* A conflicting function declaration for a predeclared
function that isn't actually built in. Objective C uses
these. The new declaration silently overrides everything
but the volatility (i.e. noreturn) indication. See also
below. FIXME: Make Objective C use normal builtins. */
TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
return false;
}
/* Permit void foo (...) to match int foo (...) if the latter is
the definition and implicit int was used. See
c-torture/compile/920625-2.c. */
else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl)
&& TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node
&& C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl))
{
pedwarned = pedwarn (input_location, 0,
"conflicting types for %q+D", newdecl);
/* Make sure we keep void as the return type. */
TREE_TYPE (newdecl) = *newtypep = newtype = oldtype;
C_FUNCTION_IMPLICIT_INT (newdecl) = 0;
}
/* Permit void foo (...) to match an earlier call to foo (...) with
no declared type (thus, implicitly int). */
else if (TREE_CODE (newdecl) == FUNCTION_DECL
&& TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node
&& C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl))
{
pedwarned = pedwarn (input_location, 0,
"conflicting types for %q+D", newdecl);
/* Make sure we keep void as the return type. */
TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype;
}
else
{
int new_quals = TYPE_QUALS (newtype);
int old_quals = TYPE_QUALS (oldtype);
if (new_quals != old_quals)
{
addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals);
addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals);
if (new_addr != old_addr)
{
if (ADDR_SPACE_GENERIC_P (new_addr))
error ("conflicting named address spaces (generic vs %s) "
"for %q+D",
c_addr_space_name (old_addr), newdecl);
else if (ADDR_SPACE_GENERIC_P (old_addr))
error ("conflicting named address spaces (%s vs generic) "
"for %q+D",
c_addr_space_name (new_addr), newdecl);
else
error ("conflicting named address spaces (%s vs %s) "
"for %q+D",
c_addr_space_name (new_addr),
c_addr_space_name (old_addr),
newdecl);
}
if (CLEAR_QUAL_ADDR_SPACE (new_quals)
!= CLEAR_QUAL_ADDR_SPACE (old_quals))
error ("conflicting type qualifiers for %q+D", newdecl);
}
else
error ("conflicting types for %q+D", newdecl);
diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype);
locate_old_decl (olddecl);
return false;
}
}
/* Redeclaration of a type is a constraint violation (6.7.2.3p1),
but silently ignore the redeclaration if either is in a system
header. (Conflicting redeclarations were handled above.) This
is allowed for C11 if the types are the same, not just
compatible. */
if (TREE_CODE (newdecl) == TYPE_DECL)
{
bool types_different = false;
int comptypes_result;
comptypes_result
= comptypes_check_different_types (oldtype, newtype, &types_different);
if (comptypes_result != 1 || types_different)
{
error ("redefinition of typedef %q+D with different type", newdecl);
locate_old_decl (olddecl);
return false;
}
if (DECL_IN_SYSTEM_HEADER (newdecl)
|| DECL_IN_SYSTEM_HEADER (olddecl)
|| TREE_NO_WARNING (newdecl)
|| TREE_NO_WARNING (olddecl))
return true; /* Allow OLDDECL to continue in use. */
if (variably_modified_type_p (newtype, NULL))
{
error ("redefinition of typedef %q+D with variably modified type",
newdecl);
locate_old_decl (olddecl);
}
else if (pedwarn_c99 (input_location, OPT_Wpedantic,
"redefinition of typedef %q+D", newdecl))
locate_old_decl (olddecl);
return true;
}
/* Function declarations can either be 'static' or 'extern' (no
qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore
can never conflict with each other on account of linkage
(6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but
gnu89 mode permits two definitions if one is 'extern inline' and
one is not. The non- extern-inline definition supersedes the
extern-inline definition. */
else if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* If you declare a built-in function name as static, or
define the built-in with an old-style definition (so we
can't validate the argument list) the built-in definition is
overridden, but optionally warn this was a bad choice of name. */
if (DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)
&& (!TREE_PUBLIC (newdecl)
|| (DECL_INITIAL (newdecl)
&& !prototype_p (TREE_TYPE (newdecl)))))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", newdecl);
/* Discard the old built-in function. */
return false;
}
if (DECL_INITIAL (newdecl))
{
if (DECL_INITIAL (olddecl))
{
/* If both decls are in the same TU and the new declaration
isn't overriding an extern inline reject the new decl.
In c99, no overriding is allowed in the same translation
unit. */
if ((!DECL_EXTERN_INLINE (olddecl)
|| DECL_EXTERN_INLINE (newdecl)
|| (!flag_gnu89_inline
&& (!DECL_DECLARED_INLINE_P (olddecl)
|| !lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (olddecl)))
&& (!DECL_DECLARED_INLINE_P (newdecl)
|| !lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (newdecl))))
)
&& same_translation_unit_p (newdecl, olddecl))
{
error ("redefinition of %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
}
}
/* If we have a prototype after an old-style function definition,
the argument types must be checked specially. */
else if (DECL_INITIAL (olddecl)
&& !prototype_p (oldtype) && prototype_p (newtype)
&& TYPE_ACTUAL_ARG_TYPES (oldtype)
&& !validate_proto_after_old_defn (newdecl, newtype, oldtype))
{
locate_old_decl (olddecl);
return false;
}
/* A non-static declaration (even an "extern") followed by a
static declaration is undefined behavior per C99 6.2.2p3-5,7.
The same is true for a static forward declaration at block
scope followed by a non-static declaration/definition at file
scope. Static followed by non-static at the same scope is
not undefined behavior, and is the most convenient way to get
some effects (see e.g. what unwind-dw2-fde-glibc.c does to
the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but
we do diagnose it if -Wtraditional. */
if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl))
{
/* Two exceptions to the rule. If olddecl is an extern
inline, or a predeclared function that isn't actually
built in, newdecl silently overrides olddecl. The latter
occur only in Objective C; see also above. (FIXME: Make
Objective C use normal builtins.) */
if (!DECL_IS_BUILTIN (olddecl)
&& !DECL_EXTERN_INLINE (olddecl))
{
error ("static declaration of %q+D follows "
"non-static declaration", newdecl);
locate_old_decl (olddecl);
}
return false;
}
else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl))
{
if (DECL_CONTEXT (olddecl))
{
error ("non-static declaration of %q+D follows "
"static declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
else if (warn_traditional)
{
warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
}
/* Make sure gnu_inline attribute is either not present, or
present on all inline decls. */
if (DECL_DECLARED_INLINE_P (olddecl)
&& DECL_DECLARED_INLINE_P (newdecl))
{
bool newa = lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (newdecl)) != NULL;
bool olda = lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (olddecl)) != NULL;
if (newa != olda)
{
error_at (input_location, "%<gnu_inline%> attribute present on %q+D",
newa ? newdecl : olddecl);
error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl),
"but not here");
}
}
}
else if (VAR_P (newdecl))
{
/* Only variables can be thread-local, and all declarations must
agree on this property. */
if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl))
{
/* Nothing to check. Since OLDDECL is marked threadprivate
and NEWDECL does not have a thread-local attribute, we
will merge the threadprivate attribute into NEWDECL. */
;
}
else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl))
{
if (DECL_THREAD_LOCAL_P (newdecl))
error ("thread-local declaration of %q+D follows "
"non-thread-local declaration", newdecl);
else
error ("non-thread-local declaration of %q+D follows "
"thread-local declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
/* Multiple initialized definitions are not allowed (6.9p3,5). */
if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl))
{
error ("redefinition of %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
/* Objects declared at file scope: if the first declaration had
external linkage (even if it was an external reference) the
second must have external linkage as well, or the behavior is
undefined. If the first declaration had internal linkage, then
the second must too, or else be an external reference (in which
case the composite declaration still has internal linkage).
As for function declarations, we warn about the static-then-
extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */
if (DECL_FILE_SCOPE_P (newdecl)
&& TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl))
{
if (DECL_EXTERNAL (newdecl))
{
if (!DECL_FILE_SCOPE_P (olddecl))
{
error ("extern declaration of %q+D follows "
"declaration with no linkage", newdecl);
locate_old_decl (olddecl);
return false;
}
else if (warn_traditional)
{
warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
}
else
{
if (TREE_PUBLIC (newdecl))
error ("non-static declaration of %q+D follows "
"static declaration", newdecl);
else
error ("static declaration of %q+D follows "
"non-static declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
}
/* Two objects with the same name declared at the same block
scope must both be external references (6.7p3). */
else if (!DECL_FILE_SCOPE_P (newdecl))
{
if (DECL_EXTERNAL (newdecl))
{
/* Extern with initializer at block scope, which will
already have received an error. */
}
else if (DECL_EXTERNAL (olddecl))
{
error ("declaration of %q+D with no linkage follows "
"extern declaration", newdecl);
locate_old_decl (olddecl);
}
else
{
error ("redeclaration of %q+D with no linkage", newdecl);
locate_old_decl (olddecl);
}
return false;
}
/* C++ does not permit a decl to appear multiple times at file
scope. */
if (warn_cxx_compat
&& DECL_FILE_SCOPE_P (newdecl)
&& !DECL_EXTERNAL (newdecl)
&& !DECL_EXTERNAL (olddecl))
warned |= warning_at (DECL_SOURCE_LOCATION (newdecl),
OPT_Wc___compat,
("duplicate declaration of %qD is "
"invalid in C++"),
newdecl);
}
/* warnings */
/* All decls must agree on a visibility. */
if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
&& DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl)
&& DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
{
warned |= warning (0, "redeclaration of %q+D with different visibility "
"(old visibility preserved)", newdecl);
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
warned |= diagnose_mismatched_attributes (olddecl, newdecl);
else /* PARM_DECL, VAR_DECL */
{
/* Redeclaration of a parameter is a constraint violation (this is
not explicitly stated, but follows from C99 6.7p3 [no more than
one declaration of the same identifier with no linkage in the
same scope, except type tags] and 6.2.2p6 [parameters have no
linkage]). We must check for a forward parameter declaration,
indicated by TREE_ASM_WRITTEN on the old declaration - this is
an extension, the mandatory diagnostic for which is handled by
mark_forward_parm_decls. */
if (TREE_CODE (newdecl) == PARM_DECL
&& (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl)))
{
error ("redefinition of parameter %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
}
/* Optional warning for completely redundant decls. */
if (!warned && !pedwarned
&& warn_redundant_decls
/* Don't warn about a function declaration followed by a
definition. */
&& !(TREE_CODE (newdecl) == FUNCTION_DECL
&& DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
/* Don't warn about redundant redeclarations of builtins. */
&& !(TREE_CODE (newdecl) == FUNCTION_DECL
&& !DECL_BUILT_IN (newdecl)
&& DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl))
/* Don't warn about an extern followed by a definition. */
&& !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
/* Don't warn about forward parameter decls. */
&& !(TREE_CODE (newdecl) == PARM_DECL
&& TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
/* Don't warn about a variable definition following a declaration. */
&& !(VAR_P (newdecl)
&& DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)))
{
warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D",
newdecl);
}
/* Report location of previous decl/defn. */
if (warned || pedwarned)
locate_old_decl (olddecl);
#undef DECL_EXTERN_INLINE
return retval;
}
/* Subroutine of duplicate_decls.  NEWDECL has been found to be
   consistent with OLDDECL, but carries new information.  Merge the
   new information into OLDDECL.  This function issues no
   diagnostics.  */

static void
merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
{
  bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
			    && DECL_INITIAL (newdecl) != NULL_TREE);
  bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (newdecl)));
  bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (olddecl)));

  /* For real parm decl following a forward decl, rechain the old decl
     in its new location and clear TREE_ASM_WRITTEN (it's not a
     forward decl anymore).  */
  if (TREE_CODE (newdecl) == PARM_DECL
      && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
    {
      struct c_binding *b, **here;

      /* Unlink OLDDECL's binding from wherever it sits in the current
	 scope's chain and re-link it at the front, i.e. at the
	 position the real parameter declaration occupies.  */
      for (here = &current_scope->bindings; *here; here = &(*here)->prev)
	if ((*here)->decl == olddecl)
	  goto found;
      gcc_unreachable ();

    found:
      b = *here;
      *here = b->prev;
      b->prev = current_scope->bindings;
      current_scope->bindings = b;

      TREE_ASM_WRITTEN (olddecl) = 0;
    }

  DECL_ATTRIBUTES (newdecl)
    = targetm.merge_decl_attributes (olddecl, newdecl);

  /* For typedefs use the old type, as the new type's DECL_NAME points
     at newdecl, which will be ggc_freed.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      /* But NEWTYPE might have an attribute, honor that.  */
      tree tem = newtype;
      newtype = oldtype;

      if (TYPE_USER_ALIGN (tem))
	{
	  if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype))
	    SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem));
	  TYPE_USER_ALIGN (newtype) = true;
	}

      /* And remove the new type from the variants list.  */
      if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl)
	{
	  tree remove = TREE_TYPE (newdecl);
	  for (tree t = TYPE_MAIN_VARIANT (remove); ;
	       t = TYPE_NEXT_VARIANT (t))
	    if (TYPE_NEXT_VARIANT (t) == remove)
	      {
		TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove);
		break;
	      }
	}
    }

  /* Merge the data types specified in the two decls.  */
  TREE_TYPE (newdecl)
    = TREE_TYPE (olddecl)
    = composite_type (newtype, oldtype);

  /* Lay the type out, unless already done.  */
  if (!comptypes (oldtype, TREE_TYPE (newdecl)))
    {
      if (TREE_TYPE (newdecl) != error_mark_node)
	layout_type (TREE_TYPE (newdecl));
      if (TREE_CODE (newdecl) != FUNCTION_DECL
	  && TREE_CODE (newdecl) != TYPE_DECL
	  && TREE_CODE (newdecl) != CONST_DECL)
	layout_decl (newdecl, 0);
    }
  else
    {
      /* Since the type is OLDDECL's, make OLDDECL's size go with.  */
      DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
      DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl);
      SET_DECL_MODE (newdecl, DECL_MODE (olddecl));
      if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
	{
	  SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl));
	  DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl);
	}
      if (DECL_WARN_IF_NOT_ALIGN (olddecl)
	  > DECL_WARN_IF_NOT_ALIGN (newdecl))
	SET_DECL_WARN_IF_NOT_ALIGN (newdecl,
				    DECL_WARN_IF_NOT_ALIGN (olddecl));
    }

  /* Keep the old rtl since we can safely use it.  */
  if (HAS_RTL_P (olddecl))
    COPY_DECL_RTL (olddecl, newdecl);

  /* Merge the type qualifiers.  */
  if (TREE_READONLY (newdecl))
    TREE_READONLY (olddecl) = 1;

  if (TREE_THIS_VOLATILE (newdecl))
    TREE_THIS_VOLATILE (olddecl) = 1;

  /* Merge deprecatedness.  */
  if (TREE_DEPRECATED (newdecl))
    TREE_DEPRECATED (olddecl) = 1;

  /* If a decl is in a system header and the other isn't, keep the one on the
     system header.  Otherwise, keep source location of definition rather than
     declaration and of prototype rather than non-prototype unless that
     prototype is built-in.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
      && DECL_IN_SYSTEM_HEADER (olddecl)
      && !DECL_IN_SYSTEM_HEADER (newdecl) )
    DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
  else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
	   && DECL_IN_SYSTEM_HEADER (newdecl)
	   && !DECL_IN_SYSTEM_HEADER (olddecl))
    DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl);
  else if ((DECL_INITIAL (newdecl) == NULL_TREE
	    && DECL_INITIAL (olddecl) != NULL_TREE)
	   || (old_is_prototype && !new_is_prototype
	       && !C_DECL_BUILTIN_PROTOTYPE (olddecl)))
    DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);

  /* Merge the initialization information.  */
  if (DECL_INITIAL (newdecl) == NULL_TREE)
    DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);

  /* Merge the threadprivate attribute.  */
  if (VAR_P (olddecl) && C_DECL_THREADPRIVATE_P (olddecl))
    C_DECL_THREADPRIVATE_P (newdecl) = 1;

  if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS))
    {
      /* Copy the assembler name.
	 Currently, it can only be defined in the prototype.  */
      COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);

      /* Use visibility of whichever declaration had it specified */
      if (DECL_VISIBILITY_SPECIFIED (olddecl))
	{
	  DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl);
	  DECL_VISIBILITY_SPECIFIED (newdecl) = 1;
	}

      if (TREE_CODE (newdecl) == FUNCTION_DECL)
	{
	  /* Function attributes accumulate: a property asserted on
	     either declaration holds for the merged one.  */
	  DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
	  DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl);
	  DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl);
	  DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl)
	    |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl);
	  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
	  DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl);
	  DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl);
	  TREE_READONLY (newdecl) |= TREE_READONLY (olddecl);
	  DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl);
	  DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl);
	}

      /* Merge the storage class information.  */
      merge_weak (newdecl, olddecl);

      /* For functions, static overrides non-static.  */
      if (TREE_CODE (newdecl) == FUNCTION_DECL)
	{
	  TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
	  /* This is since we don't automatically
	     copy the attributes of NEWDECL into OLDDECL.  */
	  TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
	  /* If this clears `static', clear it in the identifier too.  */
	  if (!TREE_PUBLIC (olddecl))
	    TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
	}
    }

  /* In c99, 'extern' declaration before (or after) 'inline' means this
     function is not DECL_EXTERNAL, unless 'gnu_inline' attribute
     is present.  */
  if (TREE_CODE (newdecl) == FUNCTION_DECL
      && !flag_gnu89_inline
      && (DECL_DECLARED_INLINE_P (newdecl)
	  || DECL_DECLARED_INLINE_P (olddecl))
      && (!DECL_DECLARED_INLINE_P (newdecl)
	  || !DECL_DECLARED_INLINE_P (olddecl)
	  || !DECL_EXTERNAL (olddecl))
      && DECL_EXTERNAL (newdecl)
      && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl))
      && !current_function_decl)
    DECL_EXTERNAL (newdecl) = 0;

  /* An inline definition following a static declaration is not
     DECL_EXTERNAL.  */
  if (new_is_definition
      && (DECL_DECLARED_INLINE_P (newdecl)
	  || DECL_DECLARED_INLINE_P (olddecl))
      && !TREE_PUBLIC (olddecl))
    DECL_EXTERNAL (newdecl) = 0;

  if (DECL_EXTERNAL (newdecl))
    {
      TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
      DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);

      /* An extern decl does not override previous storage class.  */
      TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);

      if (!DECL_EXTERNAL (newdecl))
	{
	  DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
	  DECL_COMMON (newdecl) = DECL_COMMON (olddecl);
	}
    }
  else
    {
      TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
      TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
    }

  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If we're redefining a function previously defined as extern
	 inline, make sure we emit debug info for the inline before we
	 throw it away, in case it was inlined into a function that
	 hasn't been written out yet.  */
      if (new_is_definition && DECL_INITIAL (olddecl))
	/* The new defn must not be inline.  */
	DECL_UNINLINABLE (newdecl) = 1;
      else
	{
	  /* If either decl says `inline', this fn is inline, unless
	     its definition was passed already.  */
	  if (DECL_DECLARED_INLINE_P (newdecl)
	      || DECL_DECLARED_INLINE_P (olddecl))
	    DECL_DECLARED_INLINE_P (newdecl) = 1;

	  DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl)
	    = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl));

	  DECL_DISREGARD_INLINE_LIMITS (newdecl)
	    = DECL_DISREGARD_INLINE_LIMITS (olddecl)
	    = (DECL_DISREGARD_INLINE_LIMITS (newdecl)
	       || DECL_DISREGARD_INLINE_LIMITS (olddecl));
	}

      if (DECL_BUILT_IN (olddecl))
	{
	  /* If redeclaring a builtin function, it stays built in.
	     But it gets tagged as having been declared.  */
	  DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl);
	  DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl);
	  C_DECL_DECLARED_BUILTIN (newdecl) = 1;
	  if (new_is_prototype)
	    {
	      C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0;
	      if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL)
		{
		  enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl);
		  switch (fncode)
		    {
		    /* If a compatible prototype of these builtin functions
		       is seen, assume the runtime implements it with the
		       expected semantics.  */
		    case BUILT_IN_STPCPY:
		      if (builtin_decl_explicit_p (fncode))
			set_builtin_decl_implicit_p (fncode, true);
		      break;
		    default:
		      if (builtin_decl_explicit_p (fncode))
			set_builtin_decl_declared_p (fncode, true);
		      break;
		    }

		  copy_attributes_to_builtin (newdecl);
		}
	    }
	  else
	    C_DECL_BUILTIN_PROTOTYPE (newdecl)
	      = C_DECL_BUILTIN_PROTOTYPE (olddecl);
	}

      /* Preserve function specific target and optimization options */
      if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl)
	  && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl))
	DECL_FUNCTION_SPECIFIC_TARGET (newdecl)
	  = DECL_FUNCTION_SPECIFIC_TARGET (olddecl);

      if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl)
	  && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl))
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)
	  = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl);

      /* Also preserve various other info from the definition.  */
      if (!new_is_definition)
	{
	  tree t;
	  DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
	  DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
	  DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl);
	  DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl);
	  DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl));
	  for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t))
	    DECL_CONTEXT (t) = newdecl;

	  /* See if we've got a function to instantiate from.  */
	  if (DECL_SAVED_TREE (olddecl))
	    DECL_ABSTRACT_ORIGIN (newdecl)
	      = DECL_ABSTRACT_ORIGIN (olddecl);
	}
    }

  /* Merge the USED information.  */
  if (TREE_USED (olddecl))
    TREE_USED (newdecl) = 1;
  else if (TREE_USED (newdecl))
    TREE_USED (olddecl) = 1;
  if (VAR_P (olddecl) || TREE_CODE (olddecl) == PARM_DECL)
    DECL_READ_P (newdecl) |= DECL_READ_P (olddecl);
  if (DECL_PRESERVE_P (olddecl))
    DECL_PRESERVE_P (newdecl) = 1;
  else if (DECL_PRESERVE_P (newdecl))
    DECL_PRESERVE_P (olddecl) = 1;

  /* Merge DECL_COMMON */
  if (VAR_P (olddecl) && VAR_P (newdecl)
      && !lookup_attribute ("common", DECL_ATTRIBUTES (newdecl))
      && !lookup_attribute ("nocommon", DECL_ATTRIBUTES (newdecl)))
    DECL_COMMON (newdecl) = DECL_COMMON (newdecl) && DECL_COMMON (olddecl);

  /* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
     But preserve OLDDECL's DECL_UID, DECL_CONTEXT and
     DECL_ARGUMENTS (if appropriate).  */
  {
    unsigned olddecl_uid = DECL_UID (olddecl);
    tree olddecl_context = DECL_CONTEXT (olddecl);
    tree olddecl_arguments = NULL;
    if (TREE_CODE (olddecl) == FUNCTION_DECL)
      olddecl_arguments = DECL_ARGUMENTS (olddecl);

    /* The raw memcpy below overwrites OLDDECL's fields wholesale with
       NEWDECL's, so anything that must survive (UID, context,
       arguments, symtab node) is saved above and restored after.  */
    memcpy ((char *) olddecl + sizeof (struct tree_common),
	    (char *) newdecl + sizeof (struct tree_common),
	    sizeof (struct tree_decl_common) - sizeof (struct tree_common));
    DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
    switch (TREE_CODE (olddecl))
      {
      case FUNCTION_DECL:
      case VAR_DECL:
	{
	  struct symtab_node *snode = olddecl->decl_with_vis.symtab_node;

	  memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		  (char *) newdecl + sizeof (struct tree_decl_common),
		  tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
	  olddecl->decl_with_vis.symtab_node = snode;

	  if ((DECL_EXTERNAL (olddecl)
	       || TREE_PUBLIC (olddecl)
	       || TREE_STATIC (olddecl))
	      && DECL_SECTION_NAME (newdecl) != NULL)
	    set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl));

	  /* This isn't quite correct for something like
	     int __thread x attribute ((tls_model ("local-exec")));
	     extern int __thread x;
	     as we'll lose the "local-exec" model.  */
	  if (VAR_P (olddecl) && DECL_THREAD_LOCAL_P (newdecl))
	    set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl));
	  break;
	}

      case FIELD_DECL:
      case PARM_DECL:
      case LABEL_DECL:
      case RESULT_DECL:
      case CONST_DECL:
      case TYPE_DECL:
	memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		(char *) newdecl + sizeof (struct tree_decl_common),
		tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
	break;

      default:

	memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		(char *) newdecl + sizeof (struct tree_decl_common),
		sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common));
      }
    DECL_UID (olddecl) = olddecl_uid;
    DECL_CONTEXT (olddecl) = olddecl_context;
    if (TREE_CODE (olddecl) == FUNCTION_DECL)
      DECL_ARGUMENTS (olddecl) = olddecl_arguments;
  }

  /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl
     so that encode_section_info has a chance to look at the new decl
     flags and attributes.  */
  if (DECL_RTL_SET_P (olddecl)
      && (TREE_CODE (olddecl) == FUNCTION_DECL
	  || (VAR_P (olddecl) && TREE_STATIC (olddecl))))
    make_decl_rtl (olddecl);
}
/* Handle when a new declaration NEWDECL has the same name as an old
   one OLDDECL in the same binding contour.  Prints an error message
   if appropriate.

   If safely possible, alter OLDDECL to look like NEWDECL, and return
   true.  Otherwise, return false.  */

static bool
duplicate_decls (tree newdecl, tree olddecl)
{
  tree newtype = NULL, oldtype = NULL;

  /* Let the diagnostics machinery decide whether the two declarations
     are compatible; on failure, silence further warnings about the
     (now superseded) old declaration and bail out.  */
  if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype))
    {
      TREE_NO_WARNING (olddecl) = 1;
      return false;
    }

  merge_decls (newdecl, olddecl, newtype, oldtype);

  /* NEWDECL is no longer needed and will be ggc_freed below.  Before
     releasing it, detach anything that must survive: remove it from
     the symbol table (it may have been entered there to record a
     comdat group), and drop its pointer to the function structure,
     which is shared with OLDDECL and must not be freed.  */
  if (VAR_OR_FUNCTION_DECL_P (newdecl))
    if (symtab_node *node = symtab_node::get (newdecl))
      node->remove ();
  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    DECL_STRUCT_FUNCTION (newdecl) = NULL;
  ggc_free (newdecl);
  return true;
}
/* Check whether decl-node NEW_DECL shadows an existing declaration. */
static void
warn_if_shadowing (tree new_decl)
{
struct c_binding *b;
/* Shadow warnings wanted? */
if (!(warn_shadow
|| warn_shadow_local
|| warn_shadow_compatible_local)
/* No shadow warnings for internally generated vars. */
|| DECL_IS_BUILTIN (new_decl)
/* No shadow warnings for vars made for inlining. */
|| DECL_FROM_INLINE (new_decl))
return;
/* Is anything being shadowed? Invisible decls do not count. */
for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed)
if (b->decl && b->decl != new_decl && !b->invisible
&& (b->decl == error_mark_node
|| diagnostic_report_warnings_p (global_dc,
DECL_SOURCE_LOCATION (b->decl))))
{
tree old_decl = b->decl;
bool warned = false;
if (old_decl == error_mark_node)
{
warning (OPT_Wshadow, "declaration of %q+D shadows previous "
"non-variable", new_decl);
break;
}
else if (TREE_CODE (old_decl) == PARM_DECL)
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a parameter",
new_decl);
}
else if (DECL_FILE_SCOPE_P (old_decl))
{
/* Do not warn if a variable shadows a function, unless
the variable is a function or a pointer-to-function. */
if (TREE_CODE (old_decl) == FUNCTION_DECL
&& TREE_CODE (new_decl) != FUNCTION_DECL
&& !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl)))
continue;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow,
"declaration of %qD shadows a global "
"declaration",
new_decl);
}
else if (TREE_CODE (old_decl) == FUNCTION_DECL
&& DECL_BUILT_IN (old_decl))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", new_decl);
break;
}
else
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a previous local",
new_decl);
}
if (warned)
inform (DECL_SOURCE_LOCATION (old_decl),
"shadowed declaration is here");
break;
}
}
/* Record a decl-node X as belonging to the current lexical scope.
   Check for errors (such as an incompatible declaration for the same
   name already seen in the same scope).

   Returns either X or an old decl for the same name.
   If an old decl is returned, it may have been smashed
   to agree with what X says.  */

tree
pushdecl (tree x)
{
  tree name = DECL_NAME (x);
  struct c_scope *scope = current_scope;
  struct c_binding *b;
  bool nested = false;
  location_t locus = DECL_SOURCE_LOCATION (x);

  /* Must set DECL_CONTEXT for everything not at file scope or
     DECL_FILE_SCOPE_P won't work.  Local externs don't count
     unless they have initializers (which generate code).  */
  if (current_function_decl
      && (!VAR_OR_FUNCTION_DECL_P (x)
	  || DECL_INITIAL (x) || !DECL_EXTERNAL (x)))
    DECL_CONTEXT (x) = current_function_decl;

  /* Anonymous decls are just inserted in the scope.  */
  if (!name)
    {
      bind (name, x, scope, /*invisible=*/false, /*nested=*/false,
	    locus);
      return x;
    }

  /* First, see if there is another declaration with the same name in
     the current scope.  If there is, duplicate_decls may do all the
     work for us.  If duplicate_decls returns false, that indicates
     two incompatible decls in the same scope; we are to silently
     replace the old one (duplicate_decls has issued all appropriate
     diagnostics).  In particular, we should not consider possible
     duplicates in the external scope, or shadowing.  */
  b = I_SYMBOL_BINDING (name);
  if (b && B_IN_SCOPE (b, scope))
    {
      struct c_binding *b_ext, *b_use;
      tree type = TREE_TYPE (x);
      tree visdecl = b->decl;
      tree vistype = TREE_TYPE (visdecl);
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && COMPLETE_TYPE_P (TREE_TYPE (x)))
	b->inner_comp = false;
      /* B_USE is the binding X will actually be merged with; it starts
	 as the visible binding and may be redirected to the
	 external-scope binding just below.  */
      b_use = b;
      b_ext = b;
      /* If this is an external linkage declaration, we should check
	 for compatibility with the type in the external scope before
	 setting the type at this scope based on the visible
	 information only.  */
      if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl))
	{
	  /* Walk the shadow chain out to the external scope, if this
	     name is bound there; temporarily restore the saved
	     external composite type for the duplicate check.  */
	  while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
	    b_ext = b_ext->shadowed;
	  if (b_ext)
	    {
	      b_use = b_ext;
	      if (b_use->u.type)
		TREE_TYPE (b_use->decl) = b_use->u.type;
	    }
	}
      if (duplicate_decls (x, b_use->decl))
	{
	  if (b_use != b)
	    {
	      /* Save the updated type in the external scope and
		 restore the proper type for this scope.  */
	      tree thistype;
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b_use->decl);
	      b_use->u.type = TREE_TYPE (b_use->decl);
	      if (TREE_CODE (b_use->decl) == FUNCTION_DECL
		  && DECL_BUILT_IN (b_use->decl))
		thistype
		  = build_type_attribute_variant (thistype,
						  TYPE_ATTRIBUTES
						  (b_use->u.type));
	      TREE_TYPE (b_use->decl) = thistype;
	    }
	  return b_use->decl;
	}
      else
	goto skip_external_and_shadow_checks;
    }

  /* All declarations with external linkage, and all external
     references, go in the external scope, no matter what scope is
     current.  However, the binding in that scope is ignored for
     purposes of normal name lookup.  A separate binding structure is
     created in the requested scope; this governs the normal
     visibility of the symbol.

     The binding in the externals scope is used exclusively for
     detecting duplicate declarations of the same object, no matter
     what scope they are in; this is what we do here.  (C99 6.2.7p2:
     All declarations that refer to the same object or function shall
     have compatible type; otherwise, the behavior is undefined.)  */
  if (DECL_EXTERNAL (x) || scope == file_scope)
    {
      tree type = TREE_TYPE (x);
      tree vistype = NULL_TREE;
      tree visdecl = NULL_TREE;
      bool type_saved = false;
      if (b && !B_IN_EXTERNAL_SCOPE (b)
	  && VAR_OR_FUNCTION_DECL_P (b->decl)
	  && DECL_FILE_SCOPE_P (b->decl))
	{
	  visdecl = b->decl;
	  vistype = TREE_TYPE (visdecl);
	}
      if (scope != file_scope
	  && !DECL_IN_SYSTEM_HEADER (x))
	warning_at (locus, OPT_Wnested_externs,
		    "nested extern declaration of %qD", x);

      /* Walk outward over every binding of NAME until we reach the
	 external scope (or run out of bindings).  */
      while (b && !B_IN_EXTERNAL_SCOPE (b))
	{
	  /* If this decl might be modified, save its type.  This is
	     done here rather than when the decl is first bound
	     because the type may change after first binding, through
	     being completed or through attributes being added.  If we
	     encounter multiple such decls, only the first should have
	     its type saved; the others will already have had their
	     proper types saved and the types will not have changed as
	     their scopes will not have been re-entered.  */
	  if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved)
	    {
	      b->u.type = TREE_TYPE (b->decl);
	      type_saved = true;
	    }
	  if (B_IN_FILE_SCOPE (b)
	      && VAR_P (b->decl)
	      && TREE_STATIC (b->decl)
	      && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE
	      && !TYPE_DOMAIN (TREE_TYPE (b->decl))
	      && TREE_CODE (type) == ARRAY_TYPE
	      && TYPE_DOMAIN (type)
	      && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	      && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
	    {
	      /* Array type completed in inner scope, which should be
		 diagnosed if the completion does not have size 1 and
		 it does not get completed in the file scope.  */
	      b->inner_comp = true;
	    }
	  b = b->shadowed;
	}

      /* If a matching external declaration has been found, set its
	 type to the composite of all the types of that declaration.
	 After the consistency checks, it will be reset to the
	 composite of the visible types only.  */
      if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && b->u.type)
	TREE_TYPE (b->decl) = b->u.type;

      /* The point of the same_translation_unit_p check here is,
	 we want to detect a duplicate decl for a construct like
	 foo() { extern bar(); } ... static bar();  but not if
	 they are in different translation units.  In any case,
	 the static does not go in the externals scope.  */
      if (b
	  && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && duplicate_decls (x, b->decl))
	{
	  tree thistype;
	  if (vistype)
	    {
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b->decl);
	    }
	  else
	    thistype = type;
	  b->u.type = TREE_TYPE (b->decl);
	  if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl))
	    thistype
	      = build_type_attribute_variant (thistype,
					      TYPE_ATTRIBUTES (b->u.type));
	  TREE_TYPE (b->decl) = thistype;
	  bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true,
		locus);
	  return b->decl;
	}
      else if (TREE_PUBLIC (x))
	{
	  if (visdecl && !b && duplicate_decls (x, visdecl))
	    {
	      /* An external declaration at block scope referring to a
		 visible entity with internal linkage.  The composite
		 type will already be correct for this scope, so we
		 just need to fall through to make the declaration in
		 this scope.  */
	      nested = true;
	      x = visdecl;
	    }
	  else
	    {
	      bind (name, x, external_scope, /*invisible=*/true,
		    /*nested=*/false, locus);
	      nested = true;
	    }
	}
    }

  if (TREE_CODE (x) != PARM_DECL)
    warn_if_shadowing (x);

 skip_external_and_shadow_checks:
  if (TREE_CODE (x) == TYPE_DECL)
    {
      /* So this is a typedef, set its underlying type.  */
      set_underlying_type (x);

      /* If X is a typedef defined in the current function, record it
	 for the purpose of implementing the -Wunused-local-typedefs
	 warning.  */
      record_locally_defined_typedef (x);
    }

  bind (name, x, scope, /*invisible=*/false, nested, locus);

  /* If x's type is incomplete because it's based on a
     structure or union which has not yet been fully declared,
     attach it to that structure or union type, so we can go
     back and complete the variable declaration later, if the
     structure or union gets fully declared.

     If the input is erroneous, we can have error_mark in the type
     slot (e.g. "f(void a, ...)") - that doesn't count as an
     incomplete type.  */
  if (TREE_TYPE (x) != error_mark_node
      && !COMPLETE_TYPE_P (TREE_TYPE (x)))
    {
      tree element = TREE_TYPE (x);

      /* Strip array layers to find the underlying element type.  */
      while (TREE_CODE (element) == ARRAY_TYPE)
	element = TREE_TYPE (element);
      element = TYPE_MAIN_VARIANT (element);

      if (RECORD_OR_UNION_TYPE_P (element)
	  && (TREE_CODE (x) != TYPE_DECL
	      || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE)
	  && !COMPLETE_TYPE_P (element))
	C_TYPE_INCOMPLETE_VARS (element)
	  = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element));
    }
  return x;
}
/* Issue a warning about implicit function declaration.  ID is the function
   identifier, OLDDECL is a declaration of the function in a different scope,
   or NULL_TREE.  */

static void
implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
  if (!warn_implicit_function_declaration)
    return;

  /* Only go fuzzy-matching for a spelling suggestion when there is no
     out-of-scope declaration to point at.  */
  name_hint hint;
  if (olddecl == NULL_TREE)
    hint = lookup_name_fuzzy (id, FUZZY_LOOKUP_FUNCTION_NAME, loc);

  /* In C99 and later the diagnostic is a pedwarn; otherwise it is an
     ordinary warning.  When a suggestion is available, attach it as a
     fix-it replacement hint.  */
  bool warned;
  if (hint)
    {
      gcc_rich_location richloc (loc);
      richloc.add_fixit_replace (hint.suggestion ());
      if (flag_isoc99)
	warned = pedwarn (&richloc, OPT_Wimplicit_function_declaration,
			  "implicit declaration of function %qE;"
			  " did you mean %qs?",
			  id, hint.suggestion ());
      else
	warned = warning_at
	  (&richloc, OPT_Wimplicit_function_declaration,
	   G_("implicit declaration of function %qE; did you mean %qs?"),
	   id, hint.suggestion ());
    }
  else if (flag_isoc99)
    warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
		      "implicit declaration of function %qE", id);
  else
    warned = warning_at (loc, OPT_Wimplicit_function_declaration,
			 G_("implicit declaration of function %qE"), id);

  if (warned && olddecl)
    locate_old_decl (olddecl);

  if (!warned)
    hint.suppress ();
}
/* This function represents mapping of a function code FCODE
   to its respective header.  Returns the standard header (including the
   angle brackets) that declares the built-in, or NULL if the built-in has
   no associated standard header.  Used to suggest a missing #include when
   a built-in is implicitly declared with an incompatible type.  */
static const char *
header_for_builtin_fn (enum built_in_function fcode)
{
  switch (fcode)
    {
    /* Real-valued <math.h> functions, including the _FloatN/_FloatNx
       variants where they exist.  */
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ASINH):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_ATAN2):
    CASE_FLT_FN (BUILT_IN_CBRT):
    CASE_FLT_FN (BUILT_IN_CEIL):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_CEIL):
    CASE_FLT_FN (BUILT_IN_COPYSIGN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
    CASE_FLT_FN (BUILT_IN_COS):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_ERF):
    CASE_FLT_FN (BUILT_IN_ERFC):
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_FABS):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS):
    CASE_FLT_FN (BUILT_IN_FDIM):
    CASE_FLT_FN (BUILT_IN_FLOOR):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FLOOR):
    CASE_FLT_FN (BUILT_IN_FMA):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA):
    CASE_FLT_FN (BUILT_IN_FMAX):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMAX):
    CASE_FLT_FN (BUILT_IN_FMIN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMIN):
    CASE_FLT_FN (BUILT_IN_FMOD):
    CASE_FLT_FN (BUILT_IN_FREXP):
    CASE_FLT_FN (BUILT_IN_HYPOT):
    CASE_FLT_FN (BUILT_IN_ILOGB):
    CASE_FLT_FN (BUILT_IN_LDEXP):
    CASE_FLT_FN (BUILT_IN_LGAMMA):
    CASE_FLT_FN (BUILT_IN_LLRINT):
    CASE_FLT_FN (BUILT_IN_LLROUND):
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOGB):
    CASE_FLT_FN (BUILT_IN_LRINT):
    CASE_FLT_FN (BUILT_IN_LROUND):
    CASE_FLT_FN (BUILT_IN_MODF):
    CASE_FLT_FN (BUILT_IN_NAN):
    CASE_FLT_FN (BUILT_IN_NEARBYINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_NEARBYINT):
    CASE_FLT_FN (BUILT_IN_NEXTAFTER):
    CASE_FLT_FN (BUILT_IN_NEXTTOWARD):
    CASE_FLT_FN (BUILT_IN_POW):
    CASE_FLT_FN (BUILT_IN_REMAINDER):
    CASE_FLT_FN (BUILT_IN_REMQUO):
    CASE_FLT_FN (BUILT_IN_RINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_RINT):
    CASE_FLT_FN (BUILT_IN_ROUND):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_ROUND):
    CASE_FLT_FN (BUILT_IN_SCALBLN):
    CASE_FLT_FN (BUILT_IN_SCALBN):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SINH):
    CASE_FLT_FN (BUILT_IN_SINCOS):
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
    CASE_FLT_FN (BUILT_IN_TAN):
    CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TGAMMA):
    CASE_FLT_FN (BUILT_IN_TRUNC):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC):
    case BUILT_IN_ISINF:
    case BUILT_IN_ISNAN:
      return "<math.h>";
    /* Complex-valued functions.  */
    CASE_FLT_FN (BUILT_IN_CABS):
    CASE_FLT_FN (BUILT_IN_CACOS):
    CASE_FLT_FN (BUILT_IN_CACOSH):
    CASE_FLT_FN (BUILT_IN_CARG):
    CASE_FLT_FN (BUILT_IN_CASIN):
    CASE_FLT_FN (BUILT_IN_CASINH):
    CASE_FLT_FN (BUILT_IN_CATAN):
    CASE_FLT_FN (BUILT_IN_CATANH):
    CASE_FLT_FN (BUILT_IN_CCOS):
    CASE_FLT_FN (BUILT_IN_CCOSH):
    CASE_FLT_FN (BUILT_IN_CEXP):
    CASE_FLT_FN (BUILT_IN_CIMAG):
    CASE_FLT_FN (BUILT_IN_CLOG):
    CASE_FLT_FN (BUILT_IN_CONJ):
    CASE_FLT_FN (BUILT_IN_CPOW):
    CASE_FLT_FN (BUILT_IN_CPROJ):
    CASE_FLT_FN (BUILT_IN_CREAL):
    CASE_FLT_FN (BUILT_IN_CSIN):
    CASE_FLT_FN (BUILT_IN_CSINH):
    CASE_FLT_FN (BUILT_IN_CSQRT):
    CASE_FLT_FN (BUILT_IN_CTAN):
    CASE_FLT_FN (BUILT_IN_CTANH):
      return "<complex.h>";
    /* Memory and string functions.  */
    case BUILT_IN_MEMCHR:
    case BUILT_IN_MEMCMP:
    case BUILT_IN_MEMCPY:
    case BUILT_IN_MEMMOVE:
    case BUILT_IN_MEMSET:
    case BUILT_IN_STRCAT:
    case BUILT_IN_STRCHR:
    case BUILT_IN_STRCMP:
    case BUILT_IN_STRCPY:
    case BUILT_IN_STRCSPN:
    case BUILT_IN_STRLEN:
    case BUILT_IN_STRNCAT:
    case BUILT_IN_STRNCMP:
    case BUILT_IN_STRNCPY:
    case BUILT_IN_STRPBRK:
    case BUILT_IN_STRRCHR:
    case BUILT_IN_STRSPN:
    case BUILT_IN_STRSTR:
      return "<string.h>";
    /* Formatted and character I/O.  */
    case BUILT_IN_FPRINTF:
    case BUILT_IN_PUTC:
    case BUILT_IN_FPUTC:
    case BUILT_IN_FPUTS:
    case BUILT_IN_FSCANF:
    case BUILT_IN_FWRITE:
    case BUILT_IN_PRINTF:
    case BUILT_IN_PUTCHAR:
    case BUILT_IN_PUTS:
    case BUILT_IN_SCANF:
    case BUILT_IN_SNPRINTF:
    case BUILT_IN_SPRINTF:
    case BUILT_IN_SSCANF:
    case BUILT_IN_VFPRINTF:
    case BUILT_IN_VFSCANF:
    case BUILT_IN_VPRINTF:
    case BUILT_IN_VSCANF:
    case BUILT_IN_VSNPRINTF:
    case BUILT_IN_VSPRINTF:
    case BUILT_IN_VSSCANF:
      return "<stdio.h>";
    /* Character classification and conversion.  */
    case BUILT_IN_ISALNUM:
    case BUILT_IN_ISALPHA:
    case BUILT_IN_ISBLANK:
    case BUILT_IN_ISCNTRL:
    case BUILT_IN_ISDIGIT:
    case BUILT_IN_ISGRAPH:
    case BUILT_IN_ISLOWER:
    case BUILT_IN_ISPRINT:
    case BUILT_IN_ISPUNCT:
    case BUILT_IN_ISSPACE:
    case BUILT_IN_ISUPPER:
    case BUILT_IN_ISXDIGIT:
    case BUILT_IN_TOLOWER:
    case BUILT_IN_TOUPPER:
      return "<ctype.h>";
    /* Wide-character classification and conversion.  */
    case BUILT_IN_ISWALNUM:
    case BUILT_IN_ISWALPHA:
    case BUILT_IN_ISWBLANK:
    case BUILT_IN_ISWCNTRL:
    case BUILT_IN_ISWDIGIT:
    case BUILT_IN_ISWGRAPH:
    case BUILT_IN_ISWLOWER:
    case BUILT_IN_ISWPRINT:
    case BUILT_IN_ISWPUNCT:
    case BUILT_IN_ISWSPACE:
    case BUILT_IN_ISWUPPER:
    case BUILT_IN_ISWXDIGIT:
    case BUILT_IN_TOWLOWER:
    case BUILT_IN_TOWUPPER:
      return "<wctype.h>";
    /* General utilities: memory management, process control, abs.  */
    case BUILT_IN_ABORT:
    case BUILT_IN_ABS:
    case BUILT_IN_CALLOC:
    case BUILT_IN_EXIT:
    case BUILT_IN_FREE:
    case BUILT_IN_LABS:
    case BUILT_IN_LLABS:
    case BUILT_IN_MALLOC:
    case BUILT_IN_REALLOC:
    case BUILT_IN__EXIT2:
    case BUILT_IN_ALIGNED_ALLOC:
      return "<stdlib.h>";
    case BUILT_IN_IMAXABS:
      return "<inttypes.h>";
    case BUILT_IN_STRFTIME:
      return "<time.h>";
    default:
      /* No single standard header is associated with this built-in.  */
      return NULL;
    }
}
/* Generate an implicit declaration for identifier FUNCTIONID at LOC as a
   function of type int ().  If a declaration already exists in the
   external scope, recycle it (updating its type and warning as needed);
   otherwise build a fresh extern FUNCTION_DECL and push it into the
   current scope per C89 semantics.  Returns the declaration used.  */
tree
implicitly_declare (location_t loc, tree functionid)
{
  struct c_binding *b;
  tree decl = NULL_TREE;
  tree asmspec_tree;
  /* Walk the shadow chain looking for a binding in the external scope;
     inner-scope bindings do not count here.  */
  for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed)
    {
      if (B_IN_SCOPE (b, external_scope))
	{
	  decl = b->decl;
	  break;
	}
    }
  if (decl)
    {
      /* A non-function in the external scope (e.g. an extern variable)
	 is simply returned; the caller will diagnose the misuse.  */
      if (TREE_CODE (decl) != FUNCTION_DECL)
	return decl;
      /* FIXME: Objective-C has weird not-really-builtin functions
	 which are supposed to be visible automatically.  They wind up
	 in the external scope because they're pushed before the file
	 scope gets created.  Catch this here and rebind them into the
	 file scope.  */
      if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl))
	{
	  bind (functionid, decl, file_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
      else
	{
	  tree newtype = default_function_type;
	  /* Restore the type the external-scope binding recorded, in
	     case an inner declaration had temporarily changed it.  */
	  if (b->u.type)
	    TREE_TYPE (decl) = b->u.type;
	  /* Implicit declaration of a function already declared
	     (somehow) in a different scope, or as a built-in.
	     If this is the first time this has happened, warn;
	     then recycle the old declaration but with the new type.  */
	  if (!C_DECL_IMPLICIT (decl))
	    {
	      implicit_decl_warning (loc, functionid, decl);
	      C_DECL_IMPLICIT (decl) = 1;
	    }
	  if (DECL_BUILT_IN (decl))
	    {
	      /* Carry over the built-in's type attributes (e.g. format,
		 nothrow) onto the implicit int () type.  */
	      newtype = build_type_attribute_variant (newtype,
						      TYPE_ATTRIBUTES
						      (TREE_TYPE (decl)));
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  bool warned = warning_at (loc, 0, "incompatible implicit "
					    "declaration of built-in "
					    "function %qD", decl);
		  /* See if we can hint which header to include.  */
		  const char *header
		    = header_for_builtin_fn (DECL_FUNCTION_CODE (decl));
		  if (header != NULL && warned)
		    {
		      rich_location richloc (line_table, loc);
		      maybe_add_include_fixit (&richloc, header);
		      inform (&richloc,
			      "include %qs or provide a declaration of %qD",
			      header, decl);
		    }
		  /* Keep the built-in's own (correct) type rather than
		     the incompatible implicit one.  */
		  newtype = TREE_TYPE (decl);
		}
	    }
	  else
	    {
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  error_at (loc, "incompatible implicit declaration of "
			    "function %qD", decl);
		  locate_old_decl (decl);
		}
	    }
	  /* Record the pre-implicit type on the binding, install the new
	     type, and make the decl visible in the current scope.  */
	  b->u.type = TREE_TYPE (decl);
	  TREE_TYPE (decl) = newtype;
	  bind (functionid, decl, current_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
    }
  /* Not seen before.  */
  decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type);
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;
  C_DECL_IMPLICIT (decl) = 1;
  implicit_decl_warning (loc, functionid, 0);
  /* Honor any "#pragma redefine_extname"-style renaming in effect.  */
  asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL);
  if (asmspec_tree)
    set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree));
  /* C89 says implicit declarations are in the innermost block.
     So we record the decl in the standard fashion.  */
  decl = pushdecl (decl);
  /* No need to call objc_check_decl here - it's a function type.  */
  rest_of_decl_compilation (decl, 0, 0);
  /* Write a record describing this implicit function declaration
     to the prototypes file (if requested).  */
  gen_aux_info_record (decl, 0, 1, 0);
  /* Possibly apply some default attributes to this implicit declaration.  */
  decl_attributes (&decl, NULL_TREE, 0);
  return decl;
}
/* Issue an error message for a reference to an undeclared variable
   ID, including a reference to a builtin outside of function-call
   context.  Establish a binding of the identifier to error_mark_node
   in an appropriate scope, which will suppress further errors for the
   same identifier.  The error message should be given location LOC.  */
void
undeclared_variable (location_t loc, tree id)
{
  /* NOTE: function-static, so the "reported only once" note below is
     emitted at most once per compilation, not once per function.  */
  static bool already = false;
  struct c_scope *scope;
  if (current_function_decl == NULL_TREE)
    {
      /* File-scope reference: try a spelling suggestion first.  */
      name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
      if (guessed_id)
	{
	  gcc_rich_location richloc (loc);
	  richloc.add_fixit_replace (guessed_id.suggestion ());
	  error_at (&richloc,
		    "%qE undeclared here (not in a function);"
		    " did you mean %qs?",
		    id, guessed_id.suggestion ());
	}
      else
	error_at (loc, "%qE undeclared here (not in a function)", id);
      scope = current_scope;
    }
  else
    {
      /* Inside a function: let Objective-C claim the identifier as a
	 private ivar first; only diagnose ourselves if it does not.  */
      if (!objc_diagnose_private_ivar (id))
	{
	  name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
	  if (guessed_id)
	    {
	      gcc_rich_location richloc (loc);
	      richloc.add_fixit_replace (guessed_id.suggestion ());
	      error_at (&richloc,
			"%qE undeclared (first use in this function);"
			" did you mean %qs?",
			id, guessed_id.suggestion ());
	    }
	  else
	    error_at (loc, "%qE undeclared (first use in this function)", id);
	}
      if (!already)
	{
	  inform (loc, "each undeclared identifier is reported only"
		  " once for each function it appears in");
	  already = true;
	}
      /* If we are parsing old-style parameter decls, current_function_decl
	 will be nonnull but current_function_scope will be null.  */
      scope = current_function_scope ? current_function_scope : current_scope;
    }
  /* Bind to error_mark_node so later uses of ID are silently accepted.  */
  bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);
}
/* Subroutine of lookup_label, declare_label, define_label: construct a
   LABEL_DECL with all the proper frills.  Also create a struct
   c_label_vars initialized for the current scope, returned through
   *P_LABEL_VARS.  DEFINING says whether the label is being defined (as
   opposed to merely referenced) at this point.  */
static tree
make_label (location_t location, tree name, bool defining,
	    struct c_label_vars **p_label_vars)
{
  /* Set up the per-label bookkeeping used for goto warnings.  */
  c_label_vars *vars = ggc_alloc<c_label_vars> ();
  vars->shadowed = NULL;
  vars->decls_in_scope = make_tree_vector ();
  vars->gotos = NULL;
  set_spot_bindings (&vars->label_bindings, defining);

  /* Build the LABEL_DECL itself, owned by the current function.  */
  tree lab = build_decl (location, LABEL_DECL, name, void_type_node);
  DECL_CONTEXT (lab) = current_function_decl;
  SET_DECL_MODE (lab, VOIDmode);

  *p_label_vars = vars;
  return lab;
}
/* Get the LABEL_DECL corresponding to identifier NAME as a label.
   Create one if none exists so far for the current function.
   This is called when a label is used in a goto expression or
   has its address taken.  Returns NULL_TREE (after an error) when
   referenced outside any function.  */
tree
lookup_label (tree name)
{
  tree label;
  struct c_label_vars *label_vars;
  if (current_function_scope == 0)
    {
      error ("label %qE referenced outside of any function", name);
      return NULL_TREE;
    }
  /* Use a label already defined or ref'd with this name, but not if
     it is inherited from a containing function and wasn't declared
     using __label__.  */
  label = I_LABEL_DECL (name);
  if (label && (DECL_CONTEXT (label) == current_function_decl
		|| C_DECLARED_LABEL_FLAG (label)))
    {
      /* If the label has only been declared, update its apparent
	 location to point here, for better diagnostics if it
	 turns out not to have been defined.  */
      if (DECL_INITIAL (label) == NULL_TREE)
	DECL_SOURCE_LOCATION (label) = input_location;
      return label;
    }
  /* No label binding for that identifier; make one.  */
  label = make_label (input_location, name, false, &label_vars);
  /* Ordinary labels go in the current function scope.  */
  bind_label (name, label, current_function_scope, label_vars);
  return label;
}
/* Issue a warning about DECL for a goto statement at GOTO_LOC going
   to LABEL.  Jumping into the scope of a variably modified type is a
   hard error; skipping an ordinary initialization is only a warning
   controlled by -Wjump-misses-init.  Both are followed by notes
   locating the label and the declaration.  */
static void
warn_about_goto (location_t goto_loc, tree label, tree decl)
{
  bool vm_type_p = variably_modified_type_p (TREE_TYPE (decl), NULL_TREE);

  if (vm_type_p)
    error_at (goto_loc,
	      "jump into scope of identifier with variably modified type");
  else
    warning_at (goto_loc, OPT_Wjump_misses_init,
		"jump skips variable initialization");

  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
  inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}
/* Look up a label because of a goto statement.  This is like
   lookup_label, but also issues any appropriate warnings about jumping
   over declarations or into statement expressions.  LOC is the location
   of the goto.  Returns the LABEL_DECL, or NULL_TREE on error.  */
tree
lookup_label_for_goto (location_t loc, tree name)
{
  tree label;
  struct c_label_vars *label_vars;
  unsigned int ix;
  tree decl;
  label = lookup_label (name);
  if (label == NULL_TREE)
    return NULL_TREE;
  /* If we are jumping to a different function, we can't issue any
     useful warnings.  */
  if (DECL_CONTEXT (label) != current_function_decl)
    {
      /* Only __label__-declared labels may come from another function.  */
      gcc_assert (C_DECLARED_LABEL_FLAG (label));
      return label;
    }
  label_vars = I_LABEL_BINDING (name)->u.label;
  /* If the label has not yet been defined, then push this goto on a
     list for possible later warnings.  The bindings snapshot taken here
     is compared against the label's scope when it is finally defined
     (see check_earlier_gotos).  */
  if (label_vars->label_bindings.scope == NULL)
    {
      c_goto_bindings *g = ggc_alloc<c_goto_bindings> ();
      g->loc = loc;
      set_spot_bindings (&g->goto_bindings, true);
      vec_safe_push (label_vars->gotos, g);
      return label;
    }
  /* If there are any decls in label_vars->decls_in_scope, then this
     goto has missed the declaration of the decl.  This happens for a
     case like
       int i = 1;
      lab:
       ...
       goto lab;
     Issue a warning or error.  */
  FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl)
    warn_about_goto (loc, label, decl);
  /* A backward jump must not re-enter a statement expression that has
     since been left.  */
  if (label_vars->label_bindings.left_stmt_expr)
    {
      error_at (loc, "jump into statement expression");
      inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
    }
  return label;
}
/* Make a label named NAME in the current function, shadowing silently
   any that may be inherited from containing functions or containing
   scopes.  This is called for __label__ declarations.  Returns the new
   LABEL_DECL (or the previous one, after an error, on redeclaration).  */
tree
declare_label (tree name)
{
  struct c_binding *b = I_LABEL_BINDING (name);
  tree label;
  struct c_label_vars *label_vars;
  /* Check to make sure that the label hasn't already been declared
     at this scope */
  if (b && B_IN_CURRENT_SCOPE (b))
    {
      error ("duplicate label declaration %qE", name);
      locate_old_decl (b->decl);
      /* Just use the previous declaration.  */
      return b->decl;
    }
  label = make_label (input_location, name, false, &label_vars);
  /* Mark it so lookup_label accepts it even from nested functions.  */
  C_DECLARED_LABEL_FLAG (label) = 1;
  /* Declared labels go in the current scope.  */
  bind_label (name, label, current_scope, label_vars);
  return label;
}
/* When we define a label, issue any appropriate warnings if there are
   any gotos earlier in the function which jump to this label.  Each
   pending goto recorded in LABEL_VARS->gotos carries a snapshot of the
   bindings visible at the goto; any jump-unsafe decl bound after that
   snapshot but before the label is skipped by the forward jump.  */
static void
check_earlier_gotos (tree label, struct c_label_vars* label_vars)
{
  unsigned int ix;
  struct c_goto_bindings *g;
  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
    {
      struct c_binding *b;
      struct c_scope *scope;
      /* We have a goto to this label.  The goto is going forward.  In
	 g->scope, the goto is going to skip any binding which was
	 defined after g->bindings_in_scope.  */
      if (g->goto_bindings.scope->has_jump_unsafe_decl)
	{
	  /* Bindings are chained newest-first, so walk from the head
	     down to the snapshot point.  */
	  for (b = g->goto_bindings.scope->bindings;
	       b != g->goto_bindings.bindings_in_scope;
	       b = b->prev)
	    {
	      if (decl_jump_unsafe (b->decl))
		warn_about_goto (g->loc, label, b->decl);
	    }
	}
      /* We also need to warn about decls defined in any scopes
	 between the scope of the label and the scope of the goto.  */
      for (scope = label_vars->label_bindings.scope;
	   scope != g->goto_bindings.scope;
	   scope = scope->outer)
	{
	  /* The goto's scope must enclose the label's scope.  */
	  gcc_assert (scope != NULL);
	  if (scope->has_jump_unsafe_decl)
	    {
	      /* In the label's own scope, only decls bound after the
		 label itself matter; in intermediate scopes, all do.  */
	      if (scope == label_vars->label_bindings.scope)
		b = label_vars->label_bindings.bindings_in_scope;
	      else
		b = scope->bindings;
	      for (; b != NULL; b = b->prev)
		{
		  if (decl_jump_unsafe (b->decl))
		    warn_about_goto (g->loc, label, b->decl);
		}
	    }
	}
      /* A forward jump must not enter a statement expression.  */
      if (g->goto_bindings.stmt_exprs > 0)
	{
	  error_at (g->loc, "jump into statement expression");
	  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here",
		  label);
	}
    }
  /* Now that the label is defined, we will issue warnings about
     subsequent gotos to this label when we see them.  */
  vec_safe_truncate (label_vars->gotos, 0);
  label_vars->gotos = NULL;
}
/* Define a label, specifying the location in the source file.
   Return the LABEL_DECL node for the label, if the definition is valid.
   Otherwise return NULL_TREE.  */
tree
define_label (location_t location, tree name)
{
  /* Find any preexisting label with this name.  It is an error
     if that label has already been defined in this function, or
     if there is a containing function with a declared label with
     the same name.  */
  tree label = I_LABEL_DECL (name);
  if (label
      && ((DECL_CONTEXT (label) == current_function_decl
	   && DECL_INITIAL (label) != NULL_TREE)
	  || (DECL_CONTEXT (label) != current_function_decl
	      && C_DECLARED_LABEL_FLAG (label))))
    {
      error_at (location, "duplicate label %qD", label);
      locate_old_decl (label);
      return NULL_TREE;
    }
  else if (label && DECL_CONTEXT (label) == current_function_decl)
    {
      struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label;
      /* The label has been used or declared already in this function,
	 but not defined.  Update its location to point to this
	 definition.  */
      DECL_SOURCE_LOCATION (label) = location;
      set_spot_bindings (&label_vars->label_bindings, true);
      /* Issue warnings as required about any goto statements from
	 earlier in the function.  */
      check_earlier_gotos (label, label_vars);
    }
  else
    {
      struct c_label_vars *label_vars;
      /* No label binding for that identifier; make one.  */
      label = make_label (location, name, true, &label_vars);
      /* Ordinary labels go in the current function scope.  */
      bind_label (name, label, current_function_scope, label_vars);
    }
  /* Traditional C had a single namespace for labels and ordinary
     identifiers; warn if -Wtraditional and the name is also bound.  */
  if (!in_system_header_at (input_location) && lookup_name (name))
    warning_at (location, OPT_Wtraditional,
		"traditional C lacks a separate namespace "
		"for labels, identifier %qE conflicts", name);
  /* Mark label as having been defined.  error_mark_node is used merely
     as a non-NULL sentinel here; see the DECL_INITIAL checks above.  */
  DECL_INITIAL (label) = error_mark_node;
  return label;
}
/* Get the bindings for a new switch statement.  This is used to issue
   warnings as appropriate for jumps from the switch to case or
   default labels.  The caller must eventually release the result with
   c_release_switch_bindings.  */
struct c_spot_bindings *
c_get_switch_bindings (void)
{
  struct c_spot_bindings *bindings = XNEW (struct c_spot_bindings);
  set_spot_bindings (bindings, true);
  return bindings;
}
/* Release switch BINDINGS obtained from c_get_switch_bindings, checking
   that no unbalanced statement expressions remain.  */
void
c_release_switch_bindings (struct c_spot_bindings *bindings)
{
  gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr);
  XDELETE (bindings);
}
/* This is called at the point of a case or default label to issue
   warnings about decls as needed.  It returns true if it found an
   error, not just a warning.  SWITCH_BINDINGS is the snapshot taken at
   the switch statement; SWITCH_LOC and CASE_LOC locate the switch and
   the label for diagnostics.  */
bool
c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings,
			      location_t switch_loc, location_t case_loc)
{
  bool saw_error;
  struct c_scope *scope;
  saw_error = false;
  /* Walk every scope opened since the switch; any jump-unsafe decl in
     one of them is skipped by the jump from switch to label.  */
  for (scope = current_scope;
       scope != switch_bindings->scope;
       scope = scope->outer)
    {
      struct c_binding *b;
      /* The switch's scope must enclose the label's scope.  */
      gcc_assert (scope != NULL);
      /* Cheap flag check avoids walking bindings in the common case.  */
      if (!scope->has_jump_unsafe_decl)
	continue;
      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  if (decl_jump_unsafe (b->decl))
	    {
	      /* Variably modified types make this a hard error;
		 skipped initializations are only a warning.  */
	      if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE))
		{
		  saw_error = true;
		  error_at (case_loc,
			    ("switch jumps into scope of identifier with "
			     "variably modified type"));
		}
	      else
		warning_at (case_loc, OPT_Wjump_misses_init,
			    "switch jumps over variable initialization");
	      inform (switch_loc, "switch starts here");
	      inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here",
		      b->decl);
	    }
	}
    }
  /* Jumping into a statement expression is always an error.  */
  if (switch_bindings->stmt_exprs > 0)
    {
      saw_error = true;
      error_at (case_loc, "switch jumps into statement expression");
      inform (switch_loc, "switch starts here");
    }
  return saw_error;
}
/* Given NAME, an IDENTIFIER_NODE,
   return the structure (or union or enum) definition for that name.
   If THISLEVEL_ONLY is nonzero, searches only the current_scope.
   CODE says which kind of type the caller wants;
   it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
   If PLOC is not NULL and this returns non-null, it sets *PLOC to the
   location where the tag was defined.
   If the wrong kind of type is found, an error is reported (possibly
   deferred via pending_invalid_xref, see pending_xref_error).  */
static tree
lookup_tag (enum tree_code code, tree name, bool thislevel_only,
	    location_t *ploc)
{
  struct c_binding *b = I_TAG_BINDING (name);
  bool thislevel = false;
  if (!b || !b->decl)
    return NULL_TREE;
  /* We only care about whether it's in this level if
     thislevel_only was set or it might be a type clash.  */
  if (thislevel_only || TREE_CODE (b->decl) != code)
    {
      /* For our purposes, a tag in the external scope is the same as
	 a tag in the file scope.  (Primarily relevant to Objective-C
	 and its builtin structure tags, which get pushed before the
	 file scope is created.)  */
      if (B_IN_CURRENT_SCOPE (b)
	  || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
	thislevel = true;
    }
  if (thislevel_only && !thislevel)
    return NULL_TREE;
  if (TREE_CODE (b->decl) != code)
    {
      /* Definition isn't the kind we were looking for.  Record it for
	 a deferred diagnostic, since "struct foo;" may legitimately be
	 shadowing a tag of a different kind from an outer scope.  */
      pending_invalid_xref = name;
      pending_invalid_xref_location = input_location;
      /* If in the same binding level as a declaration as a tag
	 of a different type, this must not be allowed to
	 shadow that tag, so give the error immediately.
	 (For example, "struct foo; union foo;" is invalid.)  */
      if (thislevel)
	pending_xref_error ();
    }
  if (ploc != NULL)
    *ploc = b->locus;
  return b->decl;
}
/* Return true if a definition exists for NAME with code CODE (one of
   RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE) in any visible scope.  */
bool
tag_exists_p (enum tree_code code, tree name)
{
  struct c_binding *b = I_TAG_BINDING (name);
  return (b != NULL
	  && b->decl != NULL_TREE
	  && TREE_CODE (b->decl) == code);
}
/* Print an error message now
   for a recent invalid struct, union or enum cross reference.
   We don't print them immediately because they are not invalid
   when used in the `struct foo;' construct for shadowing.  */
void
pending_xref_error (void)
{
  if (pending_invalid_xref == NULL_TREE)
    return;
  error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag",
	    pending_invalid_xref);
  /* Consume the pending reference so it is reported only once.  */
  pending_invalid_xref = NULL_TREE;
}
/* Look up NAME in the current scope and its superiors
   in the namespace of variables, functions and typedefs.
   Return a ..._DECL node of some kind representing its definition,
   or return NULL_TREE if it is undefined.  Invisible bindings (such as
   not-yet-declared builtins) are ignored.  */
tree
lookup_name (tree name)
{
  struct c_binding *b = I_SYMBOL_BINDING (name);
  if (b == NULL || b->invisible)
    return NULL_TREE;
  /* Track typedef uses for -Wunused-local-typedefs.  */
  maybe_record_typedef_use (b->decl);
  return b->decl;
}
/* Similar to `lookup_name' but look only at the indicated scope,
   walking the chain of shadowed bindings for NAME.  Returns NULL_TREE
   when NAME has no binding in SCOPE.  */
static tree
lookup_name_in_scope (tree name, struct c_scope *scope)
{
  struct c_binding *b = I_SYMBOL_BINDING (name);
  while (b != NULL)
    {
      if (B_IN_SCOPE (b, scope))
	return b->decl;
      b = b->shadowed;
    }
  return NULL_TREE;
}
/* Look for the closest match for NAME within the currently valid
   scopes.
   This finds the identifier with the lowest Levenshtein distance to
   NAME.  If there are multiple candidates with equal minimal distance,
   the first one found is returned.  Scopes are searched from innermost
   outwards, and within a scope in reverse order of declaration, thus
   benefiting candidates "near" to the current scope.
   The function also looks for similar macro names to NAME, since a
   misspelled macro name will not be expanded, and hence looks like an
   identifier to the C frontend.
   It also looks for start_typename keywords, to detect "singed" vs "signed"
   typos.
   KIND restricts the candidate set (e.g. to typedefs or callables).
   Use LOC for any deferred diagnostics.  */
name_hint
lookup_name_fuzzy (tree name, enum lookup_name_fuzzy_kind kind, location_t loc)
{
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);
  /* First, try some well-known names in the C standard library, in case
     the user forgot a #include.  */
  const char *header_hint
    = get_c_stdlib_header_for_name (IDENTIFIER_POINTER (name));
  if (header_hint)
    return name_hint (NULL,
		      new suggest_missing_header (loc,
						  IDENTIFIER_POINTER (name),
						  header_hint));
  /* Only suggest names reserved for the implementation if NAME begins
     with an underscore.  */
  bool consider_implementation_names = (IDENTIFIER_POINTER (name)[0] == '_');
  best_match<tree, tree> bm (name);
  /* Look within currently valid scopes.  */
  for (c_scope *scope = current_scope; scope; scope = scope->outer)
    for (c_binding *binding = scope->bindings; binding; binding = binding->prev)
      {
	if (!binding->id || binding->invisible)
	  continue;
	/* Don't suggest names bound only to suppress earlier errors.  */
	if (binding->decl == error_mark_node)
	  continue;
	/* Don't use bindings from implicitly declared functions,
	   as they were likely misspellings themselves.  */
	if (TREE_CODE (binding->decl) == FUNCTION_DECL)
	  if (C_DECL_IMPLICIT (binding->decl))
	    continue;
	/* Don't suggest names that are reserved for use by the
	   implementation, unless NAME began with an underscore.  */
	if (!consider_implementation_names)
	  {
	    const char *suggestion_str = IDENTIFIER_POINTER (binding->id);
	    if (name_reserved_for_implementation_p (suggestion_str))
	      continue;
	  }
	/* Filter candidates by the kind of name the parser expected.  */
	switch (kind)
	  {
	  case FUZZY_LOOKUP_TYPENAME:
	    if (TREE_CODE (binding->decl) != TYPE_DECL)
	      continue;
	    break;
	  case FUZZY_LOOKUP_FUNCTION_NAME:
	    if (TREE_CODE (binding->decl) != FUNCTION_DECL)
	      {
		/* Allow function pointers.  */
		if ((VAR_P (binding->decl)
		     || TREE_CODE (binding->decl) == PARM_DECL)
		    && TREE_CODE (TREE_TYPE (binding->decl)) == POINTER_TYPE
		    && (TREE_CODE (TREE_TYPE (TREE_TYPE (binding->decl)))
			== FUNCTION_TYPE))
		  break;
		continue;
	      }
	    break;
	  default:
	    break;
	  }
	bm.consider (binding->id);
      }
  /* Consider macros: if the user misspelled a macro name e.g. "SOME_MACRO"
     as:
       x = SOME_OTHER_MACRO (y);
     then "SOME_OTHER_MACRO" will survive to the frontend and show up
     as a misspelled identifier.
     Use the best distance so far so that a candidate is only set if
     a macro is better than anything so far.  This allows early rejection
     (without calculating the edit distance) of macro names that must have
     distance >= bm.get_best_distance (), and means that we only get a
     non-NULL result for best_macro_match if it's better than any of
     the identifiers already checked, which avoids needless creation
     of identifiers for macro hashnodes.  */
  best_macro_match bmm (name, bm.get_best_distance (), parse_in);
  cpp_hashnode *best_macro = bmm.get_best_meaningful_candidate ();
  /* If a macro is the closest so far to NAME, use it, creating an
     identifier tree node for it.  */
  if (best_macro)
    {
      const char *id = (const char *)best_macro->ident.str;
      tree macro_as_identifier
	= get_identifier_with_length (id, best_macro->ident.len);
      bm.set_best_so_far (macro_as_identifier,
			  bmm.get_best_distance (),
			  bmm.get_best_candidate_length ());
    }
  /* Try the "start_typename" keywords to detect
     "singed" vs "signed" typos.  */
  if (kind == FUZZY_LOOKUP_TYPENAME)
    {
      for (unsigned i = 0; i < num_c_common_reswords; i++)
	{
	  const c_common_resword *resword = &c_common_reswords[i];
	  if (!c_keyword_starts_typename (resword->rid))
	    continue;
	  tree resword_identifier = ridpointers [resword->rid];
	  /* Keywords disabled in this dialect have no identifier.  */
	  if (!resword_identifier)
	    continue;
	  gcc_assert (TREE_CODE (resword_identifier) == IDENTIFIER_NODE);
	  bm.consider (resword_identifier);
	}
    }
  /* "Meaningful" filters out candidates too distant to be plausible.  */
  tree best = bm.get_best_meaningful_candidate ();
  if (best)
    return name_hint (IDENTIFIER_POINTER (best), NULL);
  else
    return name_hint (NULL, NULL);
}
/* Create the predefined scalar types of C,
   and some nodes representing standard constants (0, 1, (void *) 0).
   Initialize the global scope.
   Make definitions for built-in primitive functions.  */
void
c_init_decl_processing (void)
{
  location_t save_loc = input_location;
  /* Initialize reserved words for parser.  */
  c_parse_init ();
  current_function_decl = NULL_TREE;
  gcc_obstack_init (&parser_obstack);
  /* Make the externals scope.  */
  push_scope ();
  external_scope = current_scope;
  /* Declarations from c_common_nodes_and_builtins must not be associated
     with this input file, lest we get differences between using and not
     using preprocessed headers.  */
  input_location = BUILTINS_LOCATION;
  c_common_nodes_and_builtins ();
  /* In C, comparisons and TRUTH_* expressions have type int.  */
  truthvalue_type_node = integer_type_node;
  truthvalue_true_node = integer_one_node;
  truthvalue_false_node = integer_zero_node;
  /* Even in C99, which has a real boolean type.  */
  pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"),
			boolean_type_node));
  input_location = save_loc;
  /* Hook used by the shared c-family code to build __FUNCTION__ etc.  */
  make_fname_decl = c_make_fname_decl;
  start_fname_decls ();
}
/* Create the VAR_DECL at LOC for __FUNCTION__ etc.  ID is the name to
   give the decl, NAME is the initialization string and TYPE_DEP
   indicates whether NAME depended on the type of the function.  As we
   don't yet implement delayed emission of static data, we mark the
   decl as emitted so it is not placed in the output.  Anything using
   it must therefore pull out the STRING_CST initializer directly.
   FIXME.  */
static tree
c_make_fname_decl (location_t loc, tree id, int type_dep)
{
  const char *name = fname_as_string (type_dep);
  tree decl, type, init;
  size_t length = strlen (name);
  /* The decl's type is a const char array sized to hold NAME plus
     its terminating NUL (the index type is the last valid index).  */
  type = build_array_type (char_type_node,
			   build_index_type (size_int (length)));
  type = c_build_qualified_type (type, TYPE_QUAL_CONST);
  decl = build_decl (loc, VAR_DECL, id, type);
  TREE_STATIC (decl) = 1;
  TREE_READONLY (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  init = build_string (length + 1, name);
  /* build_string copied NAME; release the buffer fname_as_string
     allocated.  */
  free (CONST_CAST (char *, name));
  TREE_TYPE (init) = type;
  DECL_INITIAL (decl) = init;
  TREE_USED (decl) = 1;
  if (current_function_decl
      /* For invalid programs like this:
        void foo()
        const char* p = __FUNCTION__;
	the __FUNCTION__ is believed to appear in K&R style function
	parameter declarator.  In that case we still don't have
	function_scope.  */
      && current_function_scope)
    {
      DECL_CONTEXT (decl) = current_function_decl;
      bind (id, decl, current_function_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
    }
  finish_decl (decl, loc, init, NULL_TREE, NULL_TREE);
  return decl;
}
/* Language hook for registering built-in function DECL.  The decl is
   bound invisibly in the external scope: it only becomes visible once
   the user declares it (or, for names in the implementation namespace,
   via the visible_builtins list consumed by push_file_scope).  */
tree
c_builtin_function (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
  /* Should never be called on a symbol with a preexisting meaning.  */
  gcc_assert (!I_SYMBOL_BINDING (id));
  bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false,
	UNKNOWN_LOCATION);
  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }
  return decl;
}
/* Like c_builtin_function, but bind DECL visibly in the external scope
   right away (used for builtins that must be usable without any user
   declaration).  Unlike c_builtin_function, this tolerates an existing
   binding and a missing external scope.  */
tree
c_builtin_function_ext_scope (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
  if (external_scope)
    bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false,
	  UNKNOWN_LOCATION);
  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }
  return decl;
}
/* Called when a declaration is seen that contains no names to declare.
   If its type is a reference to a structure, union or enum inherited
   from a containing scope, shadow that tag name for the current scope
   with a forward reference.
   If its type defines a new named structure or union
   or defines an enum, it is valid but we need not do anything here.
   Otherwise, it is an error.
   Convenience wrapper around shadow_tag_warned with no prior
   diagnostics issued.  */
void
shadow_tag (const struct c_declspecs *declspecs)
{
  shadow_tag_warned (declspecs, 0);
}
/* Diagnose a declaration described by DECLSPECS that declares no names,
   shadowing an inherited tag with a forward reference where appropriate.
   WARNED is 1 if we have done a pedwarn, 2 if we have done a warning,
   but no pedwarn; it tracks the strongest diagnostic issued so far so
   that each declaration gets at most one pedwarn.  */
void
shadow_tag_warned (const struct c_declspecs *declspecs, int warned)
{
  bool found_tag = false;
  /* Only a real (non-defaulted, non-typedef) type specifier can name
     a tag worth considering here.  */
  if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p)
    {
      tree value = declspecs->type;
      enum tree_code code = TREE_CODE (value);
      if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
	/* Used to test also that TYPE_SIZE (value) != 0.
	   That caused warning for `struct foo;' at top level in the file.  */
	{
	  tree name = TYPE_NAME (value);
	  tree t;
	  found_tag = true;
	  if (declspecs->restrict_p)
	    {
	      error ("invalid use of %<restrict%>");
	      warned = 1;
	    }
	  /* e.g. "struct { int i; };" -- a tagless type that declares
	     nothing is useless (except as an empty enum).  */
	  if (name == NULL_TREE)
	    {
	      if (warned != 1 && code != ENUMERAL_TYPE)
		/* Empty unnamed enum OK */
		{
		  pedwarn (input_location, 0,
			   "unnamed struct/union that defines no instances");
		  warned = 1;
		}
	    }
	  /* e.g. "static struct foo;" -- a storage class with a mere
	     tag reference does not redeclare the tag.  */
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->storage_class != csc_none)
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with storage class specifier "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  /* e.g. "const struct foo;" -- likewise for type qualifiers.  */
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && (declspecs->const_p
		       || declspecs->volatile_p
		       || declspecs->atomic_p
		       || declspecs->restrict_p
		       || declspecs->address_space))
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with type qualifier "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  /* e.g. "_Alignas (8) struct foo;" -- likewise for _Alignas.  */
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->alignas_p)
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with %<_Alignas%> "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else
	    {
	      /* A plain "struct foo;": shadow any tag of the same name
		 inherited from an enclosing scope with a fresh forward
		 reference in the current scope.  */
	      pending_invalid_xref = NULL_TREE;
	      t = lookup_tag (code, name, true, NULL);
	      if (t == NULL_TREE)
		{
		  t = make_node (code);
		  pushtag (input_location, name, t);
		}
	    }
	}
      else
	{
	  if (warned != 1 && !in_system_header_at (input_location))
	    {
	      pedwarn (input_location, 0,
		       "useless type name in empty declaration");
	      warned = 1;
	    }
	}
    }
  else if (warned != 1 && !in_system_header_at (input_location)
	   && declspecs->typedef_p)
    {
      pedwarn (input_location, 0, "useless type name in empty declaration");
      warned = 1;
    }
  pending_invalid_xref = NULL_TREE;
  /* The remaining specifiers are checked regardless of whether a tag
     was found; hard errors first, then one-shot warnings.  */
  if (declspecs->inline_p)
    {
      error ("%<inline%> in empty declaration");
      warned = 1;
    }
  if (declspecs->noreturn_p)
    {
      error ("%<_Noreturn%> in empty declaration");
      warned = 1;
    }
  if (current_scope == file_scope && declspecs->storage_class == csc_auto)
    {
      error ("%<auto%> in file-scope empty declaration");
      warned = 1;
    }
  if (current_scope == file_scope && declspecs->storage_class == csc_register)
    {
      error ("%<register%> in file-scope empty declaration");
      warned = 1;
    }
  if (!warned && !in_system_header_at (input_location)
      && declspecs->storage_class != csc_none)
    {
      warning (0, "useless storage class specifier in empty declaration");
      warned = 2;
    }
  if (!warned && !in_system_header_at (input_location) && declspecs->thread_p)
    {
      warning (0, "useless %qs in empty declaration",
	       declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
      warned = 2;
    }
  if (!warned
      && !in_system_header_at (input_location)
      && (declspecs->const_p
	  || declspecs->volatile_p
	  || declspecs->atomic_p
	  || declspecs->restrict_p
	  || declspecs->address_space))
    {
      warning (0, "useless type qualifier in empty declaration");
      warned = 2;
    }
  if (!warned && !in_system_header_at (input_location)
      && declspecs->alignas_p)
    {
      warning (0, "useless %<_Alignas%> in empty declaration");
      warned = 2;
    }
  /* If nothing above pedwarned and no tag was involved, the whole
     declaration was pointless.  */
  if (warned != 1)
    {
      if (!found_tag)
	pedwarn (input_location, 0, "empty declaration");
    }
}
/* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_*
bits. SPECS represents declaration specifiers that the grammar
only permits to contain type qualifiers and attributes. */
int
quals_from_declspecs (const struct c_declspecs *specs)
{
int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0)
| (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0)
| (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0)
| (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0)
| (ENCODE_QUAL_ADDR_SPACE (specs->address_space)));
gcc_assert (!specs->type
&& !specs->decl_attr
&& specs->typespec_word == cts_none
&& specs->storage_class == csc_none
&& !specs->typedef_p
&& !specs->explicit_signed_p
&& !specs->deprecated_p
&& !specs->long_p
&& !specs->long_long_p
&& !specs->short_p
&& !specs->signed_p
&& !specs->unsigned_p
&& !specs->complex_p
&& !specs->inline_p
&& !specs->noreturn_p
&& !specs->thread_p);
return quals;
}
/* Build an array declarator node.  LOC locates the start of the array
   (usually the opening bracket).  EXPR is the dimension expression
   inside [], or NULL_TREE.  QUALS holds any type qualifiers written
   inside the [] (applied to the pointer a parameter array decays to).
   STATIC_P is true when "static" appeared inside the [].  VLA_UNSPEC_P
   is true for a [*] declarator -- a VLA of unspecified length that is
   nevertheless a complete type.  The contained declarator is filled in
   later by set_array_declarator_inner.  */
struct c_declarator *
build_array_declarator (location_t loc,
			tree expr, struct c_declspecs *quals, bool static_p,
			bool vla_unspec_p)
{
  struct c_declarator *decl
    = XOBNEW (&parser_obstack, struct c_declarator);

  decl->id_loc = loc;
  decl->kind = cdk_array;
  decl->declarator = 0;
  decl->u.array.dimen = expr;
  if (quals == NULL)
    {
      decl->u.array.attrs = NULL_TREE;
      decl->u.array.quals = 0;
    }
  else
    {
      decl->u.array.attrs = quals->attrs;
      decl->u.array.quals = quals_from_declspecs (quals);
    }
  decl->u.array.static_p = static_p;
  decl->u.array.vla_unspec_p = vla_unspec_p;

  /* Both forms below are C99 features; pedwarn for C90.  */
  if (static_p || quals != NULL)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "ISO C90 does not support %<static%> or type "
		 "qualifiers in parameter array declarators");
  if (vla_unspec_p)
    {
      pedwarn_c90 (loc, OPT_Wpedantic,
		   "ISO C90 does not support %<[*]%> array declarators");
      if (!current_scope->parm_flag)
	{
	  /* C99 6.7.5.2p4 */
	  error_at (loc, "%<[*]%> not allowed in other than "
		    "function prototype scope");
	  decl->u.array.vla_unspec_p = false;
	  return NULL;
	}
      current_scope->had_vla_unspec = true;
    }
  return decl;
}
/* Set the contained declarator of an array declarator.  DECL is the
   declarator, as constructed by build_array_declarator; INNER is what
   appears on the left of the [].  Returns DECL for chaining.  */
struct c_declarator *
set_array_declarator_inner (struct c_declarator *decl,
			    struct c_declarator *inner)
{
  decl->declarator = inner;
  return decl;
}
/* INIT is a constructor that forms DECL's initializer.  If the final
   element initializes a flexible array field, grow DECL's recorded
   size by the size that initializer requires.  */
static void
add_flexible_array_elts_to_size (tree decl, tree init)
{
  if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
    return;

  tree last_elt = CONSTRUCTOR_ELTS (init)->last ().value;
  tree elt_type = TREE_TYPE (last_elt);

  /* A flexible array member has array type with no TYPE_SIZE, and a
     domain whose maximum value is not set.  Anything else needs no
     size adjustment.  */
  if (TREE_CODE (elt_type) != ARRAY_TYPE
      || TYPE_SIZE (elt_type) != NULL_TREE
      || TYPE_DOMAIN (elt_type) == NULL_TREE
      || TYPE_MAX_VALUE (TYPE_DOMAIN (elt_type)) != NULL_TREE)
    return;

  /* Complete the array type from the initializer, then widen both the
     bit-size and byte-size of DECL accordingly.  */
  complete_array_type (&elt_type, last_elt, false);
  DECL_SIZE (decl)
    = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (elt_type));
  DECL_SIZE_UNIT (decl)
    = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl),
		  TYPE_SIZE_UNIT (elt_type));
}
/* Decode a "typename", such as "int **", returning a ..._TYPE node.
   Set *EXPR, if EXPR not NULL, to any expression to be evaluated
   before the type name, and set *EXPR_CONST_OPERANDS, if
   EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may
   appear in a constant expression.  */
tree
groktypename (struct c_type_name *type_name, tree *expr,
	      bool *expr_const_operands)
{
  /* Detach the attributes so grokdeclarator does not consume them;
     they are applied to the finished type below.  */
  tree attrs = type_name->specs->attrs;
  type_name->specs->attrs = NULL_TREE;

  tree type = grokdeclarator (type_name->declarator, type_name->specs,
			      TYPENAME, false, NULL, &attrs, expr,
			      expr_const_operands, DEPRECATED_NORMAL);

  /* Apply attributes.  */
  decl_attributes (&type, attrs, 0);

  return type;
}
/* Wrapper for decl_attributes that adds some implicit attributes
   to VAR_DECLs or FUNCTION_DECLs.  NODE, ATTRIBUTES and FLAGS are
   as for decl_attributes.  */
static tree
c_decl_attributes (tree *node, tree attributes, int flags)
{
  /* Add implicit "omp declare target" attribute if requested.
     NOTE(review): current_omp_declare_target_attribute is presumably
     set while parsing inside an OpenMP "declare target" region --
     confirm against the parser.  Only global variables and functions
     qualify.  */
  if (current_omp_declare_target_attribute
      && ((VAR_P (*node) && is_global_var (*node))
	  || TREE_CODE (*node) == FUNCTION_DECL))
    {
      /* Variables must have a type the target can map; otherwise the
	 directive is diagnosed instead of attributed.  */
      if (VAR_P (*node)
	  && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node)))
	error ("%q+D in declare target directive does not have mappable type",
	       *node);
      else
	attributes = tree_cons (get_identifier ("omp declare target"),
				NULL_TREE, attributes);
    }
  /* Look up the current declaration with all the attributes merged
     so far so that attributes on the current declaration that's
     about to be pushed that conflict with the former can be detected,
     diagnosed, and rejected as appropriate.  */
  tree last_decl = lookup_name (DECL_NAME (*node));
  if (!last_decl)
    /* Fall back to the external scope so earlier file-scope
       declarations of the same name are still considered.  */
    last_decl = lookup_name_in_scope (DECL_NAME (*node), external_scope);
  return decl_attributes (node, attributes, flags, last_decl);
}
/* Decode a declarator in an ordinary declaration or data definition.
   This is called as soon as the type information and variable name
   have been parsed, before parsing the initializer if any.
   Here we create the ..._DECL node, fill in its type,
   and put it on the list of decls for the current context.
   The ..._DECL node is returned as the value.
   Exception: for arrays where the length is not specified,
   the type is left null, to be filled in by `finish_decl'.
   Function definitions do not come here; they go to start_function
   instead.  However, external and forward declarations of functions
   do go through here.  Structure field declarations are done by
   grokfield and not through here.  */
tree
start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs,
	    bool initialized, tree attributes)
{
  tree decl;
  tree tem;
  tree expr = NULL_TREE;
  enum deprecated_states deprecated_state = DEPRECATED_NORMAL;
  /* An object declared as __attribute__((deprecated)) suppresses
     warnings of uses of other deprecated items.  */
  if (lookup_attribute ("deprecated", attributes))
    deprecated_state = DEPRECATED_SUPPRESS;
  decl = grokdeclarator (declarator, declspecs,
			 NORMAL, initialized, NULL, &attributes, &expr, NULL,
			 deprecated_state);
  if (!decl || decl == error_mark_node)
    return NULL_TREE;
  /* Emit any side-effect expression (e.g. from array size
     expressions) that grokdeclarator accumulated.  */
  if (expr)
    add_stmt (fold_convert (void_type_node, expr));
  if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl)))
    warning (OPT_Wmain, "%q+D is usually a function", decl);
  if (initialized)
    /* Is it valid for this decl to have an initializer at all?
       If not, set INITIALIZED to zero, which will indirectly
       tell 'finish_decl' to ignore the initializer once it is parsed.  */
    switch (TREE_CODE (decl))
      {
      case TYPE_DECL:
	error ("typedef %qD is initialized (use __typeof__ instead)", decl);
	initialized = false;
	break;
      case FUNCTION_DECL:
	error ("function %qD is initialized like a variable", decl);
	initialized = false;
	break;
      case PARM_DECL:
	/* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE.  */
	error ("parameter %qD is initialized", decl);
	initialized = false;
	break;
      default:
	/* Don't allow initializations for incomplete types except for
	   arrays which might be completed by the initialization.  */
	/* This can happen if the array size is an undefined macro.
	   We already gave a warning, so we don't need another one.  */
	if (TREE_TYPE (decl) == error_mark_node)
	  initialized = false;
	else if (COMPLETE_TYPE_P (TREE_TYPE (decl)))
	  {
	    /* A complete type is ok if size is fixed.  */
	    if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST
		|| C_DECL_VARIABLE_SIZE (decl))
	      {
		error ("variable-sized object may not be initialized");
		initialized = false;
	      }
	  }
	else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
	  {
	    error ("variable %qD has initializer but incomplete type", decl);
	    initialized = false;
	  }
	else if (C_DECL_VARIABLE_SIZE (decl))
	  {
	    /* Although C99 is unclear about whether incomplete arrays
	       of VLAs themselves count as VLAs, it does not make
	       sense to permit them to be initialized given that
	       ordinary VLAs may not be initialized.  */
	    error ("variable-sized object may not be initialized");
	    initialized = false;
	  }
      }
  if (initialized)
    {
      if (current_scope == file_scope)
	TREE_STATIC (decl) = 1;
      /* Tell 'pushdecl' this is an initialized decl
	 even though we don't yet have the initializer expression.
	 Also tell 'finish_decl' it may store the real initializer.  */
      DECL_INITIAL (decl) = error_mark_node;
    }
  /* If this is a function declaration, write a record describing it to the
     prototypes file (if requested).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl)));
  /* ANSI specifies that a tentative definition which is not merged with
     a non-tentative definition behaves exactly like a definition with an
     initializer equal to zero.  (Section 3.7.2)
     -fno-common gives strict ANSI behavior, though this tends to break
     a large body of code that grew up without this rule.
     Thread-local variables are never common, since there's no entrenched
     body of code to break, and it allows more efficient variable references
     in the presence of dynamic linking.  */
  if (VAR_P (decl)
      && !initialized
      && TREE_PUBLIC (decl)
      && !DECL_THREAD_LOCAL_P (decl)
      && !flag_no_common)
    DECL_COMMON (decl) = 1;
  /* Set attributes here so if duplicate decl, will have proper attributes.  */
  c_decl_attributes (&decl, attributes, 0);
  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl))
	  || current_function_decl))
    {
      /* "auto inline" at block scope is left alone; otherwise a
	 non-static gnu_inline function flips its DECL_EXTERNAL bit
	 (GNU inline semantics invert the C99 meaning).  */
      if (declspecs->storage_class == csc_auto && current_scope != file_scope)
	;
      else if (declspecs->storage_class != csc_static)
	DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl);
    }
  /* If the target promotes prototypes (e.g. small ints passed as int),
     record the promoted argument types on the parameter decls.  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (decl)))
    {
      struct c_declarator *ce = declarator;
      if (ce->kind == cdk_pointer)
	ce = declarator->declarator;
      if (ce->kind == cdk_function)
	{
	  tree args = ce->u.arg_info->parms;
	  for (; args; args = DECL_CHAIN (args))
	    {
	      tree type = TREE_TYPE (args);
	      if (type && INTEGRAL_TYPE_P (type)
		  && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
		DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	    }
	}
    }
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && DECL_UNINLINABLE (decl)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
    warning (OPT_Wattributes, "inline function %q+D given attribute noinline",
	     decl);
  /* C99 6.7.4p3: An inline definition of a function with external
     linkage shall not contain a definition of a modifiable object
     with static storage duration...  */
  if (VAR_P (decl)
      && current_scope != file_scope
      && TREE_STATIC (decl)
      && !TREE_READONLY (decl)
      && DECL_DECLARED_INLINE_P (current_function_decl)
      && DECL_EXTERNAL (current_function_decl))
    record_inline_static (input_location, current_function_decl,
			  decl, csi_modifiable);
  if (c_dialect_objc ()
      && VAR_OR_FUNCTION_DECL_P (decl))
    objc_check_global_decl (decl);
  /* Add this decl to the current scope.
     TEM may equal DECL or it may be a previous decl of the same name.  */
  tem = pushdecl (decl);
  /* An initialized decl is a definition, so it can be neither
     external nor non-static.  */
  if (initialized && DECL_EXTERNAL (tem))
    {
      DECL_EXTERNAL (tem) = 0;
      TREE_STATIC (tem) = 1;
    }
  return tem;
}
/* Subroutine of finish_decl.  TYPE is the type of an uninitialized object
   DECL or the non-array element type if DECL is an uninitialized array.
   If that type (or any record/union nested within it) has a const
   member, warn that such a declaration is invalid in C++.  */
static void
diagnose_uninitialized_cst_member (tree decl, tree type)
{
  for (tree fld = TYPE_FIELDS (type); fld; fld = TREE_CHAIN (fld))
    {
      if (TREE_CODE (fld) != FIELD_DECL)
	continue;

      tree fld_type = strip_array_types (TREE_TYPE (fld));
      if (TYPE_QUALS (fld_type) & TYPE_QUAL_CONST)
	{
	  warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		      "uninitialized const member in %qT is invalid in C++",
		      strip_array_types (TREE_TYPE (decl)));
	  inform (DECL_SOURCE_LOCATION (fld), "%qD should be initialized",
		  fld);
	}

      /* Recurse into nested structs/unions.  */
      if (RECORD_OR_UNION_TYPE_P (fld_type))
	diagnose_uninitialized_cst_member (decl, fld_type);
    }
}
/* Finish processing of a declaration;
   install its initial value.
   If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
   If the length of an array type is not known before,
   it must be determined now, from the initial value, or it is an error.
   INIT_LOC is the location of the initial value.  */
void
finish_decl (tree decl, location_t init_loc, tree init,
	     tree origtype, tree asmspec_tree)
{
  tree type;
  /* Remember whether the decl had no size yet; used below to fix up
     local arrays that get completed here.  */
  bool was_incomplete = (DECL_SIZE (decl) == NULL_TREE);
  const char *asmspec = 0;
  /* If a name was specified, get the string.  */
  if (VAR_OR_FUNCTION_DECL_P (decl)
      && DECL_FILE_SCOPE_P (decl))
    asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
  if (asmspec_tree)
    asmspec = TREE_STRING_POINTER (asmspec_tree);
  if (VAR_P (decl)
      && TREE_STATIC (decl)
      && global_bindings_p ())
    /* So decl is a global variable.  Record the types it uses
       so that we can decide later to emit debug info for them.  */
    record_types_used_by_current_var_decl (decl);
  /* If `start_decl' didn't like having an initialization, ignore it now.  */
  if (init != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE)
    init = NULL_TREE;
  /* Don't crash if parm is initialized.  */
  if (TREE_CODE (decl) == PARM_DECL)
    init = NULL_TREE;
  if (init)
    store_init_value (init_loc, decl, init, origtype);
  if (c_dialect_objc () && (VAR_OR_FUNCTION_DECL_P (decl)
			    || TREE_CODE (decl) == FIELD_DECL))
    objc_check_decl (decl);
  type = TREE_TYPE (decl);
  /* Deduce size of array from initialization, if not already known.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE
      && TREE_CODE (decl) != TYPE_DECL)
    {
      bool do_default
	= (TREE_STATIC (decl)
	   /* Even if pedantic, an external linkage array
	      may have incomplete type at first.  */
	   ? pedantic && !TREE_PUBLIC (decl)
	   : !DECL_EXTERNAL (decl));
      int failure
	= complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl),
			       do_default);
      /* Get the completed type made by complete_array_type.  */
      type = TREE_TYPE (decl);
      /* FAILURE codes come from complete_array_type: 1 = no way to
	 size, 2 = size missing, 3 = bad (zero/negative) size,
	 0 = success.  */
      switch (failure)
	{
	case 1:
	  error ("initializer fails to determine size of %q+D", decl);
	  break;
	case 2:
	  if (do_default)
	    error ("array size missing in %q+D", decl);
	  /* If a `static' var's size isn't known,
	     make it extern as well as static, so it does not get
	     allocated.
	     If it is not `static', then do not mark extern;
	     finish_incomplete_decl will give it a default size
	     and it will get allocated.  */
	  else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
	    DECL_EXTERNAL (decl) = 1;
	  break;
	case 3:
	  error ("zero or negative size array %q+D", decl);
	  break;
	case 0:
	  /* For global variables, update the copy of the type that
	     exists in the binding.  */
	  if (TREE_PUBLIC (decl))
	    {
	      struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl));
	      while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
		b_ext = b_ext->shadowed;
	      if (b_ext && TREE_CODE (decl) == TREE_CODE (b_ext->decl))
		{
		  if (b_ext->u.type && comptypes (b_ext->u.type, type))
		    b_ext->u.type = composite_type (b_ext->u.type, type);
		  else
		    b_ext->u.type = type;
		}
	    }
	  break;
	default:
	  gcc_unreachable ();
	}
      if (DECL_INITIAL (decl))
	TREE_TYPE (DECL_INITIAL (decl)) = type;
      relayout_decl (decl);
    }
  if (VAR_P (decl))
    {
      if (init && TREE_CODE (init) == CONSTRUCTOR)
	add_flexible_array_elts_to_size (decl, init);
      /* Lay the variable out now if its type just became complete.  */
      if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (decl)))
	layout_decl (decl, 0);
      if (DECL_SIZE (decl) == NULL_TREE
	  /* Don't give an error if we already gave one earlier.  */
	  && TREE_TYPE (decl) != error_mark_node
	  && (TREE_STATIC (decl)
	      /* A static variable with an incomplete type
		 is an error if it is initialized.
		 Also if it is not file scope.
		 Otherwise, let it through, but if it is not `extern'
		 then it may cause an error message later.  */
	      ? (DECL_INITIAL (decl) != NULL_TREE
		 || !DECL_FILE_SCOPE_P (decl))
	      /* An automatic variable with an incomplete type
		 is an error.  */
	      : !DECL_EXTERNAL (decl)))
	{
	  error ("storage size of %q+D isn%'t known", decl);
	  TREE_TYPE (decl) = error_mark_node;
	}
      /* Remember still-incomplete static aggregates; they may be
	 completed (and diagnosed) at end of compilation.  */
      if ((RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
	   || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
	  && DECL_SIZE (decl) == NULL_TREE
	  && TREE_STATIC (decl))
	incomplete_record_decls.safe_push (decl);
      if (is_global_var (decl) && DECL_SIZE (decl) != NULL_TREE)
	{
	  if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
	    constant_expression_warning (DECL_SIZE (decl));
	  else
	    {
	      error ("storage size of %q+D isn%'t constant", decl);
	      TREE_TYPE (decl) = error_mark_node;
	    }
	}
      /* If the type was marked used (e.g. via typeof), so is the decl.  */
      if (TREE_USED (type))
	{
	  TREE_USED (decl) = 1;
	  DECL_READ_P (decl) = 1;
	}
    }
  /* If this is a function and an assembler name is specified, reset DECL_RTL
     so we can give it its new name.  Also, update builtin_decl if it
     was a normal built-in.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
    {
      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
	set_builtin_user_assembler_name (decl, asmspec);
      set_user_assembler_name (decl, asmspec);
    }
  /* If #pragma weak was used, mark the decl weak now.  */
  maybe_apply_pragma_weak (decl);
  /* Output the assembler code and/or RTL code for variables and functions,
     unless the type is an undefined structure or union.
     If not, it will get done when the type is completed.  */
  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      /* Determine the ELF visibility.  */
      if (TREE_PUBLIC (decl))
	c_determine_visibility (decl);
      /* This is a no-op in c-lang.c or something real in objc-act.c.  */
      if (c_dialect_objc ())
	objc_check_decl (decl);
      if (asmspec)
	{
	  /* If this is not a static variable, issue a warning.
	     It doesn't make any sense to give an ASMSPEC for an
	     ordinary, non-register local variable.  Historically,
	     GCC has accepted -- but ignored -- the ASMSPEC in
	     this case.  */
	  if (!DECL_FILE_SCOPE_P (decl)
	      && VAR_P (decl)
	      && !C_DECL_REGISTER (decl)
	      && !TREE_STATIC (decl))
	    warning (0, "ignoring asm-specifier for non-static local "
		     "variable %q+D", decl);
	  else
	    set_user_assembler_name (decl, asmspec);
	}
      if (DECL_FILE_SCOPE_P (decl))
	{
	  if (DECL_INITIAL (decl) == NULL_TREE
	      || DECL_INITIAL (decl) == error_mark_node)
	    /* Don't output anything
	       when a tentative file-scope definition is seen.
	       But at end of compilation, do output code for them.  */
	    DECL_DEFER_OUTPUT (decl) = 1;
	  if (asmspec && VAR_P (decl) && C_DECL_REGISTER (decl))
	    DECL_HARD_REGISTER (decl) = 1;
	  rest_of_decl_compilation (decl, true, 0);
	}
      else
	{
	  /* In conjunction with an ASMSPEC, the `register'
	     keyword indicates that we should place the variable
	     in a particular register.  */
	  if (asmspec && C_DECL_REGISTER (decl))
	    {
	      DECL_HARD_REGISTER (decl) = 1;
	      /* This cannot be done for a structure with volatile
		 fields, on which DECL_REGISTER will have been
		 reset.  */
	      if (!DECL_REGISTER (decl))
		error ("cannot put object with volatile field into register");
	    }
	  if (TREE_CODE (decl) != FUNCTION_DECL)
	    {
	      /* If we're building a variable sized type, and we might be
		 reachable other than via the top of the current binding
		 level, then create a new BIND_EXPR so that we deallocate
		 the object at the right time.  */
	      /* Note that DECL_SIZE can be null due to errors.  */
	      if (DECL_SIZE (decl)
		  && !TREE_CONSTANT (DECL_SIZE (decl))
		  && STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
		{
		  tree bind;
		  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  add_stmt (bind);
		  BIND_EXPR_BODY (bind) = push_stmt_list ();
		}
	      add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl),
				    DECL_EXPR, decl));
	    }
	}
      if (!DECL_FILE_SCOPE_P (decl))
	{
	  /* Recompute the RTL of a local array now
	     if it used to be an incomplete type.  */
	  if (was_incomplete && !is_global_var (decl))
	    {
	      /* If we used it already as memory, it must stay in memory.  */
	      TREE_ADDRESSABLE (decl) = TREE_USED (decl);
	      /* If it's still incomplete now, no init will save it.  */
	      if (DECL_SIZE (decl) == NULL_TREE)
		DECL_INITIAL (decl) = NULL_TREE;
	    }
	}
    }
  if (TREE_CODE (decl) == TYPE_DECL)
    {
      /* Local typedefs of variably modified types need a DECL_EXPR so
	 the size expressions are evaluated in the right place.  */
      if (!DECL_FILE_SCOPE_P (decl)
	  && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
	add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
      rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0);
    }
  /* Install a cleanup (aka destructor) if one was given.  */
  if (VAR_P (decl) && !TREE_STATIC (decl))
    {
      tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
      if (attr)
	{
	  tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
	  tree cleanup_decl = lookup_name (cleanup_id);
	  tree cleanup;
	  vec<tree, va_gc> *v;
	  /* Build "cleanup(&decl)" for the destructor.  */
	  cleanup = build_unary_op (input_location, ADDR_EXPR, decl, false);
	  vec_alloc (v, 1);
	  v->quick_push (cleanup);
	  cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl),
					       vNULL, cleanup_decl, v, NULL);
	  vec_free (v);
	  /* Don't warn about decl unused; the cleanup uses it.  */
	  TREE_USED (decl) = 1;
	  TREE_USED (cleanup_decl) = 1;
	  DECL_READ_P (decl) = 1;
	  push_cleanup (decl, cleanup, false);
	}
    }
  /* -Wc++-compat: warn about uninitialized objects C++ would reject.  */
  if (warn_cxx_compat
      && VAR_P (decl)
      && !DECL_EXTERNAL (decl)
      && DECL_INITIAL (decl) == NULL_TREE)
    {
      type = strip_array_types (type);
      if (TREE_READONLY (decl))
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		    "uninitialized const %qD is invalid in C++", decl);
      else if (RECORD_OR_UNION_TYPE_P (type)
	       && C_TYPE_FIELDS_READONLY (type))
	diagnose_uninitialized_cst_member (decl, type);
    }
  invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}
/* Given a parsed parameter declaration PARM, decode it into a
   PARM_DECL.  EXPR is NULL or a pointer to an expression that needs
   to be evaluated for the side effects of array size expressions in
   the parameters.  */
tree
grokparm (const struct c_parm *parm, tree *expr)
{
  tree attrs = parm->attrs;
  tree decl = grokdeclarator (parm->declarator, parm->specs, PARM,
			      /*initialized=*/false, NULL, &attrs, expr,
			      NULL, DEPRECATED_NORMAL);

  /* Apply any attributes written on the parameter.  */
  decl_attributes (&decl, attrs, 0);

  return decl;
}
/* Given a parsed parameter declaration PARM, decode it into a
   PARM_DECL and push that on the current scope.  EXPR is a pointer to
   an expression that needs to be evaluated for the side effects of
   array size expressions in the parameters.  */
void
push_parm_decl (const struct c_parm *parm, tree *expr)
{
  tree attrs = parm->attrs;
  tree decl = grokdeclarator (parm->declarator, parm->specs, PARM,
			      /*initialized=*/false, NULL, &attrs, expr,
			      NULL, DEPRECATED_NORMAL);

  /* Point the decl at the parameter's own location.  */
  if (decl && DECL_P (decl))
    DECL_SOURCE_LOCATION (decl) = parm->loc;

  decl_attributes (&decl, attrs, 0);
  decl = pushdecl (decl);
  finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE);
}
/* Mark every parameter declaration seen so far in the current scope
   as a forward declaration, and diagnose use of this GNU extension
   under -pedantic (once per scope).  */
void
mark_forward_parm_decls (void)
{
  if (pedantic && !current_scope->warned_forward_parm_decls)
    {
      pedwarn (input_location, OPT_Wpedantic,
	       "ISO C forbids forward parameter declarations");
      current_scope->warned_forward_parm_decls = true;
    }

  /* TREE_ASM_WRITTEN on a PARM_DECL flags it as a forward decl.  */
  for (struct c_binding *b = current_scope->bindings; b; b = b->prev)
    if (TREE_CODE (b->decl) == PARM_DECL)
      TREE_ASM_WRITTEN (b->decl) = 1;
}
/* Build a COMPOUND_LITERAL_EXPR.  TYPE is the type given in the compound
   literal, which may be an incomplete array type completed by the
   initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound
   literal.  NON_CONST is true if the initializers contain something
   that cannot occur in a constant expression.  If ALIGNAS_ALIGN is nonzero,
   it is the (valid) alignment for this compound literal, as specified
   with _Alignas.  */
tree
build_compound_literal (location_t loc, tree type, tree init, bool non_const,
			unsigned int alignas_align)
{
  /* We do not use start_decl here because we have a type, not a declarator;
     and do not use finish_decl because the decl should be stored inside
     the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR.  */
  tree decl;
  tree complit;
  tree stmt;
  if (type == error_mark_node
      || init == error_mark_node)
    return error_mark_node;
  /* Create the anonymous backing object: static at file scope,
     automatic otherwise, never user-visible.  */
  decl = build_decl (loc, VAR_DECL, NULL_TREE, type);
  DECL_EXTERNAL (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  TREE_STATIC (decl) = (current_scope == file_scope);
  DECL_CONTEXT (decl) = current_function_decl;
  TREE_USED (decl) = 1;
  DECL_READ_P (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 1;
  TREE_TYPE (decl) = type;
  c_apply_type_quals_to_decl (TYPE_QUALS (strip_array_types (type)), decl);
  if (alignas_align)
    {
      /* _Alignas value is in bytes; DECL_ALIGN wants bits.  */
      SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
      DECL_USER_ALIGN (decl) = 1;
    }
  store_init_value (loc, decl, init, NULL_TREE);
  /* An incomplete array type is completed from the initializer.  */
  if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type))
    {
      int failure = complete_array_type (&TREE_TYPE (decl),
					 DECL_INITIAL (decl), true);
      /* If complete_array_type returns 3, it means that the
	 initial value of the compound literal is empty.  Allow it.  */
      gcc_assert (failure == 0 || failure == 3);
      type = TREE_TYPE (decl);
      TREE_TYPE (DECL_INITIAL (decl)) = type;
    }
  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      c_incomplete_type_error (loc, NULL_TREE, type);
      return error_mark_node;
    }
  /* Wrap the decl in a DECL_EXPR inside the COMPOUND_LITERAL_EXPR.  */
  stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt);
  TREE_SIDE_EFFECTS (complit) = 1;
  layout_decl (decl, 0);
  if (TREE_STATIC (decl))
    {
      /* This decl needs a name for the assembler output.  */
      set_compound_literal_name (decl);
      DECL_DEFER_OUTPUT (decl) = 1;
      DECL_COMDAT (decl) = 1;
      pushdecl (decl);
      rest_of_decl_compilation (decl, 1, 0);
    }
  /* Non-constant initializers taint the whole expression so it is
     rejected where a constant expression is required.  */
  if (non_const)
    {
      complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit);
      C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1;
    }
  return complit;
}
/* Check the type of a compound literal.  Here we just check that it
   is valid for C++: C++ does not allow defining a new type inside a
   compound literal, so warn under -Wc++-compat.  */
void
check_compound_literal_type (location_t loc, struct c_type_name *type_name)
{
  if (!warn_cxx_compat)
    return;

  if (type_name->specs->typespec_kind != ctsk_tagdef
      && type_name->specs->typespec_kind != ctsk_tagfirstref)
    return;

  warning_at (loc, OPT_Wc___compat,
	      "defining a type in a compound literal is invalid in C++");
}
/* Determine whether TYPE is a structure whose last member is a
   flexible array, or a union containing such a structure (possibly
   recursively).  */
static bool
flexible_array_type_p (tree type)
{
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
      {
	/* Walk to the last field; only it can be a flexible array.  */
	tree last = TYPE_FIELDS (type);
	if (last == NULL_TREE)
	  return false;
	while (DECL_CHAIN (last) != NULL_TREE)
	  last = DECL_CHAIN (last);
	tree ftype = TREE_TYPE (last);
	return (TREE_CODE (ftype) == ARRAY_TYPE
		&& TYPE_SIZE (ftype) == NULL_TREE
		&& TYPE_DOMAIN (ftype) != NULL_TREE
		&& TYPE_MAX_VALUE (TYPE_DOMAIN (ftype)) == NULL_TREE);
      }

    case UNION_TYPE:
      /* Any member of the union may carry the flexible array.  */
      for (tree fld = TYPE_FIELDS (type); fld != NULL_TREE;
	   fld = DECL_CHAIN (fld))
	if (flexible_array_type_p (TREE_TYPE (fld)))
	  return true;
      return false;

    default:
      return false;
    }
}
/* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME,
   replacing with appropriate values if they are invalid.  Invalid
   widths are replaced with 1 and invalid types with unsigned int, so
   callers can continue without special-casing errors.  */
static void
check_bitfield_type_and_width (location_t loc, tree *type, tree *width,
			       tree orig_name)
{
  tree type_mv;
  unsigned int max_width;
  unsigned HOST_WIDE_INT w;
  /* Anonymous bit-fields are reported as "<anonymous>".  */
  const char *name = (orig_name
		      ? identifier_to_locale (IDENTIFIER_POINTER (orig_name))
		      : _("<anonymous>"));
  /* Detect and ignore out of range field width and process valid
     field widths.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (*width)))
    {
      error_at (loc, "bit-field %qs width not an integer constant", name);
      *width = integer_one_node;
    }
  else
    {
      /* Fold a non-constant width; if it folds to a constant, it is
	 accepted with only a pedwarn (a GCC extension).  */
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  *width = c_fully_fold (*width, false, NULL);
	  if (TREE_CODE (*width) == INTEGER_CST)
	    pedwarn (loc, OPT_Wpedantic,
		     "bit-field %qs width not an integer constant expression",
		     name);
	}
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  error_at (loc, "bit-field %qs width not an integer constant", name);
	  *width = integer_one_node;
	}
      constant_expression_warning (*width);
      if (tree_int_cst_sgn (*width) < 0)
	{
	  error_at (loc, "negative width in bit-field %qs", name);
	  *width = integer_one_node;
	}
      /* Zero width is only valid for unnamed bit-fields.  */
      else if (integer_zerop (*width) && orig_name)
	{
	  error_at (loc, "zero width for bit-field %qs", name);
	  *width = integer_one_node;
	}
    }
  /* Detect invalid bit-field type.  */
  if (TREE_CODE (*type) != INTEGER_TYPE
      && TREE_CODE (*type) != BOOLEAN_TYPE
      && TREE_CODE (*type) != ENUMERAL_TYPE)
    {
      error_at (loc, "bit-field %qs has invalid type", name);
      *type = unsigned_type_node;
    }
  if (TYPE_WARN_IF_NOT_ALIGN (*type))
    {
      error_at (loc, "cannot declare bit-field %qs with %<warn_if_not_aligned%> type",
		name);
      *type = unsigned_type_node;
    }
  /* C90 only blesses int, unsigned int and (by this implementation)
     _Bool as bit-field types; anything else is a GCC extension.  */
  type_mv = TYPE_MAIN_VARIANT (*type);
  if (!in_system_header_at (input_location)
      && type_mv != integer_type_node
      && type_mv != unsigned_type_node
      && type_mv != boolean_type_node)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "type of bit-field %qs is a GCC extension", name);
  /* Clamp the width to the precision of the underlying type.  */
  max_width = TYPE_PRECISION (*type);
  if (compare_tree_int (*width, max_width) > 0)
    {
      error_at (loc, "width of %qs exceeds its type", name);
      w = max_width;
      *width = build_int_cst (integer_type_node, w);
    }
  else
    w = tree_to_uhwi (*width);
  /* For enum bit-fields, warn if the width cannot represent all
     enumerator values.  */
  if (TREE_CODE (*type) == ENUMERAL_TYPE)
    {
      struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
      if (!lt
	  || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
	  || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
	warning_at (loc, 0, "%qs is narrower than values of its type", name);
    }
}
/* Print warning about variable length array if necessary.  */

static void
warn_variable_length_array (tree name, tree size)
{
  /* A size that folded to a constant (but was not a constant
     expression) gets the "size can't be evaluated" wording; a truly
     non-constant size gets the VLA wording.  Each case has a named
     and an unnamed variant.  */
  const bool constant_size = TREE_CONSTANT (size);

  if (constant_size && name)
    pedwarn_c90 (input_location, OPT_Wvla,
		 "ISO C90 forbids array %qE whose size "
		 "can%'t be evaluated", name);
  else if (constant_size)
    pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array "
		 "whose size can%'t be evaluated");
  else if (name)
    pedwarn_c90 (input_location, OPT_Wvla,
		 "ISO C90 forbids variable length array %qE", name);
  else
    pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable "
		 "length array");
}
/* Print warning about defaulting to int if necessary.
   In C99 and later modes this is a pedwarn; otherwise a plain
   warning controlled by OPT.  GMSGID and the trailing arguments
   follow the usual diagnostic format conventions.  */

static void
warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
{
  va_list args;
  diagnostic_info diag;
  rich_location rich_loc (line_table, location);

  va_start (args, gmsgid);
  diagnostic_set_info (&diag, gmsgid, &args, &rich_loc,
		       flag_isoc99 ? DK_PEDWARN : DK_WARNING);
  diag.option_index = opt;
  diagnostic_report_diagnostic (global_dc, &diag);
  va_end (args);
}
/* Returns the smallest location != UNKNOWN_LOCATION in LOCATIONS,
   considering only those c_declspec_words found in LIST, which
   must be terminated by cdw_number_of_elements.  */

static location_t
smallest_type_quals_location (const location_t *locations,
			      const c_declspec_word *list)
{
  location_t best = UNKNOWN_LOCATION;

  for (; *list != cdw_number_of_elements; ++list)
    {
      const location_t candidate = locations[*list];
      /* Replace BEST when it is still unset, or when CANDIDATE is a
	 known location that compares smaller.  */
      if (best == UNKNOWN_LOCATION
	  || (candidate != UNKNOWN_LOCATION && candidate < best))
	best = candidate;
    }

  return best;
}
/* Given declspecs and a declarator,
determine the name and type of the object declared
and construct a ..._DECL node for it.
(In one case we can return a ..._TYPE node instead.
For invalid input we sometimes return NULL_TREE.)
DECLSPECS is a c_declspecs structure for the declaration specifiers.
DECL_CONTEXT says which syntactic context this declaration is in:
NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
FUNCDEF for a function definition. Like NORMAL but a few different
error messages in each case. Return value may be zero meaning
this definition is too screwy to try to parse.
PARM for a parameter declaration (either within a function prototype
or before a function body). Make a PARM_DECL, or return void_type_node.
TYPENAME if for a typename (in a cast or sizeof).
Don't make a DECL node; just return the ..._TYPE node.
FIELD for a struct or union field; make a FIELD_DECL.
INITIALIZED is true if the decl has an initializer.
WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node
representing the width of the bit-field.
DECL_ATTRS points to the list of attributes that should be added to this
decl. Any nested attributes that belong on the decl itself will be
added to this list.
If EXPR is not NULL, any expressions that need to be evaluated as
part of evaluating variably modified types will be stored in *EXPR.
If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be
set to indicate whether operands in *EXPR can be used in constant
expressions.
DEPRECATED_STATE is a deprecated_states value indicating whether
deprecation warnings should be suppressed.
In the TYPENAME case, DECLARATOR is really an absolute declarator.
It may also be so in the PARM case, for a prototype where the
argument type is specified but not the name.
This function is where the complicated C meanings of `static'
and `extern' are interpreted. */
static tree
grokdeclarator (const struct c_declarator *declarator,
struct c_declspecs *declspecs,
enum decl_context decl_context, bool initialized, tree *width,
tree *decl_attrs, tree *expr, bool *expr_const_operands,
enum deprecated_states deprecated_state)
{
tree type = declspecs->type;
bool threadp = declspecs->thread_p;
enum c_storage_class storage_class = declspecs->storage_class;
int constp;
int restrictp;
int volatilep;
int atomicp;
int type_quals = TYPE_UNQUALIFIED;
tree name = NULL_TREE;
bool funcdef_flag = false;
bool funcdef_syntax = false;
bool size_varies = false;
tree decl_attr = declspecs->decl_attr;
int array_ptr_quals = TYPE_UNQUALIFIED;
tree array_ptr_attrs = NULL_TREE;
bool array_parm_static = false;
bool array_parm_vla_unspec_p = false;
tree returned_attrs = NULL_TREE;
bool bitfield = width != NULL;
tree element_type;
tree orig_qual_type = NULL;
size_t orig_qual_indirect = 0;
struct c_arg_info *arg_info = 0;
addr_space_t as1, as2, address_space;
location_t loc = UNKNOWN_LOCATION;
tree expr_dummy;
bool expr_const_operands_dummy;
enum c_declarator_kind first_non_attr_kind;
unsigned int alignas_align = 0;
if (TREE_CODE (type) == ERROR_MARK)
return error_mark_node;
if (expr == NULL)
{
expr = &expr_dummy;
expr_dummy = NULL_TREE;
}
if (expr_const_operands == NULL)
expr_const_operands = &expr_const_operands_dummy;
if (declspecs->expr)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (declspecs->expr), *expr,
declspecs->expr);
else
*expr = declspecs->expr;
}
*expr_const_operands = declspecs->expr_const_operands;
if (decl_context == FUNCDEF)
funcdef_flag = true, decl_context = NORMAL;
/* Look inside a declarator for the name being declared
and get it as an IDENTIFIER_NODE, for an error message. */
{
const struct c_declarator *decl = declarator;
first_non_attr_kind = cdk_attrs;
while (decl)
switch (decl->kind)
{
case cdk_array:
loc = decl->id_loc;
/* FALL THRU. */
case cdk_function:
case cdk_pointer:
funcdef_syntax = (decl->kind == cdk_function);
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = decl->declarator;
break;
case cdk_attrs:
decl = decl->declarator;
break;
case cdk_id:
loc = decl->id_loc;
if (decl->u.id)
name = decl->u.id;
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = 0;
break;
default:
gcc_unreachable ();
}
if (name == NULL_TREE)
{
gcc_assert (decl_context == PARM
|| decl_context == TYPENAME
|| (decl_context == FIELD
&& declarator->kind == cdk_id));
gcc_assert (!initialized);
}
}
/* A function definition's declarator must have the form of
a function declarator. */
if (funcdef_flag && !funcdef_syntax)
return NULL_TREE;
/* If this looks like a function definition, make it one,
even if it occurs where parms are expected.
Then store_parm_decls will reject it and not use it as a parm. */
if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag)
decl_context = PARM;
if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS)
warn_deprecated_use (declspecs->type, declspecs->decl_attr);
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope
&& variably_modified_type_p (type, NULL_TREE))
{
if (name)
error_at (loc, "variably modified %qE at file scope", name);
else
error_at (loc, "variably modified field at file scope");
type = integer_type_node;
}
size_varies = C_TYPE_VARIABLE_SIZE (type) != 0;
/* Diagnose defaulting to "int". */
if (declspecs->default_int_p && !in_system_header_at (input_location))
{
/* Issue a warning if this is an ISO C 99 program or if
-Wreturn-type and this is a function, or if -Wimplicit;
prefer the former warning since it is more explicit. */
if ((warn_implicit_int || warn_return_type || flag_isoc99)
&& funcdef_flag)
warn_about_return_type = 1;
else
{
if (name)
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in declaration "
"of %qE", name);
else
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in type name");
}
}
/* Adjust the type if a bit-field is being declared,
-funsigned-bitfields applied and the type is not explicitly
"signed". */
if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p
&& TREE_CODE (type) == INTEGER_TYPE)
type = unsigned_type_for (type);
/* Figure out the type qualifiers for the declaration. There are
two ways a declaration can become qualified. One is something
like `const int i' where the `const' is explicit. Another is
something like `typedef const int CI; CI i' where the type of the
declaration contains the `const'. A third possibility is that
there is a type qualifier on the element type of a typedefed
array type, in which case we should extract that qualifier so
that c_apply_type_quals_to_decl receives the full list of
qualifiers to work with (C90 is not entirely clear about whether
duplicate qualifiers should be diagnosed in this case, but it
seems most appropriate to do so). */
element_type = strip_array_types (type);
constp = declspecs->const_p + TYPE_READONLY (element_type);
restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type);
volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type);
atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type);
as1 = declspecs->address_space;
as2 = TYPE_ADDR_SPACE (element_type);
address_space = ADDR_SPACE_GENERIC_P (as1)? as2 : as1;
if (constp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>");
if (restrictp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>");
if (volatilep > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>");
if (atomicp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>");
if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2)
error_at (loc, "conflicting named address spaces (%s vs %s)",
c_addr_space_name (as1), c_addr_space_name (as2));
if ((TREE_CODE (type) == ARRAY_TYPE
|| first_non_attr_kind == cdk_array)
&& TYPE_QUALS (element_type))
{
orig_qual_type = type;
type = TYPE_MAIN_VARIANT (type);
}
type_quals = ((constp ? TYPE_QUAL_CONST : 0)
| (restrictp ? TYPE_QUAL_RESTRICT : 0)
| (volatilep ? TYPE_QUAL_VOLATILE : 0)
| (atomicp ? TYPE_QUAL_ATOMIC : 0)
| ENCODE_QUAL_ADDR_SPACE (address_space));
if (type_quals != TYPE_QUALS (element_type))
orig_qual_type = NULL_TREE;
/* Applying the _Atomic qualifier to an array type (through the use
of typedefs or typeof) must be detected here. If the qualifier
is introduced later, any appearance of applying it to an array is
actually applying it to an element of that array. */
if (declspecs->atomic_p && TREE_CODE (type) == ARRAY_TYPE)
error_at (loc, "%<_Atomic%>-qualified array type");
/* Warn about storage classes that are invalid for certain
kinds of declarations (parameters, typenames, etc.). */
if (funcdef_flag
&& (threadp
|| storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef))
{
if (storage_class == csc_auto)
pedwarn (loc,
(current_scope == file_scope) ? 0 : OPT_Wpedantic,
"function definition declared %<auto%>");
if (storage_class == csc_register)
error_at (loc, "function definition declared %<register%>");
if (storage_class == csc_typedef)
error_at (loc, "function definition declared %<typedef%>");
if (threadp)
error_at (loc, "function definition declared %qs",
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
if (storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef)
storage_class = csc_none;
}
else if (decl_context != NORMAL && (storage_class != csc_none || threadp))
{
if (decl_context == PARM && storage_class == csc_register)
;
else
{
switch (decl_context)
{
case FIELD:
if (name)
error_at (loc, "storage class specified for structure "
"field %qE", name);
else
error_at (loc, "storage class specified for structure field");
break;
case PARM:
if (name)
error_at (loc, "storage class specified for parameter %qE",
name);
else
error_at (loc, "storage class specified for unnamed parameter");
break;
default:
error_at (loc, "storage class specified for typename");
break;
}
storage_class = csc_none;
threadp = false;
}
}
else if (storage_class == csc_extern
&& initialized
&& !funcdef_flag)
{
/* 'extern' with initialization is invalid if not at file scope. */
if (current_scope == file_scope)
{
/* It is fine to have 'extern const' when compiling at C
and C++ intersection. */
if (!(warn_cxx_compat && constp))
warning_at (loc, 0, "%qE initialized and declared %<extern%>",
name);
}
else
error_at (loc, "%qE has both %<extern%> and initializer", name);
}
else if (current_scope == file_scope)
{
if (storage_class == csc_auto)
error_at (loc, "file-scope declaration of %qE specifies %<auto%>",
name);
if (pedantic && storage_class == csc_register)
pedwarn (input_location, OPT_Wpedantic,
"file-scope declaration of %qE specifies %<register%>", name);
}
else
{
if (storage_class == csc_extern && funcdef_flag)
error_at (loc, "nested function %qE declared %<extern%>", name);
else if (threadp && storage_class == csc_none)
{
error_at (loc, "function-scope %qE implicitly auto and declared "
"%qs", name,
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
}
}
/* Now figure out the structure of the declarator proper.
Descend through it, creating more complex types, until we reach
the declared identifier (or NULL_TREE, in an absolute declarator).
At each stage we maintain an unqualified version of the type
together with any qualifiers that should be applied to it with
c_build_qualified_type; this way, array types including
multidimensional array types are first built up in unqualified
form and then the qualified form is created with
TYPE_MAIN_VARIANT pointing to the unqualified form. */
while (declarator && declarator->kind != cdk_id)
{
if (type == error_mark_node)
{
declarator = declarator->declarator;
continue;
}
/* Each level of DECLARATOR is either a cdk_array (for ...[..]),
a cdk_pointer (for *...),
a cdk_function (for ...(...)),
a cdk_attrs (for nested attributes),
or a cdk_id (for the name being declared
or the place in an absolute declarator
where the name was omitted).
For the last case, we have just exited the loop.
At this point, TYPE is the type of elements of an array,
or for a function to return, or for a pointer to point to.
After this sequence of ifs, TYPE is the type of the
array or function or pointer, and DECLARATOR has had its
outermost layer removed. */
if (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static)
{
/* Only the innermost declarator (making a parameter be of
array type which is converted to pointer type)
may have static or type qualifiers. */
error_at (loc, "static or type qualifiers in non-parameter array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
switch (declarator->kind)
{
case cdk_attrs:
{
/* A declarator with embedded attributes. */
tree attrs = declarator->u.attrs;
const struct c_declarator *inner_decl;
int attr_flags = 0;
declarator = declarator->declarator;
inner_decl = declarator;
while (inner_decl->kind == cdk_attrs)
inner_decl = inner_decl->declarator;
if (inner_decl->kind == cdk_id)
attr_flags |= (int) ATTR_FLAG_DECL_NEXT;
else if (inner_decl->kind == cdk_function)
attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT;
else if (inner_decl->kind == cdk_array)
attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT;
returned_attrs = decl_attributes (&type,
chainon (returned_attrs, attrs),
attr_flags);
break;
}
case cdk_array:
{
tree itype = NULL_TREE;
tree size = declarator->u.array.dimen;
/* The index is a signed object `sizetype' bits wide. */
tree index_type = c_common_signed_type (sizetype);
array_ptr_quals = declarator->u.array.quals;
array_ptr_attrs = declarator->u.array.attrs;
array_parm_static = declarator->u.array.static_p;
array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p;
declarator = declarator->declarator;
/* Check for some types that there cannot be arrays of. */
if (VOID_TYPE_P (type))
{
if (name)
error_at (loc, "declaration of %qE as array of voids", name);
else
error_at (loc, "declaration of type name as array of voids");
type = error_mark_node;
}
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "declaration of %qE as array of functions",
name);
else
error_at (loc, "declaration of type name as array of "
"functions");
type = error_mark_node;
}
if (pedantic && !in_system_header_at (input_location)
&& flexible_array_type_p (type))
pedwarn (loc, OPT_Wpedantic,
"invalid use of structure with flexible array member");
if (size == error_mark_node)
type = error_mark_node;
if (type == error_mark_node)
continue;
/* If size was specified, set ITYPE to a range-type for
that size. Otherwise, ITYPE remains null. finish_decl
may figure it out from an initial value. */
if (size)
{
bool size_maybe_const = true;
bool size_int_const = (TREE_CODE (size) == INTEGER_CST
&& !TREE_OVERFLOW (size));
bool this_size_varies = false;
/* Strip NON_LVALUE_EXPRs since we aren't using as an
lvalue. */
STRIP_TYPE_NOPS (size);
if (!INTEGRAL_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has non-integer type",
name);
else
error_at (loc,
"size of unnamed array has non-integer type");
size = integer_one_node;
}
/* This can happen with enum forward declaration. */
else if (!COMPLETE_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has incomplete type",
name);
else
error_at (loc, "size of unnamed array has incomplete "
"type");
size = integer_one_node;
}
size = c_fully_fold (size, false, &size_maybe_const);
if (pedantic && size_maybe_const && integer_zerop (size))
{
if (name)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array %qE", name);
else
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array");
}
if (TREE_CODE (size) == INTEGER_CST && size_maybe_const)
{
constant_expression_warning (size);
if (tree_int_cst_sgn (size) < 0)
{
if (name)
error_at (loc, "size of array %qE is negative", name);
else
error_at (loc, "size of unnamed array is negative");
size = integer_one_node;
}
/* Handle a size folded to an integer constant but
not an integer constant expression. */
if (!size_int_const)
{
/* If this is a file scope declaration of an
ordinary identifier, this is invalid code;
diagnosing it here and not subsequently
treating the type as variable-length avoids
more confusing diagnostics later. */
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
pedwarn (input_location, 0,
"variably modified %qE at file scope",
name);
else
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
}
}
else if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
{
error_at (loc, "variably modified %qE at file scope", name);
size = integer_one_node;
}
else
{
/* Make sure the array size remains visibly
nonconstant even if it is (eg) a const variable
with known value. */
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
if (sanitize_flags_p (SANITIZE_VLA)
&& current_function_decl != NULL_TREE
&& decl_context == NORMAL)
{
/* Evaluate the array size only once. */
size = save_expr (size);
size = c_fully_fold (size, false, NULL);
size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size),
ubsan_instrument_vla (loc, size),
size);
}
}
if (integer_zerop (size) && !this_size_varies)
{
/* A zero-length array cannot be represented with
an unsigned index type, which is what we'll
get with build_index_type. Create an
open-ended range instead. */
itype = build_range_type (sizetype, size, NULL_TREE);
}
else
{
/* Arrange for the SAVE_EXPR on the inside of the
MINUS_EXPR, which allows the -1 to get folded
with the +1 that happens when building TYPE_SIZE. */
if (size_varies)
size = save_expr (size);
if (this_size_varies && TREE_CODE (size) == INTEGER_CST)
size = build2 (COMPOUND_EXPR, TREE_TYPE (size),
integer_zero_node, size);
/* Compute the maximum valid index, that is, size
- 1. Do the calculation in index_type, so that
if it is a variable the computations will be
done in the proper mode. */
itype = fold_build2_loc (loc, MINUS_EXPR, index_type,
convert (index_type, size),
convert (index_type,
size_one_node));
/* The above overflows when size does not fit
in index_type.
??? While a size of INT_MAX+1 technically shouldn't
cause an overflow (because we subtract 1), handling
this case seems like an unnecessary complication. */
if (TREE_CODE (size) == INTEGER_CST
&& !int_fits_type_p (size, index_type))
{
if (name)
error_at (loc, "size of array %qE is too large",
name);
else
error_at (loc, "size of unnamed array is too large");
type = error_mark_node;
continue;
}
itype = build_index_type (itype);
}
if (this_size_varies)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (size),
*expr, size);
else
*expr = size;
*expr_const_operands &= size_maybe_const;
}
}
else if (decl_context == FIELD)
{
bool flexible_array_member = false;
if (array_parm_vla_unspec_p)
/* Field names can in fact have function prototype
scope so [*] is disallowed here through making
the field variably modified, not through being
something other than a declaration with function
prototype scope. */
size_varies = true;
else
{
const struct c_declarator *t = declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
flexible_array_member = (t->kind == cdk_id);
}
if (flexible_array_member
&& !in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
/* ISO C99 Flexible array members are effectively
identical to GCC's zero-length array extension. */
if (flexible_array_member || array_parm_vla_unspec_p)
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
}
else if (decl_context == PARM)
{
if (array_parm_vla_unspec_p)
{
itype = build_range_type (sizetype, size_zero_node, NULL_TREE);
size_varies = true;
}
}
else if (decl_context == TYPENAME)
{
if (array_parm_vla_unspec_p)
{
/* C99 6.7.5.2p4 */
warning (0, "%<[*]%> not in a declaration");
/* We use this to avoid messing up with incomplete
array types of the same type, that would
otherwise be modified below. */
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
size_varies = true;
}
}
/* Complain about arrays of incomplete types. */
if (!COMPLETE_TYPE_P (type))
{
error_at (loc, "array type has incomplete element type %qT",
type);
/* See if we can be more helpful. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
inform (loc, "declaration of %qE as multidimensional "
"array must have bounds for all dimensions "
"except the first", name);
else
inform (loc, "declaration of multidimensional array "
"must have bounds for all dimensions except "
"the first");
}
type = error_mark_node;
}
else
/* When itype is NULL, a shared incomplete array type is
returned for all array of a given type. Elsewhere we
make sure we don't complete that type before copying
it, but here we want to make sure we don't ever
modify the shared type, so we gcc_assert (itype)
below. */
{
addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type))
type = build_qualified_type (type,
ENCODE_QUAL_ADDR_SPACE (as));
type = build_array_type (type, itype);
}
if (type != error_mark_node)
{
if (size_varies)
{
/* It is ok to modify type here even if itype is
NULL: if size_varies, we're in a
multi-dimensional array and the inner type has
variable size, so the enclosing shared array type
must too. */
if (size && TREE_CODE (size) == INTEGER_CST)
type
= build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
C_TYPE_VARIABLE_SIZE (type) = 1;
}
/* The GCC extension for zero-length arrays differs from
ISO flexible array members in that sizeof yields
zero. */
if (size && integer_zerop (size))
{
gcc_assert (itype);
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
if (array_parm_vla_unspec_p)
{
gcc_assert (itype);
/* The type is complete. C99 6.7.5.2p4 */
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
if (!valid_array_size_p (loc, type, name))
type = error_mark_node;
}
if (decl_context != PARM
&& (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static))
{
error_at (loc, "static or type qualifiers in non-parameter "
"array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
orig_qual_indirect++;
break;
}
case cdk_function:
{
/* Say it's a definition only for the declarator closest
to the identifier, apart possibly from some
attributes. */
bool really_funcdef = false;
tree arg_types;
orig_qual_type = NULL_TREE;
if (funcdef_flag)
{
const struct c_declarator *t = declarator->declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
really_funcdef = (t->kind == cdk_id);
}
/* Declaring a function type. Make sure we have a valid
type for the function to return. */
if (type == error_mark_node)
continue;
size_varies = false;
/* Warn about some types functions can't return. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning a "
"function", name);
else
error_at (loc, "type name declared as function "
"returning a function");
type = integer_type_node;
}
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning an array",
name);
else
error_at (loc, "type name declared as function returning "
"an array");
type = integer_type_node;
}
/* Construct the function type and go to the next
inner layer of declarator. */
arg_info = declarator->u.arg_info;
arg_types = grokparms (arg_info, really_funcdef);
/* Type qualifiers before the return type of the function
qualify the return type, not the function type. */
if (type_quals)
{
const enum c_declspec_word ignored_quals_list[] =
{
cdw_const, cdw_volatile, cdw_restrict, cdw_address_space,
cdw_atomic, cdw_number_of_elements
};
location_t specs_loc
= smallest_type_quals_location (declspecs->locations,
ignored_quals_list);
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = declspecs->locations[cdw_typedef];
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = loc;
/* Type qualifiers on a function return type are
normally permitted by the standard but have no
effect, so give a warning at -Wreturn-type.
Qualifiers on a void return type are banned on
		     function definitions in ISO C; GCC used to use
them for noreturn functions. The resolution of C11
DR#423 means qualifiers (other than _Atomic) are
actually removed from the return type when
determining the function type. */
int quals_used = type_quals;
if (flag_isoc11)
quals_used &= TYPE_QUAL_ATOMIC;
if (quals_used && VOID_TYPE_P (type) && really_funcdef)
pedwarn (specs_loc, 0,
"function definition has qualified void return type");
else
warning_at (specs_loc, OPT_Wignored_qualifiers,
"type qualifiers ignored on function return type");
/* Ensure an error for restrict on invalid types; the
DR#423 resolution is not entirely clear about
this. */
if (flag_isoc11
&& (type_quals & TYPE_QUAL_RESTRICT)
&& (!POINTER_TYPE_P (type)
|| !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
error_at (loc, "invalid use of %<restrict%>");
if (quals_used)
type = c_build_qualified_type (type, quals_used);
}
type_quals = TYPE_UNQUALIFIED;
type = build_function_type (type, arg_types);
declarator = declarator->declarator;
/* Set the TYPE_CONTEXTs for each tagged type which is local to
the formal parameter list of this FUNCTION_TYPE to point to
the FUNCTION_TYPE node itself. */
{
c_arg_tag *tag;
unsigned ix;
FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
TYPE_CONTEXT (tag->type) = type;
}
break;
}
case cdk_pointer:
{
/* Merge any constancy or volatility into the target type
for the pointer. */
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
orig_qual_type = NULL_TREE;
size_varies = false;
/* When the pointed-to type involves components of variable size,
care must be taken to ensure that the size evaluation code is
emitted early enough to dominate all the possible later uses
and late enough for the variables on which it depends to have
been assigned.
This is expected to happen automatically when the pointed-to
	     type has a name/declaration of its own, but special attention
is required if the type is anonymous.
We attach an artificial TYPE_DECL to such pointed-to type
and arrange for it to be included in a DECL_EXPR. This
forces the sizes evaluation at a safe point and ensures it
is not deferred until e.g. within a deeper conditional context.
PARM contexts have no enclosing statement list that
can hold the DECL_EXPR, so we need to use a BIND_EXPR
instead, and add it to the list of expressions that
need to be evaluated.
TYPENAME contexts do have an enclosing statement list,
but it would be incorrect to use it, as the size should
only be evaluated if the containing expression is
evaluated. We might also be in the middle of an
expression with side effects on the pointed-to type size
"arguments" prior to the pointer declaration point and
the fake TYPE_DECL in the enclosing context would force
the size evaluation prior to the side effects. We therefore
use BIND_EXPRs in TYPENAME contexts too. */
if (!TYPE_NAME (type)
&& variably_modified_type_p (type, NULL_TREE))
{
tree bind = NULL_TREE;
if (decl_context == TYPENAME || decl_context == PARM)
{
bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
NULL_TREE, NULL_TREE);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = push_stmt_list ();
push_scope ();
}
tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type);
DECL_ARTIFICIAL (decl) = 1;
pushdecl (decl);
finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE);
TYPE_NAME (type) = decl;
if (bind)
{
pop_scope ();
BIND_EXPR_BODY (bind)
= pop_stmt_list (BIND_EXPR_BODY (bind));
if (*expr)
*expr = build2 (COMPOUND_EXPR, void_type_node, *expr,
bind);
else
*expr = bind;
}
}
type = c_build_pointer_type (type);
/* Process type qualifiers (such as const or volatile)
that were given inside the `*'. */
type_quals = declarator->u.pointer_quals;
declarator = declarator->declarator;
break;
}
default:
gcc_unreachable ();
}
}
*decl_attrs = chainon (returned_attrs, *decl_attrs);
/* Now TYPE has the actual type, apart from any qualifiers in
TYPE_QUALS. */
/* Warn about address space used for things other than static memory or
pointers. */
address_space = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (address_space))
{
if (decl_context == NORMAL)
{
switch (storage_class)
{
case csc_auto:
error ("%qs combined with %<auto%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_register:
error ("%qs combined with %<register%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_none:
if (current_function_scope)
{
error ("%qs specified for auto variable %qE",
c_addr_space_name (address_space), name);
break;
}
break;
case csc_static:
case csc_extern:
case csc_typedef:
break;
default:
gcc_unreachable ();
}
}
else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE)
{
if (name)
error ("%qs specified for parameter %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for unnamed parameter",
c_addr_space_name (address_space));
}
else if (decl_context == FIELD)
{
if (name)
error ("%qs specified for structure field %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for structure field",
c_addr_space_name (address_space));
}
}
/* Check the type and width of a bit-field. */
if (bitfield)
{
check_bitfield_type_and_width (loc, &type, width, name);
/* C11 makes it implementation-defined (6.7.2.1#5) whether
atomic types are permitted for bit-fields; we have no code to
make bit-field accesses atomic, so disallow them. */
if (type_quals & TYPE_QUAL_ATOMIC)
{
if (name)
error_at (loc, "bit-field %qE has atomic type", name);
else
error_at (loc, "bit-field has atomic type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
}
/* Reject invalid uses of _Alignas. */
if (declspecs->alignas_p)
{
if (storage_class == csc_typedef)
error_at (loc, "alignment specified for typedef %qE", name);
else if (storage_class == csc_register)
error_at (loc, "alignment specified for %<register%> object %qE",
name);
else if (decl_context == PARM)
{
if (name)
error_at (loc, "alignment specified for parameter %qE", name);
else
error_at (loc, "alignment specified for unnamed parameter");
}
else if (bitfield)
{
if (name)
error_at (loc, "alignment specified for bit-field %qE", name);
else
error_at (loc, "alignment specified for unnamed bit-field");
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "alignment specified for function %qE", name);
else if (declspecs->align_log != -1 && TYPE_P (type))
{
alignas_align = 1U << declspecs->align_log;
if (alignas_align < min_align_of_type (type))
{
if (name)
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of %qE", name);
else
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of unnamed field");
alignas_align = 0;
}
}
}
/* If this is declaring a typedef name, return a TYPE_DECL. */
if (storage_class == csc_typedef)
{
tree decl;
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
TYPE_DECL, declarator->u.id, type);
if (declspecs->explicit_signed_p)
C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0,"typedef %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0,"typedef %q+D declared %<_Noreturn%>", decl);
if (warn_cxx_compat && declarator->u.id != NULL_TREE)
{
struct c_binding *b = I_TAG_BINDING (declarator->u.id);
if (b != NULL
&& b->decl != NULL_TREE
&& (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
&& TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type))
{
if (warning_at (declarator->id_loc, OPT_Wc___compat,
("using %qD as both a typedef and a tag is "
"invalid in C++"), decl)
&& b->locus != UNKNOWN_LOCATION)
inform (b->locus, "originally defined here");
}
}
return decl;
}
/* If this is a type name (such as, in a cast or sizeof),
compute the type and return it now. */
if (decl_context == TYPENAME)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids const or volatile function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
return type;
}
if (pedantic && decl_context == FIELD
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.2.1p8 */
pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot "
"have a variably modified type");
}
/* Aside from typedefs and type names (handled above),
    `void' at top level (not within pointer)
    is allowed only in public variables.
    We don't complain about parms either, but that is because
    a better error message can be made later.  */
if (VOID_TYPE_P (type) && decl_context != PARM
&& !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE)
&& (storage_class == csc_extern
|| (current_scope == file_scope
&& !(storage_class == csc_static
|| storage_class == csc_register)))))
{
error_at (loc, "variable or field %qE declared void", name);
type = integer_type_node;
}
/* Now create the decl, which may be a VAR_DECL, a PARM_DECL
or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
{
tree decl;
if (decl_context == PARM)
{
tree promoted_type;
bool array_parameter_p = false;
/* A parameter declared as an array of T is really a pointer to T.
One declared as a function is really a pointer to a function. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
/* Transfer const-ness of array into that of type pointed to. */
type = TREE_TYPE (type);
if (orig_qual_type != NULL_TREE)
{
if (orig_qual_indirect == 0)
orig_qual_type = TREE_TYPE (orig_qual_type);
else
orig_qual_indirect--;
}
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
type = c_build_pointer_type (type);
type_quals = array_ptr_quals;
if (type_quals)
type = c_build_qualified_type (type, type_quals);
/* We don't yet implement attributes in this context. */
if (array_ptr_attrs != NULL_TREE)
warning_at (loc, OPT_Wattributes,
"attributes in parameter array declarator ignored");
size_varies = false;
array_parameter_p = true;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
type = c_build_pointer_type (type);
type_quals = TYPE_UNQUALIFIED;
}
else if (type_quals)
type = c_build_qualified_type (type, type_quals);
decl = build_decl (declarator->id_loc,
PARM_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
C_ARRAY_PARAMETER (decl) = array_parameter_p;
/* Compute the type actually passed in the parmlist,
for the case where there is no prototype.
(For example, shorts and chars are passed as ints.)
When there is a prototype, this is overridden later. */
if (type == error_mark_node)
promoted_type = type;
else
promoted_type = c_type_promotes_to (type);
DECL_ARG_TYPE (decl) = promoted_type;
if (declspecs->inline_p)
pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl);
}
else if (decl_context == FIELD)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
/* Structure field. It may not be a function. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc, "field %qE declared as a function", name);
type = build_pointer_type (type);
}
else if (TREE_CODE (type) != ERROR_MARK
&& !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type))
{
if (name)
error_at (loc, "field %qE has incomplete type", name);
else
error_at (loc, "unnamed field has incomplete type");
type = error_mark_node;
}
else if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE)
{
/* We have a flexible array member through a typedef.
Set suitable range. Whether this is a correct position
for a flexible array member will be determined elsewhere. */
if (!in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node,
NULL_TREE);
if (orig_qual_indirect == 0)
orig_qual_type = NULL_TREE;
}
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
FIELD_DECL, declarator->u.id, type);
DECL_NONADDRESSABLE_P (decl) = bitfield;
if (bitfield && !declarator->u.id)
{
TREE_NO_WARNING (decl) = 1;
DECL_PADDING_P (decl) = 1;
}
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (storage_class == csc_register || threadp)
{
error_at (loc, "invalid storage class for function %qE", name);
}
else if (current_scope != file_scope)
{
/* Function declaration not at file scope. Storage
classes other than `extern' are not allowed, C99
6.7.1p5, and `extern' makes no difference. However,
GCC allows 'auto', perhaps with 'inline', to support
nested functions. */
if (storage_class == csc_auto)
pedwarn (loc, OPT_Wpedantic,
"invalid storage class for function %qE", name);
else if (storage_class == csc_static)
{
error_at (loc, "invalid storage class for function %qE", name);
if (funcdef_flag)
storage_class = declspecs->storage_class = csc_none;
else
return NULL_TREE;
}
}
decl = build_decl (declarator->id_loc,
FUNCTION_DECL, declarator->u.id, type);
decl = build_decl_attribute_variant (decl, decl_attr);
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl))
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
/* Every function declaration is an external reference
(DECL_EXTERNAL) except for those which are not at file
scope and are explicitly declared "auto". This is
forbidden by standard C (C99 6.7.1p5) and is interpreted by
GCC to signify a forward declaration of a nested function. */
if (storage_class == csc_auto && current_scope != file_scope)
DECL_EXTERNAL (decl) = 0;
/* In C99, a function which is declared 'inline' with 'extern'
is not an external reference (which is confusing). It
means that the later definition of the function must be output
in this file, C99 6.7.4p6. In GNU C89, a function declared
'extern inline' is an external reference. */
else if (declspecs->inline_p && storage_class != csc_static)
DECL_EXTERNAL (decl) = ((storage_class == csc_extern)
== flag_gnu89_inline);
else
DECL_EXTERNAL (decl) = !initialized;
/* Record absence of global scope for `static' or `auto'. */
TREE_PUBLIC (decl)
= !(storage_class == csc_static || storage_class == csc_auto);
/* For a function definition, record the argument information
block where store_parm_decls will look for it. */
if (funcdef_flag)
current_function_arg_info = arg_info;
if (declspecs->default_int_p)
C_FUNCTION_IMPLICIT_INT (decl) = 1;
/* Record presence of `inline' and `_Noreturn', if it is
reasonable. */
if (flag_hosted && MAIN_NAME_P (declarator->u.id))
{
if (declspecs->inline_p)
pedwarn (loc, 0, "cannot inline function %<main%>");
if (declspecs->noreturn_p)
pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>");
}
else
{
if (declspecs->inline_p)
/* Record that the function is declared `inline'. */
DECL_DECLARED_INLINE_P (decl) = 1;
if (declspecs->noreturn_p)
{
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 does not support %<_Noreturn%>");
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 does not support %<_Noreturn%>");
TREE_THIS_VOLATILE (decl) = 1;
}
}
}
else
{
/* It's a variable. */
/* An uninitialized decl with `extern' is a reference. */
int extern_ref = !initialized && storage_class == csc_extern;
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
/* C99 6.2.2p7: It is invalid (compile-time undefined
behavior) to create an 'extern' declaration for a
variable if there is a global declaration that is
'static' and the global declaration is not visible.
(If the static declaration _is_ currently visible,
the 'extern' declaration is taken to refer to that decl.) */
if (extern_ref && current_scope != file_scope)
{
tree global_decl = identifier_global_value (declarator->u.id);
tree visible_decl = lookup_name (declarator->u.id);
if (global_decl
&& global_decl != visible_decl
&& VAR_P (global_decl)
&& !TREE_PUBLIC (global_decl))
error_at (loc, "variable previously declared %<static%> "
"redeclared %<extern%>");
}
decl = build_decl (declarator->id_loc,
VAR_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl);
/* At file scope, an initialized extern declaration may follow
a static declaration. In that case, DECL_EXTERNAL will be
reset later in start_decl. */
DECL_EXTERNAL (decl) = (storage_class == csc_extern);
/* At file scope, the presence of a `static' or `register' storage
class specifier, or the absence of all storage class specifiers
makes this declaration a definition (perhaps tentative). Also,
the absence of `static' makes it public. */
if (current_scope == file_scope)
{
TREE_PUBLIC (decl) = storage_class != csc_static;
TREE_STATIC (decl) = !extern_ref;
}
/* Not at file scope, only `static' makes a static definition. */
else
{
TREE_STATIC (decl) = (storage_class == csc_static);
TREE_PUBLIC (decl) = extern_ref;
}
if (threadp)
set_decl_tls_model (decl, decl_default_tls_model (decl));
}
if ((storage_class == csc_extern
|| (storage_class == csc_none
&& TREE_CODE (type) == FUNCTION_TYPE
&& !funcdef_flag))
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.5.2p2 */
if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "non-nested function with variably modified type");
else
error_at (loc, "object with variably modified type must have "
"no linkage");
}
/* Record `register' declaration for warnings on &
and in case doing stupid register allocation. */
if (storage_class == csc_register)
{
C_DECL_REGISTER (decl) = 1;
DECL_REGISTER (decl) = 1;
}
/* Record constancy and volatility. */
c_apply_type_quals_to_decl (type_quals, decl);
/* Apply _Alignas specifiers. */
if (alignas_align)
{
SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
DECL_USER_ALIGN (decl) = 1;
}
/* If a type has volatile components, it should be stored in memory.
Otherwise, the fact that those components are volatile
will be ignored, and would even crash the compiler.
Of course, this only makes sense on VAR,PARM, and RESULT decl's. */
if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl))
&& (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL
|| TREE_CODE (decl) == RESULT_DECL))
{
/* It is not an error for a structure with volatile fields to
be declared register, but reset DECL_REGISTER since it
cannot actually go in a register. */
int was_reg = C_DECL_REGISTER (decl);
C_DECL_REGISTER (decl) = 0;
DECL_REGISTER (decl) = 0;
c_mark_addressable (decl);
C_DECL_REGISTER (decl) = was_reg;
}
/* This is the earliest point at which we might know the assembler
name of a variable. Thus, if it's known before this, die horribly. */
gcc_assert (!HAS_DECL_ASSEMBLER_NAME_P (decl)
|| !DECL_ASSEMBLER_NAME_SET_P (decl));
if (warn_cxx_compat
&& VAR_P (decl)
&& TREE_PUBLIC (decl)
&& TREE_STATIC (decl)
&& (RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
|| TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
&& TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
("non-local variable %qD with anonymous type is "
"questionable in C++"),
decl);
return decl;
}
}
/* Decode the parameter-list info for a function type or function definition.
   The argument is the value returned by `get_parm_info' (or made in c-parse.c
   if there is an identifier list instead of a parameter decl list).
   These two functions are separate because when a function returns
   or receives functions then each is called multiple times but the order
   of calls is different.  The last call to `grokparms' is always the one
   that contains the formal parameter names of a function definition.
   Return a list of arg types to use in the FUNCTION_TYPE for this function.
   FUNCDEF_FLAG is true for a function definition, false for
   a mere declaration.  A nonempty identifier-list gets an error message
   when FUNCDEF_FLAG is false.  */
static tree
grokparms (struct c_arg_info *arg_info, bool funcdef_flag)
{
  tree arg_types = arg_info->types;

  /* A "[*]" array size is only meaningful in function prototype scope;
     a definition's parameter list is not prototype scope.  */
  if (funcdef_flag && arg_info->had_vla_unspec)
    {
      /* A function definition isn't function prototype scope C99 6.2.1p4.  */
      /* C99 6.7.5.2p4 */
      error ("%<[*]%> not allowed in other than function prototype scope");
    }

  /* An empty type list on a declaration is an old-style unprototyped
     declaration; warn unless it came from a system header.  */
  if (arg_types == NULL_TREE && !funcdef_flag
      && !in_system_header_at (input_location))
    warning (OPT_Wstrict_prototypes,
	     "function declaration isn%'t a prototype");

  if (arg_types == error_mark_node)
    /* Don't set TYPE_ARG_TYPES in this case.  */
    return NULL_TREE;

  else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE)
    {
      /* The list contains parameter names without types, i.e. an
	 old-style identifier list such as "int f (a, b)".  Only a
	 definition may legitimately supply the types later.  */
      if (!funcdef_flag)
	{
	  pedwarn (input_location, 0, "parameter names (without types) in "
		   "function declaration");
	  arg_info->parms = NULL_TREE;
	}
      else
	arg_info->parms = arg_info->types;

      arg_info->types = NULL_TREE;
      return NULL_TREE;
    }
  else
    {
      tree parm, type, typelt;
      unsigned int parmno;

      /* If there is a parameter of incomplete type in a definition,
	 this is an error.  In a declaration this is valid, and a
	 struct or union type may be completed later, before any calls
	 or definition of the function.  In the case where the tag was
	 first declared within the parameter list, a warning has
	 already been given.  If a parameter has void type, then
	 however the function cannot be defined or called, so
	 warn.  */

      /* Walk the parameter decls and the type list in lockstep.  */
      for (parm = arg_info->parms, typelt = arg_types, parmno = 1;
	   parm;
	   parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++)
	{
	  type = TREE_VALUE (typelt);
	  if (type == error_mark_node)
	    continue;

	  if (!COMPLETE_TYPE_P (type))
	    {
	      if (funcdef_flag)
		{
		  if (DECL_NAME (parm))
		    error_at (input_location,
			      "parameter %u (%q+D) has incomplete type",
			      parmno, parm);
		  else
		    error_at (DECL_SOURCE_LOCATION (parm),
			      "parameter %u has incomplete type",
			      parmno);

		  /* Poison the entries so later passes don't trip over
		     the incomplete type, and drop the whole type list.  */
		  TREE_VALUE (typelt) = error_mark_node;
		  TREE_TYPE (parm) = error_mark_node;
		  arg_types = NULL_TREE;
		}
	      else if (VOID_TYPE_P (type))
		{
		  if (DECL_NAME (parm))
		    warning_at (input_location, 0,
				"parameter %u (%q+D) has void type",
				parmno, parm);
		  else
		    warning_at (DECL_SOURCE_LOCATION (parm), 0,
				"parameter %u has void type",
				parmno);
		}
	    }

	  if (DECL_NAME (parm) && TREE_USED (parm))
	    warn_if_shadowing (parm);
	}
      return arg_types;
    }
}
/* Allocate a c_arg_info structure on the parser's obstack and reset
   every field to its empty state.  The caller fills it in afterwards
   (see get_parm_info).  */
struct c_arg_info *
build_arg_info (void)
{
  struct c_arg_info *info = XOBNEW (&parser_obstack, struct c_arg_info);
  info->had_vla_unspec = 0;
  info->pending_sizes = NULL;
  info->parms = NULL_TREE;
  info->types = NULL_TREE;
  info->others = NULL_TREE;
  info->tags = NULL;
  return info;
}
/* Take apart the current scope and return a c_arg_info structure with
   info on a parameter list just parsed.
   This structure is later fed to 'grokparms' and 'store_parm_decls'.
   ELLIPSIS being true means the argument list ended in '...' so don't
   append a sentinel (void_list_node) to the end of the type-list.
   EXPR is NULL or an expression that needs to be evaluated for the
   side effects of array size expressions in the parameters.  */
struct c_arg_info *
get_parm_info (bool ellipsis, tree expr)
{
  struct c_binding *b = current_scope->bindings;
  struct c_arg_info *arg_info = build_arg_info ();

  /* Accumulators for the four categories of bindings we will see.  */
  tree parms = NULL_TREE;
  vec<c_arg_tag, va_gc> *tags = NULL;
  tree types = NULL_TREE;
  tree others = NULL_TREE;

  bool gave_void_only_once_err = false;

  arg_info->had_vla_unspec = current_scope->had_vla_unspec;

  /* The bindings in this scope must not get put into a block.
     We will take care of deleting the binding nodes.  */
  current_scope->bindings = 0;

  /* This function is only called if there was *something* on the
     parameter list.  */
  gcc_assert (b);

  /* A parameter list consisting solely of 'void' indicates that the
     function takes no arguments.  But if the 'void' is qualified
     (by 'const' or 'volatile'), or has a storage class specifier
     ('register'), then the behavior is undefined; issue an error.
     Typedefs for 'void' are OK (see DR#157).  */
  if (b->prev == 0			    /* one binding */
      && TREE_CODE (b->decl) == PARM_DECL   /* which is a parameter */
      && !DECL_NAME (b->decl)		    /* anonymous */
      && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */
    {
      if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED
	  || C_DECL_REGISTER (b->decl))
	error_at (b->locus, "%<void%> as only parameter may not be qualified");

      /* There cannot be an ellipsis.  */
      if (ellipsis)
	error_at (b->locus, "%<void%> must be the only parameter");

      arg_info->types = void_list_node;
      return arg_info;
    }

  /* Without an ellipsis, terminate the type list with the sentinel so
     the resulting function type is a real prototype.  */
  if (!ellipsis)
    types = void_list_node;

  /* Break up the bindings list into parms, tags, types, and others;
     apply sanity checks; purge the name-to-decl bindings.  */
  while (b)
    {
      tree decl = b->decl;
      tree type = TREE_TYPE (decl);
      c_arg_tag tag;
      const char *keyword;

      switch (TREE_CODE (decl))
	{
	case PARM_DECL:
	  /* Pop this parameter's name binding, restoring whatever it
	     shadowed.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }

	  /* Check for forward decls that never got their actual decl.  */
	  if (TREE_ASM_WRITTEN (decl))
	    error_at (b->locus,
		      "parameter %q+D has just a forward declaration", decl);
	  /* Check for (..., void, ...) and issue an error.  */
	  else if (VOID_TYPE_P (type) && !DECL_NAME (decl))
	    {
	      if (!gave_void_only_once_err)
		{
		  error_at (b->locus, "%<void%> must be the only parameter");
		  gave_void_only_once_err = true;
		}
	    }
	  else
	    {
	      /* Valid parameter, add it to the list.  */
	      DECL_CHAIN (decl) = parms;
	      parms = decl;

	      /* Since there is a prototype, args are passed in their
		 declared types.  The back end may override this later.  */
	      DECL_ARG_TYPE (decl) = type;
	      types = tree_cons (0, type, types);
	    }
	  break;

	case ENUMERAL_TYPE: keyword = "enum"; goto tag;
	case UNION_TYPE:    keyword = "union"; goto tag;
	case RECORD_TYPE:   keyword = "struct"; goto tag;
	tag:
	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }

	  /* Warn about any struct, union or enum tags defined in a
	     parameter list.  The scope of such types is limited to
	     the parameter list, which is rarely if ever desirable
	     (it's impossible to call such a function with type-
	     correct arguments).  An anonymous union parm type is
	     meaningful as a GNU extension, so don't warn for that.  */
	  if (TREE_CODE (decl) != UNION_TYPE || b->id != NULL_TREE)
	    {
	      if (b->id)
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "%<%s %E%> declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword, b->id);
	      else
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "anonymous %s declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword);
	    }

	  tag.id = b->id;
	  tag.type = decl;
	  vec_safe_push (tags, tag);
	  break;

	case FUNCTION_DECL:
	  /* FUNCTION_DECLs appear when there is an implicit function
	     declaration in the parameter list.  */
	  gcc_assert (b->nested || seen_error ());
	  goto set_shadowed;

	case CONST_DECL:
	case TYPE_DECL:
	  /* CONST_DECLs appear here when we have an embedded enum,
	     and TYPE_DECLs appear here when we have an embedded struct
	     or union.  No warnings for this - we already warned about the
	     type itself.  */

	  /* When we reinsert this decl in the function body, we need
	     to reconstruct whether it was marked as nested.  */
	  gcc_assert (!b->nested);
	  DECL_CHAIN (decl) = others;
	  others = decl;
	  /* fall through */

	case ERROR_MARK:
	set_shadowed:
	  /* error_mark_node appears here when we have an undeclared
	     variable.  Just throw it away.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }
	  break;

	  /* Other things that might be encountered.  */
	case LABEL_DECL:
	case VAR_DECL:
	default:
	  gcc_unreachable ();
	}

      b = free_binding_and_advance (b);
    }

  arg_info->parms = parms;
  arg_info->tags = tags;
  arg_info->types = types;
  arg_info->others = others;
  arg_info->pending_sizes = expr;
  return arg_info;
}
/* Get the struct, enum or union (CODE says which) with tag NAME.
   Define the tag as a forward-reference with location LOC if it is
   not defined.  Return a c_typespec structure for the type
   specifier.  */
struct c_typespec
parser_xref_tag (location_t loc, enum tree_code code, tree name)
{
  struct c_typespec ret;
  location_t refloc;

  ret.expr = NULL_TREE;
  ret.expr_const_operands = true;

  /* If a cross reference is requested, look up the type
     already defined for this tag and return it.  */
  tree ref = lookup_tag (code, name, false, &refloc);
  ret.kind = (ref ? ctsk_tagref : ctsk_tagfirstref);

  /* If this is the right type of tag, return what we found.
     (This reference will be shadowed by shadow_tag later if appropriate.)
     If this is the wrong type of tag, do not return it.  If it was the
     wrong type in the same scope, we will have had an error
     message already; if in a different scope and declaring
     a name, pending_xref_error will give an error message; but if in a
     different scope and not declaring a name, this tag should
     shadow the previous declaration of a different type of tag, and
     this would not work properly if we return the reference found.
     (For example, with "struct foo" in an outer scope, "union foo;"
     must shadow that tag with a new one of union type.)  */
  if (ref != NULL_TREE && TREE_CODE (ref) == code)
    {
      /* A tag defined inside a struct/union is scoped differently in
	 C++; mention that under -Wc++-compat.  */
      if (warn_cxx_compat
	  && C_TYPE_DEFINED_IN_STRUCT (ref)
	  && loc != UNKNOWN_LOCATION)
	{
	  if (code == ENUMERAL_TYPE)
	    {
	      warning_at (loc, OPT_Wc___compat,
			  ("enum type defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "enum type defined here");
	    }
	  else if (code == RECORD_TYPE)
	    {
	      warning_at (loc, OPT_Wc___compat,
			  ("struct defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "struct defined here");
	    }
	  else if (code == UNION_TYPE)
	    {
	      warning_at (loc, OPT_Wc___compat,
			  ("union defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "union defined here");
	    }
	  else
	    gcc_unreachable ();
	}

      ret.spec = ref;
      return ret;
    }

  /* If no such tag is yet defined, create a forward-reference node
     and record it as the "definition".
     When a real declaration of this type is found,
     the forward-reference will be altered into a real type.  */
  ref = make_node (code);
  if (code == ENUMERAL_TYPE)
    {
      /* Give the type a default layout like unsigned int
	 to avoid crashing if it does not get defined.  */
      SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node));
      SET_TYPE_ALIGN (ref, TYPE_ALIGN (unsigned_type_node));
      TYPE_USER_ALIGN (ref) = 0;
      TYPE_UNSIGNED (ref) = 1;
      TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
      TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
      TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
    }
  pushtag (loc, name, ref);
  ret.spec = ref;
  return ret;
}
/* Get the struct, enum or union (CODE says which) with tag NAME,
   creating a forward reference at the current input location if it
   is not yet defined.  Return a tree for the type itself.  */
tree
xref_tag (enum tree_code code, tree name)
{
  struct c_typespec spec = parser_xref_tag (input_location, code, name);
  return spec.spec;
}
/* Make sure that the tag NAME is defined *in the current scope*
   at least as a forward reference.
   LOC is the location of the struct's definition.
   CODE says which kind of tag NAME ought to be.
   This stores the current value of the file static STRUCT_PARSE_INFO
   in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a
   new c_struct_parse_info structure.  The old value of
   STRUCT_PARSE_INFO is restored in finish_struct.  */
tree
start_struct (location_t loc, enum tree_code code, tree name,
	      struct c_struct_parse_info **enclosing_struct_parse_info)
{
  /* If there is already a tag defined at this scope
     (as a forward reference), just return it.  */
  tree ref = NULL_TREE;
  location_t refloc = UNKNOWN_LOCATION;

  if (name != NULL_TREE)
    ref = lookup_tag (code, name, true, &refloc);
  if (ref && TREE_CODE (ref) == code)
    {
      /* Prefer the location of the tag's own stub decl for the
	 "originally defined here" note below.  */
      if (TYPE_STUB_DECL (ref))
	refloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (ref));

      /* A non-null TYPE_SIZE means the tag was already fully defined.  */
      if (TYPE_SIZE (ref))
	{
	  if (code == UNION_TYPE)
	    error_at (loc, "redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "redefinition of %<struct %E%>", name);
	  if (refloc != UNKNOWN_LOCATION)
	    inform (refloc, "originally defined here");
	  /* Don't create structures using a name already in use.  */
	  ref = NULL_TREE;
	}
      else if (C_TYPE_BEING_DEFINED (ref))
	{
	  if (code == UNION_TYPE)
	    error_at (loc, "nested redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "nested redefinition of %<struct %E%>", name);
	  /* Don't bother to report "originally defined here" for a
	     nested redefinition; the original definition should be
	     obvious.  */
	  /* Don't create structures that contain themselves.  */
	  ref = NULL_TREE;
	}
    }

  /* Otherwise create a forward-reference just so the tag is in scope.  */

  if (ref == NULL_TREE || TREE_CODE (ref) != code)
    {
      ref = make_node (code);
      pushtag (loc, name, ref);
    }

  C_TYPE_BEING_DEFINED (ref) = 1;
  /* Propagate -fpack-struct to every variant of the type.  */
  for (tree v = TYPE_MAIN_VARIANT (ref); v; v = TYPE_NEXT_VARIANT (v))
    TYPE_PACKED (v) = flag_pack_struct;

  *enclosing_struct_parse_info = struct_parse_info;
  struct_parse_info = new c_struct_parse_info ();

  /* FIXME: This will issue a warning for a use of a type defined
     within a statement expr used within sizeof, et al.  This is not
     terribly serious as C++ doesn't permit statement exprs within
     sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));

  return ref;
}
/* Process the specs, declarator and width (NULL if omitted)
   of a structure component, returning a FIELD_DECL node.
   WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node.
   DECL_ATTRS is as for grokdeclarator.
   LOC is the location of the structure component.
   This is done during the parsing of the struct declaration.
   The FIELD_DECL nodes are chained together and the lot of them
   are ultimately passed to `build_struct' to make the RECORD_TYPE node.  */
tree
grokfield (location_t loc,
	   struct c_declarator *declarator, struct c_declspecs *declspecs,
	   tree width, tree *decl_attrs)
{
  tree value;

  if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE
      && width == NULL_TREE)
    {
      /* This is an unnamed decl.

	 If we have something of the form "union { list } ;" then this
	 is the anonymous union extension.  Similarly for struct.

	 If this is something of the form "struct foo;", then
	   If MS or Plan 9 extensions are enabled, this is handled as
	     an anonymous struct.
	   Otherwise this is a forward declaration of a structure tag.

	 If this is something of the form "foo;" and foo is a TYPE_DECL, then
	   If foo names a structure or union without a tag, then this
	     is an anonymous struct (this is permitted by C11).
	   If MS or Plan 9 extensions are enabled and foo names a
	     structure, then again this is an anonymous struct.
	   Otherwise this is an error.

	 Oh what a horrid tangled web we weave.  I wonder if MS consciously
	 took this from Plan 9 or if it was an accident of implementation
	 that took root before someone noticed the bug...  */

      tree type = declspecs->type;
      bool ok = false;

      if (RECORD_OR_UNION_TYPE_P (type)
	  && (flag_ms_extensions
	      || flag_plan9_extensions
	      || !declspecs->typedef_p))
	{
	  if (flag_ms_extensions || flag_plan9_extensions)
	    ok = true;
	  else if (TYPE_NAME (type) == NULL)
	    ok = true;
	  else
	    ok = false;
	}
      if (!ok)
	{
	  pedwarn (loc, 0, "declaration does not declare anything");
	  return NULL_TREE;
	}
      /* Anonymous structs/unions are a C11 feature; pedwarn against
	 the active older standard.  */
      if (flag_isoc99)
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C99 doesn%'t support unnamed structs/unions");
      else
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C90 doesn%'t support unnamed structs/unions");
    }

  value = grokdeclarator (declarator, declspecs, FIELD, false,
			  width ? &width : NULL, decl_attrs, NULL, NULL,
			  DEPRECATED_NORMAL);

  finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE);
  /* Record the bit-field width on the decl.  */
  DECL_INITIAL (value) = width;
  if (width)
    SET_DECL_C_BIT_FIELD (value);

  if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE)
    {
      /* If we currently have a binding for this field, set the
	 in_struct field in the binding, so that we warn about lookups
	 which find it.  */
      struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value));
      if (b != NULL)
	{
	  /* If the in_struct field is not yet set, push it on a list
	     to be cleared when this struct is finished.  */
	  if (!b->in_struct)
	    {
	      struct_parse_info->fields.safe_push (b);
	      b->in_struct = 1;
	    }
	}
    }

  return value;
}
/* Subroutine of detect_field_duplicates: return whether X and Y,
which are both fields in the same struct, have duplicate field
names. */
static bool
is_duplicate_field (tree x, tree y)
{
if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y))
return true;
/* When using -fplan9-extensions, an anonymous field whose name is a
typedef can duplicate a field name. */
if (flag_plan9_extensions
&& (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE))
{
tree xt, xn, yt, yn;
xt = TREE_TYPE (x);
if (DECL_NAME (x) != NULL_TREE)
xn = DECL_NAME (x);
else if (RECORD_OR_UNION_TYPE_P (xt)
&& TYPE_NAME (xt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL)
xn = DECL_NAME (TYPE_NAME (xt));
else
xn = NULL_TREE;
yt = TREE_TYPE (y);
if (DECL_NAME (y) != NULL_TREE)
yn = DECL_NAME (y);
else if (RECORD_OR_UNION_TYPE_P (yt)
&& TYPE_NAME (yt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL)
yn = DECL_NAME (TYPE_NAME (yt));
else
yn = NULL_TREE;
if (xn != NULL_TREE && xn == yn)
return true;
}
return false;
}
/* Subroutine of detect_field_duplicates: add the fields of FIELDLIST
   to HTAB, giving errors for any duplicates.  Recurses into anonymous
   struct/union members so their fields are checked against the outer
   fields as well.  */
static void
detect_field_duplicates_hash (tree fieldlist,
			      hash_table<nofree_ptr_hash <tree_node> > *htab)
{
  tree x, y;
  tree_node **slot;

  for (x = fieldlist; x ; x = DECL_CHAIN (x))
    if ((y = DECL_NAME (x)) != NULL_TREE)
      {
	slot = htab->find_slot (y, INSERT);
	if (*slot)
	  {
	    error ("duplicate member %q+D", x);
	    /* Clear the name so later passes don't see the duplicate.  */
	    DECL_NAME (x) = NULL_TREE;
	  }
	*slot = y;
      }
    else if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
      {
	/* Anonymous struct/union: its members live in the enclosing
	   name space, so hash them too.  */
	detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab);

	/* When using -fplan9-extensions, an anonymous field whose
	   name is a typedef can duplicate a field name.  */
	if (flag_plan9_extensions
	    && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
	    && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)
	  {
	    tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x)));
	    slot = htab->find_slot (xn, INSERT);
	    if (*slot)
	      error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x)));
	    *slot = xn;
	  }
      }
}
/* Generate an error for any duplicate field names in FIELDLIST. Munge
the list such that this does not present a problem later. */
static void
detect_field_duplicates (tree fieldlist)
{
  tree f1, f2;
  int budget = 10;
  /* If the struct is the list of instance variables of an Objective-C
     class, then we need to check all the instance variables of
     superclasses when checking for duplicates (since you can't have
     an instance variable in a subclass with the same name as an
     instance variable in a superclass).  We pass on this job to the
     Objective-C compiler.  objc_detect_field_duplicates() will return
     false if we are not checking the list of instance variables and
     the C frontend should proceed with the standard field duplicate
     checks.  If we are checking the list of instance variables, the
     ObjC frontend will do the check, emit the errors if needed, and
     then return true.  */
  if (c_dialect_objc () && objc_detect_field_duplicates (false))
    return;
  /* Zero or one fields: nothing can be duplicated.  */
  if (fieldlist == NULL_TREE || DECL_CHAIN (fieldlist) == NULL_TREE)
    return;
  /* Scan a bounded prefix of the list; an anonymous struct/union
     member exhausts the budget immediately, forcing the hash-table
     path below.  */
  for (f1 = fieldlist; f1 != NULL_TREE && budget > 0; f1 = DECL_CHAIN (f1))
    {
      budget--;
      if (DECL_NAME (f1) == NULL_TREE
	  && RECORD_OR_UNION_TYPE_P (TREE_TYPE (f1)))
	budget = 0;
    }
  if (budget > 0)
    {
      /* "Few" fields and no anonymous aggregates: the quadratic scan
	 is cheaper than allocating a hash table.  */
      for (f1 = DECL_CHAIN (fieldlist); f1 != NULL_TREE; f1 = DECL_CHAIN (f1))
	{
	  /* When using -fplan9-extensions, we can have duplicates
	     between typedef names and fields.  */
	  bool check_p = DECL_NAME (f1) != NULL_TREE;
	  if (!check_p
	      && flag_plan9_extensions
	      && RECORD_OR_UNION_TYPE_P (TREE_TYPE (f1))
	      && TYPE_NAME (TREE_TYPE (f1)) != NULL_TREE
	      && TREE_CODE (TYPE_NAME (TREE_TYPE (f1))) == TYPE_DECL)
	    check_p = true;
	  if (!check_p)
	    continue;
	  for (f2 = fieldlist; f2 != f1; f2 = DECL_CHAIN (f2))
	    if (is_duplicate_field (f2, f1))
	      {
		error ("duplicate member %q+D", f1);
		DECL_NAME (f1) = NULL_TREE;
	      }
	}
    }
  else
    {
      hash_table<nofree_ptr_hash <tree_node> > htab (37);
      detect_field_duplicates_hash (fieldlist, &htab);
    }
}
/* Finish up struct info used by -Wc++-compat. */
static void
warn_cxx_compat_finish_struct (tree fieldlist, enum tree_code code,
			       location_t record_loc)
{
  unsigned int ix;
  tree x;
  struct c_binding *b;
  /* An empty struct/union has size 0 in C (a GNU extension) but size 1
     in C++, so its layout differs between the two languages.  */
  if (fieldlist == NULL_TREE)
    {
      if (code == RECORD_TYPE)
	warning_at (record_loc, OPT_Wc___compat,
		    "empty struct has size 0 in C, size 1 in C++");
      else
	warning_at (record_loc, OPT_Wc___compat,
		    "empty union has size 0 in C, size 1 in C++");
    }
  /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in
     the current struct.  We do this now at the end of the struct
     because the flag is used to issue visibility warnings, and we
     only want to issue those warnings if the type is referenced
     outside of the struct declaration.  */
  FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x)
    C_TYPE_DEFINED_IN_STRUCT (x) = 1;
  /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of
     typedefs used when declaring fields in this struct.  If the name
     of any of the fields is also a typedef name then the struct would
     not parse in C++, because the C++ lookup rules say that the
     typedef name would be looked up in the context of the struct, and
     would thus be the field rather than the typedef.  */
  if (!struct_parse_info->typedefs_seen.is_empty ()
      && fieldlist != NULL_TREE)
    {
      /* Use a hash_set<tree> using the name of the typedef.  We can use
	 a hash_set<tree> because identifiers are interned.  */
      hash_set<tree> tset;
      FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x)
	tset.add (DECL_NAME (x));
      for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x))
	{
	  if (DECL_NAME (x) != NULL_TREE
	      && tset.contains (DECL_NAME (x)))
	    {
	      warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat,
			  ("using %qD as both field and typedef name is "
			   "invalid in C++"),
			  x);
	      /* FIXME: It would be nice to report the location where
		 the typedef name is used.  */
	    }
	}
    }
  /* For each field which has a binding and which was not defined in
     an enclosing struct, clear the in_struct field.  */
  FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b)
    b->in_struct = 0;
}
/* Function to help qsort sort FIELD_DECLs by name order. */
static int
field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
if (DECL_NAME (*x) < DECL_NAME (*y))
return -1;
return 1;
}
/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
LOC is the location of the RECORD_TYPE or UNION_TYPE's definition.
FIELDLIST is a chain of FIELD_DECL nodes for the fields.
ATTRIBUTES are attributes to be applied to the structure.
ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when
the struct was started. */
tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
	       struct c_struct_parse_info *enclosing_struct_parse_info)
{
  tree x;
  bool toplevel = file_scope == current_scope;
  /* If this type was previously laid out as a forward reference,
     make sure we lay it out again.  */
  TYPE_SIZE (t) = NULL_TREE;
  decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
  /* Pedantically diagnose a struct/union with no named members.  A
     member of anonymous struct/union type counts as named in C11.  */
  if (pedantic)
    {
      for (x = fieldlist; x; x = DECL_CHAIN (x))
	{
	  if (DECL_NAME (x) != NULL_TREE)
	    break;
	  if (flag_isoc11 && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	    break;
	}
      if (x == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "union has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "union has no members");
	    }
	  else
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "struct has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "struct has no members");
	    }
	}
    }
  /* Install struct as DECL_CONTEXT of each field decl.
     Also process specified field sizes, found in the DECL_INITIAL,
     storing 0 there after the type has been changed to precision equal
     to its width, rather than the precision of the specified standard
     type.  (Correct layout requires the original type to have been preserved
     until now.)  */
  bool saw_named_field = false;
  for (x = fieldlist; x; x = DECL_CHAIN (x))
    {
      /* Skip fields whose type failed to parse.  */
      if (TREE_TYPE (x) == error_mark_node)
	continue;
      DECL_CONTEXT (x) = t;
      /* If any field is const, the structure type is pseudo-const.  */
      if (TREE_READONLY (x))
	C_TYPE_FIELDS_READONLY (t) = 1;
      else
	{
	  /* A field that is pseudo-const makes the structure likewise.  */
	  tree t1 = strip_array_types (TREE_TYPE (x));
	  if (RECORD_OR_UNION_TYPE_P (t1) && C_TYPE_FIELDS_READONLY (t1))
	    C_TYPE_FIELDS_READONLY (t) = 1;
	}
      /* Any field that is volatile means variables of this type must be
	 treated in some ways as volatile.  */
      if (TREE_THIS_VOLATILE (x))
	C_TYPE_FIELDS_VOLATILE (t) = 1;
      /* Any field of nominal variable size implies structure is too.  */
      if (C_DECL_VARIABLE_SIZE (x))
	C_TYPE_VARIABLE_SIZE (t) = 1;
      /* For a bit-field the parser left the width expression in
	 DECL_INITIAL; record it as the field's size for layout.  */
      if (DECL_C_BIT_FIELD (x))
	{
	  unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x));
	  DECL_SIZE (x) = bitsize_int (width);
	  DECL_BIT_FIELD (x) = 1;
	}
      /* A packed struct packs each field that could benefit from it.  */
      if (TYPE_PACKED (t)
	  && (DECL_BIT_FIELD (x)
	      || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT))
	DECL_PACKED (x) = 1;
      /* Detect flexible array member in an invalid context.  */
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
	  && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in union");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (DECL_CHAIN (x) != NULL_TREE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member not at end of struct");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (!saw_named_field)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in a struct with no named "
			"members");
	      TREE_TYPE (x) = error_mark_node;
	    }
	}
      if (pedantic && TREE_CODE (t) == RECORD_TYPE
	  && flexible_array_type_p (TREE_TYPE (x)))
	pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic,
		 "invalid use of structure with flexible array member");
      if (DECL_NAME (x)
	  || RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	saw_named_field = true;
    }
  detect_field_duplicates (fieldlist);
  /* Now we have the nearly final fieldlist.  Record it,
     then lay out the structure or union (including the fields).  */
  TYPE_FIELDS (t) = fieldlist;
  maybe_apply_pragma_scalar_storage_order (t);
  layout_type (t);
  if (TYPE_SIZE_UNIT (t)
      && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST
      && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t))
      && !valid_constant_size_p (TYPE_SIZE_UNIT (t)))
    error ("type %qT is too large", t);
  /* Give bit-fields their proper types and rewrite the type of array fields
     with scalar component if the enclosing type has reverse storage order.  */
  for (tree field = fieldlist; field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL
	  && DECL_INITIAL (field)
	  && TREE_TYPE (field) != error_mark_node)
	{
	  unsigned HOST_WIDE_INT width
	    = tree_to_uhwi (DECL_INITIAL (field));
	  tree type = TREE_TYPE (field);
	  if (width != TYPE_PRECISION (type))
	    {
	      TREE_TYPE (field)
		= c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type));
	      SET_DECL_MODE (field, TYPE_MODE (TREE_TYPE (field)));
	    }
	  /* The width has served its purpose; clear it (see the comment
	     above the DECL_CONTEXT loop).  */
	  DECL_INITIAL (field) = NULL_TREE;
	}
      else if (TYPE_REVERSE_STORAGE_ORDER (t)
	       && TREE_CODE (field) == FIELD_DECL
	       && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE)
	{
	  tree ftype = TREE_TYPE (field);
	  tree ctype = strip_array_types (ftype);
	  if (!RECORD_OR_UNION_TYPE_P (ctype) && TYPE_MODE (ctype) != QImode)
	    {
	      /* Copy each level of the array type so the reverse-order
		 flag can be set without affecting shared types.  */
	      tree fmain_type = TYPE_MAIN_VARIANT (ftype);
	      tree *typep = &fmain_type;
	      do {
		*typep = build_distinct_type_copy (*typep);
		TYPE_REVERSE_STORAGE_ORDER (*typep) = 1;
		typep = &TREE_TYPE (*typep);
	      } while (TREE_CODE (*typep) == ARRAY_TYPE);
	      TREE_TYPE (field)
		= c_build_qualified_type (fmain_type, TYPE_QUALS (ftype));
	    }
	}
    }
  /* Now we have the truly final field list.
     Store it in this type and in the variants.  */
  TYPE_FIELDS (t) = fieldlist;
  /* If there are lots of fields, sort so we can look through them fast.
     We arbitrarily consider 16 or more elts to be "a lot".  */
  {
    int len = 0;
    for (x = fieldlist; x; x = DECL_CHAIN (x))
      {
	if (len > 15 || DECL_NAME (x) == NULL)
	  break;
	len += 1;
      }
    if (len > 15)
      {
	tree *field_array;
	struct lang_type *space;
	struct sorted_fields_type *space2;
	len += list_length (x);
	/* Use the same allocation policy here that make_node uses, to
	   ensure that this lives as long as the rest of the struct decl.
	   All decls in an inline function need to be saved.  */
	space = ggc_cleared_alloc<struct lang_type> ();
	space2 = (sorted_fields_type *) ggc_internal_alloc
	  (sizeof (struct sorted_fields_type) + len * sizeof (tree));
	len = 0;
	space->s = space2;
	field_array = &space2->elts[0];
	for (x = fieldlist; x; x = DECL_CHAIN (x))
	  {
	    field_array[len++] = x;
	    /* If there is anonymous struct or union, break out of the loop.  */
	    if (DECL_NAME (x) == NULL)
	      break;
	  }
	/* Found no anonymous struct/union.  Add the TYPE_LANG_SPECIFIC.  */
	if (x == NULL)
	  {
	    TYPE_LANG_SPECIFIC (t) = space;
	    TYPE_LANG_SPECIFIC (t)->s->len = len;
	    field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
	    qsort (field_array, len, sizeof (tree), field_decl_cmp);
	  }
      }
  }
  /* Note: C_TYPE_INCOMPLETE_VARS overloads TYPE_VFIELD which is used
     in dwarf2out via rest_of_decl_compilation below and means
     something totally different.  Since we will be clearing
     C_TYPE_INCOMPLETE_VARS shortly after we iterate through them,
     clear it ahead of time and avoid problems in dwarf2out.  Ideally,
     C_TYPE_INCOMPLETE_VARS should use some language specific
     node.  */
  tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t));
  /* Propagate the now-final field/layout info to every variant.  */
  for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
    {
      TYPE_FIELDS (x) = TYPE_FIELDS (t);
      TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
      C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t);
      C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t);
      C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t);
      C_TYPE_INCOMPLETE_VARS (x) = NULL_TREE;
    }
  /* If this was supposed to be a transparent union, but we can't
     make it one, warn and turn off the flag.  */
  if (TREE_CODE (t) == UNION_TYPE
      && TYPE_TRANSPARENT_AGGR (t)
      && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))))
    {
      TYPE_TRANSPARENT_AGGR (t) = 0;
      warning_at (loc, 0, "union cannot be made transparent");
    }
  /* Update type location to the one of the definition, instead of e.g.
     a forward declaration.  */
  if (TYPE_STUB_DECL (t))
    DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc;
  /* Finish debugging output for this type.  */
  rest_of_type_compilation (t, toplevel);
  /* If this structure or union completes the type of any previous
     variable declaration, lay it out and output its rtl.  */
  for (x = incomplete_vars; x; x = TREE_CHAIN (x))
    {
      tree decl = TREE_VALUE (x);
      if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	layout_array_type (TREE_TYPE (decl));
      if (TREE_CODE (decl) != TYPE_DECL)
	{
	  layout_decl (decl, 0);
	  if (c_dialect_objc ())
	    objc_check_decl (decl);
	  rest_of_decl_compilation (decl, toplevel, 0);
	}
    }
  /* If we're inside a function proper, i.e. not file-scope and not still
     parsing parameters, then arrange for the size of a variable sized type
     to be bound now.  */
  if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE))
    add_stmt (build_stmt (loc,
			  DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t)));
  if (warn_cxx_compat)
    warn_cxx_compat_finish_struct (fieldlist, TREE_CODE (t), loc);
  /* Restore the parse state of the enclosing (outer) struct, if any.  */
  delete struct_parse_info;
  struct_parse_info = enclosing_struct_parse_info;
  /* If this struct is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (t);
  return t;
}
/* Communication channel between resort_sorted_fields and
   resort_field_decl_cmp: qsort passes no user data to its comparison
   function, so the pointer-remapping callback and its cookie are
   stashed in this file-static struct.  */
static struct {
  gt_pointer_operator new_value;
  void *cookie;
} resort_data;
/* This routine compares two fields like field_decl_cmp but using the
pointer operator in resort_data. */
static int
resort_field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
{
tree d1 = DECL_NAME (*x);
tree d2 = DECL_NAME (*y);
resort_data.new_value (&d1, resort_data.cookie);
resort_data.new_value (&d2, resort_data.cookie);
if (d1 < d2)
return -1;
}
return 1;
}
/* Resort DECL_SORTED_FIELDS because pointers have been reordered. */
void
resort_sorted_fields (void *obj,
		      void * ARG_UNUSED (orig_obj),
		      gt_pointer_operator new_value,
		      void *cookie)
{
  struct sorted_fields_type *fields = (struct sorted_fields_type *) obj;
  /* Stash the remapping callback where the comparison routine can
     find it, then re-sort the field array under the new ordering.  */
  resort_data.new_value = new_value;
  resort_data.cookie = cookie;
  qsort (fields->elts, fields->len, sizeof (tree), resort_field_decl_cmp);
}
/* Lay out the type T, and its element type, and so on. */
static void
layout_array_type (tree t)
{
  tree elt = TREE_TYPE (t);
  /* Lay out the innermost element type first, then each enclosing
     array type on the way back out.  */
  if (TREE_CODE (elt) == ARRAY_TYPE)
    layout_array_type (elt);
  layout_type (t);
}
/* Begin compiling the definition of an enumeration type.
NAME is its name (or null if anonymous).
LOC is the enum's location.
Returns the type object, as yet incomplete.
Also records info about it so that build_enumerator
may be used to declare the individual values as they are read. */
tree
start_enum (location_t loc, struct c_enum_contents *the_enum, tree name)
{
  tree enumtype = NULL_TREE;
  location_t enumloc = UNKNOWN_LOCATION;
  /* If this is the real definition for a previous forward reference,
     fill in the contents in the same object that used to be the
     forward reference.  */
  if (name != NULL_TREE)
    enumtype = lookup_tag (ENUMERAL_TYPE, name, true, &enumloc);
  if (enumtype == NULL_TREE || TREE_CODE (enumtype) != ENUMERAL_TYPE)
    {
      enumtype = make_node (ENUMERAL_TYPE);
      pushtag (loc, name, enumtype);
    }
  /* Update type location to the one of the definition, instead of e.g.
     a forward declaration.  */
  else if (TYPE_STUB_DECL (enumtype))
    {
      enumloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype));
      DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc;
    }
  if (C_TYPE_BEING_DEFINED (enumtype))
    error_at (loc, "nested redefinition of %<enum %E%>", name);
  C_TYPE_BEING_DEFINED (enumtype) = 1;
  if (TYPE_VALUES (enumtype) != NULL_TREE)
    {
      /* This enum is a named one that has been declared already.  */
      error_at (loc, "redeclaration of %<enum %E%>", name);
      if (enumloc != UNKNOWN_LOCATION)
	inform (enumloc, "originally defined here");
      /* Completely replace its old definition.
	 The old enumerators remain defined, however.  */
      TYPE_VALUES (enumtype) = NULL_TREE;
    }
  /* Enumerator values are assigned starting from zero.  */
  the_enum->enum_next_value = integer_zero_node;
  the_enum->enum_overflow = 0;
  /* With -fshort-enums, mark the type (and its variants) packed so
     layout picks the narrowest integer type that fits.  */
  if (flag_short_enums)
    for (tree v = TYPE_MAIN_VARIANT (enumtype); v; v = TYPE_NEXT_VARIANT (v))
      TYPE_PACKED (v) = 1;
  /* FIXME: This will issue a warning for a use of a type defined
     within sizeof in a statement expr.  This is not terribly serious
     as C++ doesn't permit statement exprs within sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));
  return enumtype;
}
/* After processing and defining all the values of an enumeration type,
install their decls in the enumeration type and finish it off.
ENUMTYPE is the type object, VALUES a list of decl-value pairs,
and ATTRIBUTES are the specified attributes.
Returns ENUMTYPE. */
tree
finish_enum (tree enumtype, tree values, tree attributes)
{
  tree pair, tem;
  tree minnode = NULL_TREE, maxnode = NULL_TREE;
  int precision;
  signop sign;
  bool toplevel = (file_scope == current_scope);
  struct lang_type *lt;
  decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
  /* Calculate the maximum value of any enumerator in this type.  */
  if (values == error_mark_node)
    minnode = maxnode = integer_zero_node;
  else
    {
      minnode = maxnode = TREE_VALUE (values);
      for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair))
	{
	  tree value = TREE_VALUE (pair);
	  if (tree_int_cst_lt (maxnode, value))
	    maxnode = value;
	  if (tree_int_cst_lt (value, minnode))
	    minnode = value;
	}
    }
  /* Construct the final type of this enumeration.  It is the same
     as one of the integral types - the narrowest one that fits, except
     that normally we only go as narrow as int - and signed iff any of
     the values are negative.  */
  sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED;
  precision = MAX (tree_int_cst_min_precision (minnode, sign),
		   tree_int_cst_min_precision (maxnode, sign));
  /* If the precision of the type was specified with an attribute and it
     was too small, give an error.  Otherwise, use it.  */
  if (TYPE_PRECISION (enumtype) && lookup_attribute ("mode", attributes))
    {
      if (precision > TYPE_PRECISION (enumtype))
	{
	  TYPE_PRECISION (enumtype) = 0;
	  error ("specified mode too small for enumeral values");
	}
      else
	precision = TYPE_PRECISION (enumtype);
    }
  else
    TYPE_PRECISION (enumtype) = 0;
  /* Pick the underlying integer type: plain int/unsigned int by
     default; narrower only for packed enums, wider only when the
     enumerator values require it.  */
  if (TYPE_PACKED (enumtype)
      || precision > TYPE_PRECISION (integer_type_node)
      || TYPE_PRECISION (enumtype))
    {
      tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0);
      if (tem == NULL)
	{
	  warning (0, "enumeration values exceed range of largest integer");
	  tem = long_long_integer_type_node;
	}
    }
  else
    tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node;
  /* Copy the chosen type's layout properties onto the enum, then lay
     it out from scratch (TYPE_SIZE cleared to force relayout).  */
  TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
  TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
  TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem);
  SET_TYPE_ALIGN (enumtype, TYPE_ALIGN (tem));
  TYPE_SIZE (enumtype) = NULL_TREE;
  TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem);
  layout_type (enumtype);
  if (values != error_mark_node)
    {
      /* Change the type of the enumerators to be the enum type.  We
	 need to do this irrespective of the size of the enum, for
	 proper type checking.  Replace the DECL_INITIALs of the
	 enumerators, and the value slots of the list, with copies
	 that have the enum type; they cannot be modified in place
	 because they may be shared (e.g.  integer_zero_node) Finally,
	 change the purpose slots to point to the names of the decls.  */
      for (pair = values; pair; pair = TREE_CHAIN (pair))
	{
	  tree enu = TREE_PURPOSE (pair);
	  tree ini = DECL_INITIAL (enu);
	  TREE_TYPE (enu) = enumtype;
	  /* The ISO C Standard mandates enumerators to have type int,
	     even though the underlying type of an enum type is
	     unspecified.  However, GCC allows enumerators of any
	     integer type as an extensions.  build_enumerator()
	     converts any enumerators that fit in an int to type int,
	     to avoid promotions to unsigned types when comparing
	     integers with enumerators that fit in the int range.
	     When -pedantic is given, build_enumerator() would have
	     already warned about those that don't fit.  Here we
	     convert the rest to the enumerator type.  */
	  if (TREE_TYPE (ini) != integer_type_node)
	    ini = convert (enumtype, ini);
	  DECL_INITIAL (enu) = ini;
	  TREE_PURPOSE (pair) = DECL_NAME (enu);
	  TREE_VALUE (pair) = ini;
	}
      TYPE_VALUES (enumtype) = values;
    }
  /* Record the min/max values so that we can warn about bit-field
     enumerations that are too small for the values.  */
  lt = ggc_cleared_alloc<struct lang_type> ();
  lt->enum_min = minnode;
  lt->enum_max = maxnode;
  TYPE_LANG_SPECIFIC (enumtype) = lt;
  /* Fix up all variant types of this enum type.  */
  for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
    {
      if (tem == enumtype)
	continue;
      TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
      TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
      TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
      TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
      TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
      SET_TYPE_MODE (tem, TYPE_MODE (enumtype));
      TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
      SET_TYPE_ALIGN (tem, TYPE_ALIGN (enumtype));
      TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype);
      TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype);
      TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype);
    }
  /* Finish debugging output for this type.  */
  rest_of_type_compilation (enumtype, toplevel);
  /* If this enum is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (enumtype);
  return enumtype;
}
/* Build and install a CONST_DECL for one value of the
current enumeration type (one that was begun with start_enum).
DECL_LOC is the location of the enumerator.
LOC is the location of the '=' operator if any, DECL_LOC otherwise.
Return a tree-list containing the CONST_DECL and its value.
Assignment of sequential values by default is handled here. */
tree
build_enumerator (location_t decl_loc, location_t loc,
		  struct c_enum_contents *the_enum, tree name, tree value)
{
  tree decl, type;
  /* Validate and default VALUE.  */
  if (value != NULL_TREE)
    {
      /* Don't issue more errors for error_mark_node (i.e. an
	 undeclared identifier) - just ignore the value expression.  */
      if (value == error_mark_node)
	value = NULL_TREE;
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
	{
	  error_at (loc, "enumerator value for %qE is not an integer constant",
		    name);
	  value = NULL_TREE;
	}
      else
	{
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      /* Try folding first; a value that folds to a constant
		 but was not a literal one is a pedantic extension.  */
	      value = c_fully_fold (value, false, NULL);
	      if (TREE_CODE (value) == INTEGER_CST)
		pedwarn (loc, OPT_Wpedantic,
			 "enumerator value for %qE is not an integer "
			 "constant expression", name);
	    }
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      error ("enumerator value for %qE is not an integer constant",
		     name);
	      value = NULL_TREE;
	    }
	  else
	    {
	      value = default_conversion (value);
	      constant_expression_warning (value);
	    }
	}
    }
  /* Default based on previous value.  */
  /* It should no longer be possible to have NON_LVALUE_EXPR
     in the default.  */
  if (value == NULL_TREE)
    {
      value = the_enum->enum_next_value;
      if (the_enum->enum_overflow)
	error_at (loc, "overflow in enumeration values");
    }
  /* Even though the underlying type of an enum is unspecified, the
     type of enumeration constants is explicitly defined as int
     (6.4.4.3/2 in the C99 Standard).  GCC allows any integer type as
     an extension.  */
  else if (!int_fits_type_p (value, integer_type_node))
    pedwarn (loc, OPT_Wpedantic,
	     "ISO C restricts enumerator values to range of %<int%>");
  /* The ISO C Standard mandates enumerators to have type int, even
     though the underlying type of an enum type is unspecified.
     However, GCC allows enumerators of any integer type as an
     extensions.  Here we convert any enumerators that fit in an int
     to type int, to avoid promotions to unsigned types when comparing
     integers with enumerators that fit in the int range.  When
     -pedantic is given, we would have already warned about those that
     don't fit.  We have to do this here rather than in finish_enum
     because this value may be used to define more enumerators.  */
  if (int_fits_type_p (value, integer_type_node))
    value = convert (integer_type_node, value);
  /* Set basis for default for next value.  */
  the_enum->enum_next_value
    = build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
		       PLUS_EXPR, value, integer_one_node, false);
  /* Wrap-around shows up as the successor comparing less than the
     value it was derived from.  */
  the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);
  /* Now create a declaration for the enum value name.  The decl's type
     is at least int-wide, unsigned only if the value's type already
     was at least that wide and unsigned.  */
  type = TREE_TYPE (value);
  type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
				      TYPE_PRECISION (integer_type_node)),
				 (TYPE_PRECISION (type)
				  >= TYPE_PRECISION (integer_type_node)
				  && TYPE_UNSIGNED (type)));
  decl = build_decl (decl_loc, CONST_DECL, name, type);
  DECL_INITIAL (decl) = convert (type, value);
  pushdecl (decl);
  return tree_cons (decl, value, NULL_TREE);
}
/* Create the FUNCTION_DECL for a function definition.
DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
the declaration; they describe the function's name and the type it returns,
but twisted together in a fashion that parallels the syntax of C.
This function creates a binding context for the function body
as well as setting up the FUNCTION_DECL in current_function_decl.
Returns true on success. If the DECLARATOR is not suitable for a function
(it defines a datum instead), we return false to report a parse error. */
bool
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
		tree attributes)
{
  tree decl1, old_decl;
  tree restype, resdecl;
  location_t loc;
  /* Reset per-function parsing state; these globals are updated as
     the function body is parsed.  */
  current_function_returns_value = 0;  /* Assume, until we see it does.  */
  current_function_returns_null = 0;
  current_function_returns_abnormally = 0;
  warn_about_return_type = 0;
  c_switch_stack = NULL;
  /* Indicate no valid break/continue context by setting these variables
     to some non-null, non-label value.  We'll notice and emit the proper
     error message in c_finish_bc_stmt.  */
  c_break_label = c_cont_label = size_zero_node;
  decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL,
			  &attributes, NULL, NULL, DEPRECATED_NORMAL);
  invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1);
  /* If the declarator is not suitable for a function definition,
     cause a syntax error.  */
  if (decl1 == NULL_TREE
      || TREE_CODE (decl1) != FUNCTION_DECL)
    return false;
  loc = DECL_SOURCE_LOCATION (decl1);
  c_decl_attributes (&decl1, attributes, 0);
  if (DECL_DECLARED_INLINE_P (decl1)
      && DECL_UNINLINABLE (decl1)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1)))
    warning_at (loc, OPT_Wattributes,
		"inline function %qD given attribute noinline",
		decl1);
  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl1) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1))
	  || current_function_decl))
    {
      if (declspecs->storage_class != csc_static)
	DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1);
    }
  announce_function (decl1);
  if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1))))
    {
      error_at (loc, "return type is an incomplete type");
      /* Make it return void instead.  */
      TREE_TYPE (decl1)
	= build_function_type (void_type_node,
			       TYPE_ARG_TYPES (TREE_TYPE (decl1)));
    }
  if (warn_about_return_type)
    warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int
			   : (warn_return_type ? OPT_Wreturn_type
			      : OPT_Wimplicit_int),
		      "return type defaults to %<int%>");
  /* Make the init_value nonzero so pushdecl knows this is not tentative.
     error_mark_node is replaced below (in pop_scope) with the BLOCK.  */
  DECL_INITIAL (decl1) = error_mark_node;
  /* A nested function is not global.  */
  if (current_function_decl != NULL_TREE)
    TREE_PUBLIC (decl1) = 0;
  /* If this definition isn't a prototype and we had a prototype declaration
     before, copy the arg type info from that prototype.  */
  old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope);
  if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL)
    old_decl = NULL_TREE;
  current_function_prototype_locus = UNKNOWN_LOCATION;
  current_function_prototype_built_in = false;
  current_function_prototype_arg_types = NULL_TREE;
  if (!prototype_p (TREE_TYPE (decl1)))
    {
      if (old_decl != NULL_TREE
	  && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
	  && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
			TREE_TYPE (TREE_TYPE (old_decl))))
	{
	  if (stdarg_p (TREE_TYPE (old_decl)))
	    {
	      warning_at (loc, 0, "%q+D defined as variadic function "
			  "without prototype", decl1);
	      locate_old_decl (old_decl);
	    }
	  TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl),
					      TREE_TYPE (decl1));
	  current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl);
	  current_function_prototype_built_in
	    = C_DECL_BUILTIN_PROTOTYPE (old_decl);
	  current_function_prototype_arg_types
	    = TYPE_ARG_TYPES (TREE_TYPE (decl1));
	}
      if (TREE_PUBLIC (decl1))
	{
	  /* If there is an external prototype declaration of this
	     function, record its location but do not copy information
	     to this decl.  This may be an invisible declaration
	     (built-in or in a scope which has finished) or simply
	     have more refined argument types than any declaration
	     found above.  */
	  struct c_binding *b;
	  for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed)
	    if (B_IN_SCOPE (b, external_scope))
	      break;
	  if (b)
	    {
	      tree ext_decl, ext_type;
	      ext_decl = b->decl;
	      ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl);
	      if (TREE_CODE (ext_type) == FUNCTION_TYPE
		  && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
				TREE_TYPE (ext_type)))
		{
		  current_function_prototype_locus
		    = DECL_SOURCE_LOCATION (ext_decl);
		  current_function_prototype_built_in
		    = C_DECL_BUILTIN_PROTOTYPE (ext_decl);
		  current_function_prototype_arg_types
		    = TYPE_ARG_TYPES (ext_type);
		}
	    }
	}
    }
  /* Optionally warn of old-fashioned def with no previous prototype.  */
  if (warn_strict_prototypes
      && old_decl != error_mark_node
      && !prototype_p (TREE_TYPE (decl1))
      && C_DECL_ISNT_PROTOTYPE (old_decl))
    warning_at (loc, OPT_Wstrict_prototypes,
		"function declaration isn%'t a prototype");
  /* Optionally warn of any global def with no previous prototype.  */
  else if (warn_missing_prototypes
	   && old_decl != error_mark_node
	   && TREE_PUBLIC (decl1)
	   && !MAIN_NAME_P (DECL_NAME (decl1))
	   && C_DECL_ISNT_PROTOTYPE (old_decl)
	   && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_prototypes,
		"no previous prototype for %qD", decl1);
  /* Optionally warn of any def with no previous prototype
     if the function has already been used.  */
  else if (warn_missing_prototypes
	   && old_decl != NULL_TREE
	   && old_decl != error_mark_node
	   && TREE_USED (old_decl)
	   && !prototype_p (TREE_TYPE (old_decl)))
    warning_at (loc, OPT_Wmissing_prototypes,
		"%qD was used with no prototype before its definition", decl1);
  /* Optionally warn of any global def with no previous declaration.  */
  else if (warn_missing_declarations
	   && TREE_PUBLIC (decl1)
	   && old_decl == NULL_TREE
	   && !MAIN_NAME_P (DECL_NAME (decl1))
	   && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_declarations,
		"no previous declaration for %qD",
		decl1);
  /* Optionally warn of any def with no previous declaration
     if the function has already been used.  */
  else if (warn_missing_declarations
	   && old_decl != NULL_TREE
	   && old_decl != error_mark_node
	   && TREE_USED (old_decl)
	   && C_DECL_IMPLICIT (old_decl))
    warning_at (loc, OPT_Wmissing_declarations,
		"%qD was used with no declaration before its definition", decl1);
  /* This function exists in static storage.
     (This does not mean `static' in the C sense!)  */
  TREE_STATIC (decl1) = 1;
  /* This is the earliest point at which we might know the assembler
     name of the function.  Thus, if it's set before this, die horribly.  */
  gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1));
  /* If #pragma weak was used, mark the decl weak now.  */
  if (current_scope == file_scope)
    maybe_apply_pragma_weak (decl1);
  /* Warn for unlikely, improbable, or stupid declarations of `main'.  */
  if (warn_main && MAIN_NAME_P (DECL_NAME (decl1)))
    {
      if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
	  != integer_type_node)
	pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1);
      else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1))))
	pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD",
		 decl1);
      check_main_parameter_types (decl1);
      if (!TREE_PUBLIC (decl1))
	pedwarn (loc, OPT_Wmain,
		 "%qD is normally a non-static function", decl1);
    }
  /* Record the decl so that the function name is defined.
     If we already have a decl for this name, and it is a FUNCTION_DECL,
     use the old decl.  */
  current_function_decl = pushdecl (decl1);
  /* Open the parameter scope and create the artificial RESULT_DECL
     that will hold the function's return value.  */
  push_scope ();
  declare_parm_level ();
  restype = TREE_TYPE (TREE_TYPE (current_function_decl));
  resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype);
  DECL_ARTIFICIAL (resdecl) = 1;
  DECL_IGNORED_P (resdecl) = 1;
  DECL_RESULT (current_function_decl) = resdecl;
  start_fname_decls ();
  return true;
}
/* Subroutine of store_parm_decls which handles new-style function
   definitions (prototype format).  The parms already have decls, so we
   need only record them as in effect and complain if any redundant
   old-style parm decls were written.  */
static void
store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  tree decl;
  c_arg_tag *tag;
  unsigned ix;

  /* In a prototyped definition the parameter scope should be empty at
     this point; any bindings already present can only be stray
     old-style declarations written between the ')' and the '{'.  */
  if (current_scope->bindings)
    {
      error_at (DECL_SOURCE_LOCATION (fndecl),
		"old-style parameter declarations in prototyped "
		"function definition");

      /* Get rid of the old-style declarations.  */
      pop_scope ();
      push_scope ();
    }
  /* Don't issue this warning for nested functions, and don't issue this
     warning if we got here because ARG_INFO_TYPES was error_mark_node
     (this happens when a function definition has just an ellipsis in
     its parameter list).  */
  else if (!in_system_header_at (input_location)
	   && !current_function_scope
	   && arg_info->types != error_mark_node)
    warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional,
		"traditional C rejects ISO C style function definitions");

  /* Now make all the parameter declarations visible in the function body.
     We can bypass most of the grunt work of pushdecl.  */
  for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	{
	  bind (DECL_NAME (decl), decl, current_scope,
		/*invisible=*/false, /*nested=*/false,
		UNKNOWN_LOCATION);
	  /* TREE_USED set here means the decl was flagged earlier
	     (e.g. by __attribute__((unused))); don't warn for those.  */
	  if (!TREE_USED (decl))
	    warn_if_shadowing (decl);
	}
      else
	error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted");
    }

  /* Record the parameter list in the function declaration.  */
  DECL_ARGUMENTS (fndecl) = arg_info->parms;

  /* Now make all the ancillary declarations visible, likewise.  */
  for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	/* A FUNCTION_DECL here is a nested extern declaration; mark it
	   "nested" so the binding machinery treats it accordingly.  */
	bind (DECL_NAME (decl), decl, current_scope,
	      /*invisible=*/false,
	      /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL),
	      UNKNOWN_LOCATION);
    }

  /* And all the tag declarations.  */
  FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
    if (tag->id)
      bind (tag->id, tag->type, current_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}
/* Subroutine of store_parm_decls which handles old-style function
   definitions (separate parameter list and declarations).

   FNDECL is the function being defined; ARG_INFO carries the list of
   parameter identifiers (ARG_INFO->parms) that appeared between the
   parentheses.  Matches identifiers to the declarations written after
   the ')' , defaults undeclared parameters to int, chains the
   resulting PARM_DECLs into DECL_ARGUMENTS, and reconciles them with
   any previously seen prototype.  */
static void
store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  struct c_binding *b;
  tree parm, decl, last;
  tree parmids = arg_info->parms;
  hash_set<tree> seen_args;

  if (!in_system_header_at (input_location))
    warning_at (DECL_SOURCE_LOCATION (fndecl),
		OPT_Wold_style_definition, "old-style function definition");

  /* Match each formal parameter name with its declaration.  Save each
     decl in the appropriate TREE_PURPOSE slot of the parmids chain.  */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    {
      if (TREE_VALUE (parm) == NULL_TREE)
	{
	  error_at (DECL_SOURCE_LOCATION (fndecl),
		    "parameter name missing from parameter list");
	  TREE_PURPOSE (parm) = NULL_TREE;
	  continue;
	}

      b = I_SYMBOL_BINDING (TREE_VALUE (parm));
      if (b && B_IN_CURRENT_SCOPE (b))
	{
	  /* The identifier was declared in the old-style declaration
	     section; validate the declaration found.  */
	  decl = b->decl;
	  /* Skip erroneous parameters.  */
	  if (decl == error_mark_node)
	    continue;
	  /* If we got something other than a PARM_DECL it is an error.  */
	  if (TREE_CODE (decl) != PARM_DECL)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"%qD declared as a non-parameter", decl);
	      continue;
	    }
	  /* If the declaration is already marked, we have a duplicate
	     name.  Complain and ignore the duplicate.  */
	  else if (seen_args.contains (decl))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"multiple parameters named %qD", decl);
	      TREE_PURPOSE (parm) = NULL_TREE;
	      continue;
	    }
	  /* If the declaration says "void", complain and turn it into
	     an int.  */
	  else if (VOID_TYPE_P (TREE_TYPE (decl)))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"parameter %qD declared with void type", decl);
	      TREE_TYPE (decl) = integer_type_node;
	      DECL_ARG_TYPE (decl) = integer_type_node;
	      layout_decl (decl, 0);
	    }
	  warn_if_shadowing (decl);
	}
      /* If no declaration found, default to int.  */
      else
	{
	  /* FIXME diagnostics: This should be the location of the argument,
	     not the FNDECL.  E.g., for an old-style declaration

	     int f10(v) { blah; }

	     We should use the location of the V, not the F10.
	     Unfortunately, the V is an IDENTIFIER_NODE which has no
	     location.  In the future we need locations for c_arg_info
	     entries.

	     See gcc.dg/Wshadow-3.c for an example of this problem.  */
	  decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
			     PARM_DECL, TREE_VALUE (parm), integer_type_node);
	  DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
	  pushdecl (decl);
	  warn_if_shadowing (decl);

	  /* C99 made implicit int a constraint violation, so the
	     diagnostic is a pedwarn there and a warning in C90.  */
	  if (flag_isoc99)
	    pedwarn (DECL_SOURCE_LOCATION (decl),
		     OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
		     decl);
	  else
	    warning_at (DECL_SOURCE_LOCATION (decl),
			OPT_Wmissing_parameter_type,
			"type of %qD defaults to %<int%>", decl);
	}

      TREE_PURPOSE (parm) = decl;
      seen_args.add (decl);
    }

  /* Now examine the parms chain for incomplete declarations
     and declarations with no corresponding names.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      parm = b->decl;
      if (TREE_CODE (parm) != PARM_DECL)
	continue;

      if (TREE_TYPE (parm) != error_mark_node
	  && !COMPLETE_TYPE_P (TREE_TYPE (parm)))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "parameter %qD has incomplete type", parm);
	  TREE_TYPE (parm) = error_mark_node;
	}

      if (!seen_args.contains (parm))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "declaration for parameter %qD but no such parameter",
		    parm);

	  /* Pretend the parameter was not missing.
	     This gets us to a standard state and minimizes
	     further error messages.  */
	  parmids = chainon (parmids, tree_cons (parm, 0, 0));
	}
    }

  /* Chain the declarations together in the order of the list of
     names.  Store that chain in the function decl, replacing the
     list of names.  Update the current scope to match.  */
  DECL_ARGUMENTS (fndecl) = NULL_TREE;

  /* Find the first entry that actually got a decl (entries with
     errors above have a null TREE_PURPOSE and are skipped).  */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    if (TREE_PURPOSE (parm))
      break;
  if (parm && TREE_PURPOSE (parm))
    {
      last = TREE_PURPOSE (parm);
      DECL_ARGUMENTS (fndecl) = last;

      for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm))
	if (TREE_PURPOSE (parm))
	  {
	    DECL_CHAIN (last) = TREE_PURPOSE (parm);
	    last = TREE_PURPOSE (parm);
	  }
      DECL_CHAIN (last) = NULL_TREE;
    }

  /* If there was a previous prototype,
     set the DECL_ARG_TYPE of each argument according to
     the type previously specified, and report any mismatches.  */

  if (current_function_prototype_arg_types)
    {
      tree type;
      /* Walk parameters and prototype types in lockstep; the loop
	 condition keeps going while either list has entries left
	 (ignoring a trailing void/error in the prototype), so a
	 length mismatch is caught inside the loop body.  */
      for (parm = DECL_ARGUMENTS (fndecl),
	     type = current_function_prototype_arg_types;
	   parm || (type != NULL_TREE
		    && TREE_VALUE (type) != error_mark_node
		    && TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node);
	   parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
	{
	  if (parm == NULL_TREE
	      || type == NULL_TREE
	      || (TREE_VALUE (type) != error_mark_node
		  && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node))
	    {
	      if (current_function_prototype_built_in)
		warning_at (DECL_SOURCE_LOCATION (fndecl),
			    0, "number of arguments doesn%'t match "
			    "built-in prototype");
	      else
		{
		  /* FIXME diagnostics: This should be the location of
		     FNDECL, but there is bug when a prototype is
		     declared inside function context, but defined
		     outside of it (e.g., gcc.dg/pr15698-2.c).  In
		     which case FNDECL gets the location of the
		     prototype, not the definition.  */
		  error_at (input_location,
			    "number of arguments doesn%'t match prototype");

		  error_at (current_function_prototype_locus,
			    "prototype declaration");
		}
	      break;
	    }
	  /* Type for passing arg must be consistent with that
	     declared for the arg.  ISO C says we take the unqualified
	     type for parameters declared with qualified type.  */
	  if (TREE_TYPE (parm) != error_mark_node
	      && TREE_VALUE (type) != error_mark_node
	      && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   != TYPE_ATOMIC (TREE_VALUE (type)))
		  || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
				 TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
	    {
	      if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   == TYPE_ATOMIC (TREE_VALUE (type)))
		  && (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
		      == TYPE_MAIN_VARIANT (TREE_VALUE (type))))
		{
		  /* Adjust argument to match prototype.  E.g. a previous
		     `int foo(float);' prototype causes
		     `int foo(x) float x; {...}' to be treated like
		     `int foo(float x) {...}'.  This is particularly
		     useful for argument types like uid_t.  */
		  DECL_ARG_TYPE (parm) = TREE_TYPE (parm);

		  if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl))
		      && INTEGRAL_TYPE_P (TREE_TYPE (parm))
		      && (TYPE_PRECISION (TREE_TYPE (parm))
			  < TYPE_PRECISION (integer_type_node)))
		    DECL_ARG_TYPE (parm)
		      = c_type_promotes_to (TREE_TYPE (parm));

		  /* ??? Is it possible to get here with a
		     built-in prototype or will it always have
		     been diagnosed as conflicting with an
		     old-style definition and discarded?  */
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				OPT_Wpedantic, "promoted argument %qD "
				"doesn%'t match built-in prototype", parm);
		  else
		    {
		      pedwarn (DECL_SOURCE_LOCATION (parm),
			       OPT_Wpedantic, "promoted argument %qD "
			       "doesn%'t match prototype", parm);
		      pedwarn (current_function_prototype_locus, OPT_Wpedantic,
			       "prototype declaration");
		    }
		}
	      else
		{
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				0, "argument %qD doesn%'t match "
				"built-in prototype", parm);
		  else
		    {
		      error_at (DECL_SOURCE_LOCATION (parm),
				"argument %qD doesn%'t match prototype", parm);
		      error_at (current_function_prototype_locus,
				"prototype declaration");
		    }
		}
	    }
	}
      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = NULL_TREE;
    }

  /* Otherwise, create a prototype that would match.  */
  else
    {
      /* Build a TREE_LIST of the promoted argument types, terminated
	 by void_type_node, to serve as the actual-argument prototype.  */
      tree actual = NULL_TREE, last = NULL_TREE, type;

      for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
	{
	  type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE);
	  if (last)
	    TREE_CHAIN (last) = type;
	  else
	    actual = type;
	  last = type;
	}
      type = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
      if (last)
	TREE_CHAIN (last) = type;
      else
	actual = type;

      /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
	 of the type of this function, but we need to avoid having this
	 affect the types of other similarly-typed functions, so we must
	 first force the generation of an identical (but separate) type
	 node for the relevant function type.  The new node we create
	 will be a variant of the main variant of the original function
	 type.  */

      TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl));

      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
    }
}
/* Store parameter declarations passed in ARG_INFO into the current
   function declaration.  */

void
store_parm_decls_from (struct c_arg_info *arg_info)
{
  /* Stash ARG_INFO where store_parm_decls expects to find it;
     store_parm_decls clears the global again once it has consumed it.  */
  current_function_arg_info = arg_info;
  store_parm_decls ();
}
/* Called by walk_tree to look for and update context-less labels.
   DATA is the FUNCTION_DECL to install as the labels' context.  */

static tree
set_labels_context_r (tree *tp, int *walk_subtrees, void *data)
{
  tree node = *tp;

  if (TREE_CODE (node) == LABEL_EXPR)
    {
      tree label = LABEL_EXPR_LABEL (node);
      if (DECL_CONTEXT (label) == NULL_TREE)
	{
	  /* Adopt the orphan label and stop descending here.  */
	  DECL_CONTEXT (label) = static_cast<tree> (data);
	  *walk_subtrees = 0;
	}
    }
  return NULL_TREE;
}
/* Store the parameter declarations into the current function declaration.
   This is called after parsing the parameter declarations, before
   digesting the body of the function.

   For an old-style definition, construct a prototype out of the old-style
   parameter declarations and inject it into the function's type.  */

void
store_parm_decls (void)
{
  tree fndecl = current_function_decl;
  bool proto;

  /* The argument information block for FNDECL.  */
  struct c_arg_info *arg_info = current_function_arg_info;
  /* Consume the global so stale data cannot leak into a later call.  */
  current_function_arg_info = 0;

  /* True if this definition is written with a prototype.  Note:
     despite C99 6.7.5.3p14, we can *not* treat an empty argument
     list in a function definition as equivalent to (void) -- an
     empty argument list specifies the function has no parameters,
     but only (void) sets up a prototype for future calls.  */
  proto = arg_info->types != 0;

  if (proto)
    store_parm_decls_newstyle (fndecl, arg_info);
  else
    store_parm_decls_oldstyle (fndecl, arg_info);

  /* The next call to push_scope will be a function body.  */

  next_is_function_body = true;

  /* Write a record describing this function definition to the prototypes
     file (if requested).  */

  gen_aux_info_record (fndecl, 1, 0, proto);

  /* Initialize the RTL code for the function.  */
  allocate_struct_function (fndecl, false);

  /* cfun->language is needed later by maybe_warn_unused_local_typedefs;
     only allocate it when that warning is enabled.  */
  if (warn_unused_local_typedefs)
    cfun->language = ggc_cleared_alloc<language_function> ();

  /* Begin the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = push_stmt_list ();

  /* ??? Insert the contents of the pending sizes list into the function
     to be evaluated.  The only reason left to have this is
	void foo(int n, int array[n++])
     because we throw away the array type in favor of a pointer type, and
     thus won't naturally see the SAVE_EXPR containing the increment.  All
     other pending sizes would be handled by gimplify_parameters.  */
  if (arg_info->pending_sizes)
    {
      /* In very special circumstances, e.g. for code like
	   _Atomic int i = 5;
	   void f (int a[i += 2]) {}
	 we need to execute the atomic assignment on function entry.
	 But in this case, it is not just a straight store, it has the
	 op= form, which means that build_atomic_assign has generated
	 gotos, labels, etc.  Because at that time the function decl
	 for F has not been created yet, those labels do not have any
	 function context.  But we have the fndecl now, so update the
	 labels accordingly.  gimplify_expr would crash otherwise.  */
      walk_tree_without_duplicates (&arg_info->pending_sizes,
				    set_labels_context_r, fndecl);
      add_stmt (arg_info->pending_sizes);
    }
}
/* Store PARM_DECLs in PARMS into scope temporarily.  Used for
   c_finish_omp_declare_simd for function prototypes.  No diagnostics
   should be done.  */

void
temp_store_parm_decls (tree fndecl, tree parms)
{
  push_scope ();

  /* Make each named parameter visible in the new scope, attributing
     it to FNDECL; anonymous parameters are skipped.  */
  for (tree parm = parms; parm != NULL_TREE; parm = DECL_CHAIN (parm))
    {
      DECL_CONTEXT (parm) = fndecl;
      if (DECL_NAME (parm) != NULL_TREE)
	bind (DECL_NAME (parm), parm, current_scope,
	      /*invisible=*/false, /*nested=*/false,
	      UNKNOWN_LOCATION);
    }
}
/* Undo what temp_store_parm_decls did.  */

void
temp_pop_parm_decls (void)
{
  /* Clear all bindings in this temporary scope, so that
     pop_scope doesn't create a BLOCK.  */
  struct c_binding *b = current_scope->bindings;
  current_scope->bindings = NULL;
  for (; b; b = free_binding_and_advance (b))
    {
      /* Only PARM_DECLs (or erroneous decls) can have been bound here
	 by temp_store_parm_decls.  */
      gcc_assert (TREE_CODE (b->decl) == PARM_DECL
		  || b->decl == error_mark_node);
      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
      /* Re-expose whatever binding this parameter shadowed, and restore
	 the shadowed decl's type if the binding had stashed one.  */
      I_SYMBOL_BINDING (b->id) = b->shadowed;
      if (b->shadowed && b->shadowed->u.type)
	TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
    }
  pop_scope ();
}
/* Finish up a function declaration and compile that function
   all the way to assembler language output.  Then free the storage
   for the function definition.

   This is called after parsing the body of the function definition.  */

void
finish_function (void)
{
  tree fndecl = current_function_decl;

  if (c_dialect_objc ())
    objc_finish_function ();

  /* On targets that promote prototypes, record the promoted type of
     each narrow integral argument as its passing type.  */
  if (TREE_CODE (fndecl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (fndecl)))
    {
      tree args = DECL_ARGUMENTS (fndecl);
      for (; args; args = DECL_CHAIN (args))
	{
	  tree type = TREE_TYPE (args);
	  if (INTEGRAL_TYPE_P (type)
	      && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
	    DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	}
    }

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
    BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;

  /* Must mark the RESULT_DECL as being in this function.  */

  if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node)
    DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* C99 5.1.2.2.3: reaching the } of main returns 0, so emit an
     implicit "return 0;" for a hosted int-returning main.  */
  if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted
      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
      == integer_type_node && flag_isoc99)
    {
      /* Hack.  We don't want the middle-end to warn that this return
	 is unreachable, so we mark its location as special.  Using
	 UNKNOWN_LOCATION has the problem that it gets clobbered in
	 annotate_one_with_locus.  A cleaner solution might be to
	 ensure ! should_carry_locus_p (stmt), but that needs a flag.
      */
      c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE);
    }

  /* Tie off the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));

  finish_fname_decls ();

  /* Complain if there's just no return statement.  */
  if (warn_return_type
      && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
      && !current_function_returns_value && !current_function_returns_null
      /* Don't complain if we are no-return.  */
      && !current_function_returns_abnormally
      /* Don't complain if we are declared noreturn.  */
      && !TREE_THIS_VOLATILE (fndecl)
      /* Don't warn for main().  */
      && !MAIN_NAME_P (DECL_NAME (fndecl))
      /* Or if they didn't actually specify a return type.  */
      && !C_FUNCTION_IMPLICIT_INT (fndecl)
      /* Normally, with -Wreturn-type, flow will complain, but we might
	 optimize out static functions.  */
      && !TREE_PUBLIC (fndecl))
    {
      warning (OPT_Wreturn_type,
	       "no return statement in function returning non-void");
      /* Suppress later duplicate diagnostics on this decl.  */
      TREE_NO_WARNING (fndecl) = 1;
    }

  /* Complain about parameters that are only set, but never otherwise used.  */
  if (warn_unused_but_set_parameter)
    {
      tree decl;

      for (decl = DECL_ARGUMENTS (fndecl);
	   decl;
	   decl = DECL_CHAIN (decl))
	if (TREE_USED (decl)
	    && TREE_CODE (decl) == PARM_DECL
	    && !DECL_READ_P (decl)
	    && DECL_NAME (decl)
	    && !DECL_ARTIFICIAL (decl)
	    && !TREE_NO_WARNING (decl))
	  warning_at (DECL_SOURCE_LOCATION (decl),
		      OPT_Wunused_but_set_parameter,
		      "parameter %qD set but not used", decl);
    }

  /* Complain about locally defined typedefs that are not used in this
     function.  */
  maybe_warn_unused_local_typedefs ();

  /* Possibly warn about unused parameters.  */
  if (warn_unused_parameter)
    do_warn_unused_parameter (fndecl);

  /* Store the end of the function, so that we get good line number
     info for the epilogue.  */
  cfun->function_end_locus = input_location;

  /* Finalize the ELF visibility for the function.  */
  c_determine_visibility (fndecl);

  /* For GNU C extern inline functions disregard inline limits.  */
  if (DECL_EXTERNAL (fndecl)
      && DECL_DECLARED_INLINE_P (fndecl)
      && (flag_gnu89_inline
	  || lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (fndecl))))
    DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;

  /* Genericize before inlining.  Delay genericizing nested functions
     until their parent function is genericized.  Since finalizing
     requires GENERIC, delay that as well.  */

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node
      && !undef_nested_function)
    {
      if (!decl_function_context (fndecl))
	{
	  invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);
	  c_genericize (fndecl);

	  /* ??? Objc emits functions after finalizing the compilation unit.
	     This should be cleaned up later and this conditional removed.  */
	  if (symtab->global_info_ready)
	    {
	      cgraph_node::add_new_function (fndecl, false);
	      return;
	    }
	  cgraph_node::finalize_function (fndecl, false);
	}
      else
	{
	  /* Register this function with cgraph just far enough to get it
	    added to our parent's nested function list.  Handy, since the
	    C front end doesn't have such a list.  */
	  (void) cgraph_node::get_create (fndecl);
	}
    }

  if (!decl_function_context (fndecl))
    undef_nested_function = false;

  if (cfun->language != NULL)
    {
      ggc_free (cfun->language);
      cfun->language = NULL;
    }

  /* We're leaving the context of this function, so zap cfun.
     It's still in DECL_STRUCT_FUNCTION, and we'll restore it in
     tree_rest_of_compilation.  */
  set_cfun (NULL);
  invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION, current_function_decl);
  current_function_decl = NULL;
}
/* Check the declarations given in a for-loop for satisfying the C99
   constraints.  If exactly one such decl is found, return it.  LOC is
   the location of the opening parenthesis of the for loop.  The last
   parameter allows you to control the "for loop initial declarations
   are only allowed in C99 mode".  Normally, you should pass
   flag_isoc99 as that parameter.  But in some cases (Objective-C
   foreach loop, for example) we want to run the checks in this
   function even if not in C99 mode, so we allow the caller to turn
   off the error about not being in C99 mode.
*/

tree
check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error)
{
  struct c_binding *b;
  tree one_decl = NULL_TREE;
  int n_decls = 0;

  if (!turn_off_iso_c99_error)
    {
      /* Emit the -std hint only once per compilation.  */
      static bool hint = true;
      /* If we get here, declarations have been used in a for loop without
	 the C99 for loop scope.  This doesn't make much sense, so don't
	 allow it.  */
      error_at (loc, "%<for%> loop initial declarations "
		"are only allowed in C99 or C11 mode");
      if (hint)
	{
	  inform (loc,
		  "use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 "
		  "to compile your code");
	  hint = false;
	}
      return NULL_TREE;
    }
  /* C99 subclause 6.8.5 paragraph 3:

       [#3]  The  declaration  part  of  a for statement shall only
       declare identifiers for objects having storage class auto or
       register.

     It isn't clear whether, in this sentence, "identifiers" binds to
     "shall only declare" or to "objects" - that is, whether all identifiers
     declared must be identifiers for objects, or whether the restriction
     only applies to those that are.  (A question on this in comp.std.c
     in November 2000 received no answer.)  We implement the strictest
     interpretation, to avoid creating an extension which later causes
     problems.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      tree id = b->id;
      tree decl = b->decl;

      /* Anonymous bindings carry no identifier to diagnose.  */
      if (!id)
	continue;

      switch (TREE_CODE (decl))
	{
	case VAR_DECL:
	  {
	    location_t decl_loc = DECL_SOURCE_LOCATION (decl);
	    if (TREE_STATIC (decl))
	      error_at (decl_loc,
			"declaration of static variable %qD in %<for%> loop "
			"initial declaration", decl);
	    else if (DECL_EXTERNAL (decl))
	      error_at (decl_loc,
			"declaration of %<extern%> variable %qD in %<for%> loop "
			"initial declaration", decl);
	  }
	  break;

	case RECORD_TYPE:
	  error_at (loc,
		    "%<struct %E%> declared in %<for%> loop initial "
		    "declaration", id);
	  break;
	case UNION_TYPE:
	  error_at (loc,
		    "%<union %E%> declared in %<for%> loop initial declaration",
		    id);
	  break;
	case ENUMERAL_TYPE:
	  error_at (loc, "%<enum %E%> declared in %<for%> loop "
		    "initial declaration", id);
	  break;
	default:
	  error_at (loc, "declaration of non-variable "
		    "%qD in %<for%> loop initial declaration", decl);
	}

      n_decls++;
      one_decl = decl;
    }

  return n_decls == 1 ? one_decl : NULL_TREE;
}
/* Save and reinitialize the variables
   used during compilation of a C function.  */

void
c_push_function_context (void)
{
  struct language_function *save = cfun->language;

  /* cfun->language might have been already allocated by the use of
     -Wunused-local-typedefs.  In that case, just re-use it.  */
  if (save == NULL)
    cfun->language = save = ggc_cleared_alloc<language_function> ();

  /* Snapshot the statement tree first, then detach the current
     statement list by replacing it with a copy.  */
  save->base.x_stmt_tree = c_stmt_tree;
  c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);

  /* Stash the per-function parser state.  */
  save->x_break_label = c_break_label;
  save->x_cont_label = c_cont_label;
  save->x_switch_stack = c_switch_stack;
  save->arg_info = current_function_arg_info;
  save->returns_value = current_function_returns_value;
  save->returns_null = current_function_returns_null;
  save->returns_abnormally = current_function_returns_abnormally;
  save->warn_about_return_type = warn_about_return_type;

  push_function_context ();
}
/* Restore the variables used during compilation of a C function.  */

void
c_pop_function_context (void)
{
  pop_function_context ();

  struct language_function *save = cfun->language;

  /* When -Wunused-local-typedefs is in effect, cfun->languages is
     used to store data throughout the life time of the current cfun,
     So don't deallocate it.  */
  if (!warn_unused_local_typedefs)
    cfun->language = NULL;

  if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
      && DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
    {
      /* Stop pointing to the local nodes about to be freed.
	 But DECL_INITIAL must remain nonzero so we know this
	 was an actual function definition.  */
      DECL_INITIAL (current_function_decl) = error_mark_node;
      DECL_ARGUMENTS (current_function_decl) = NULL_TREE;
    }

  /* Re-install the saved parser state.  */
  c_stmt_tree = save->base.x_stmt_tree;
  save->base.x_stmt_tree.x_cur_stmt_list = NULL;
  c_break_label = save->x_break_label;
  c_cont_label = save->x_cont_label;
  c_switch_stack = save->x_switch_stack;
  current_function_arg_info = save->arg_info;
  current_function_returns_value = save->returns_value;
  current_function_returns_null = save->returns_null;
  current_function_returns_abnormally = save->returns_abnormally;
  warn_about_return_type = save->warn_about_return_type;
}
/* The functions below are required for functionality of doing
function at once processing in the C front end. Currently these
functions are not called from anywhere in the C front end, but as
these changes continue, that will change. */
/* Returns the stmt_tree (if any) to which statements are currently
   being added.  If there is no active statement-tree, NULL is
   returned.  */

stmt_tree
current_stmt_tree (void)
{
  /* The C front end keeps a single global statement tree.  */
  return &c_stmt_tree;
}
/* Return the global value of T as a symbol.  Walks T's binding stack
   outward and returns the first decl bound at file or external scope,
   or NULL_TREE if there is none.  */

tree
identifier_global_value (tree t)
{
  for (struct c_binding *binding = I_SYMBOL_BINDING (t);
       binding != NULL;
       binding = binding->shadowed)
    {
      if (B_IN_FILE_SCOPE (binding) || B_IN_EXTERNAL_SCOPE (binding))
	return binding->decl;
    }
  return NULL_TREE;
}
/* In C, the only C-linkage public declaration is at file scope.  */

tree
c_linkage_bindings (tree name)
{
  /* Delegate to the file/external-scope lookup.  */
  return identifier_global_value (name);
}
/* Record a builtin type for C. If NAME is non-NULL, it is the name used;
otherwise the name is found in ridpointers from RID_INDEX. */
void
record_builtin_type (enum rid rid_index, const char *name, tree type)
{
tree id, decl;
if (name == 0)
id = ridpointers[(int) rid_index];
else
id = get_identifier (name);
decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type);
pushdecl (decl);
if (debug_hooks->type_decl)
debug_hooks->type_decl (decl, false);
}
/* Build the void_list_node (void_type_node having been created).  */

tree
build_void_list_node (void)
{
  /* A one-element TREE_LIST holding void_type_node.  */
  return build_tree_list (NULL_TREE, void_type_node);
}
/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR.
   LOC is the parameter's source location.  */

struct c_parm *
build_c_parm (struct c_declspecs *specs, tree attrs,
	      struct c_declarator *declarator,
	      location_t loc)
{
  struct c_parm *parm = XOBNEW (&parser_obstack, struct c_parm);

  parm->loc = loc;
  parm->declarator = declarator;
  parm->attrs = attrs;
  parm->specs = specs;

  return parm;
}
/* Return a declarator with nested attributes.  TARGET is the inner
   declarator to which these attributes apply.  ATTRS are the
   attributes.  */

struct c_declarator *
build_attrs_declarator (tree attrs, struct c_declarator *target)
{
  struct c_declarator *d = XOBNEW (&parser_obstack, struct c_declarator);

  d->u.attrs = attrs;
  d->declarator = target;
  d->kind = cdk_attrs;

  return d;
}
/* Return a declarator for a function with arguments specified by ARGS
   and return type specified by TARGET.  */

struct c_declarator *
build_function_declarator (struct c_arg_info *args,
			   struct c_declarator *target)
{
  struct c_declarator *d = XOBNEW (&parser_obstack, struct c_declarator);

  d->u.arg_info = args;
  d->declarator = target;
  d->kind = cdk_function;

  return d;
}
/* Return a declarator for the identifier IDENT (which may be
   NULL_TREE for an abstract declarator).  */

struct c_declarator *
build_id_declarator (tree ident)
{
  struct c_declarator *d = XOBNEW (&parser_obstack, struct c_declarator);

  d->kind = cdk_id;
  d->u.id = ident;
  d->declarator = 0;
  /* Provisional location; callers may overwrite it with a more
     precise one.  */
  d->id_loc = input_location;

  return d;
}
/* Return something to represent absolute declarators containing a *.
TARGET is the absolute declarator that the * contains.
TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes
to apply to the pointer type. */
struct c_declarator *
make_pointer_declarator (struct c_declspecs *type_quals_attrs,
struct c_declarator *target)
{
tree attrs;
int quals = 0;
struct c_declarator *itarget = target;
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
if (type_quals_attrs)
{
attrs = type_quals_attrs->attrs;
quals = quals_from_declspecs (type_quals_attrs);
if (attrs != NULL_TREE)
itarget = build_attrs_declarator (attrs, target);
}
ret->kind = cdk_pointer;
ret->declarator = itarget;
ret->u.pointer_quals = quals;
return ret;
}
/* Return a pointer to a structure for an empty list of declaration
   specifiers.  */

struct c_declspecs *
build_null_declspecs (void)
{
  struct c_declspecs *specs = XOBNEW (&parser_obstack, struct c_declspecs);

  /* Zero everything, then set the fields whose "empty" value is not 0.  */
  memset (specs, 0, sizeof (*specs));
  specs->typespec_word = cts_none;
  specs->storage_class = csc_none;
  specs->typespec_kind = ctsk_none;
  specs->address_space = ADDR_SPACE_GENERIC;
  specs->align_log = -1;
  specs->expr_const_operands = true;

  return specs;
}
/* Add the address space AS to the declaration specifiers
   SPECS, returning SPECS.  LOCATION is the qualifier's location.  */

struct c_declspecs *
declspecs_add_addrspace (source_location location,
			 struct c_declspecs *specs, addr_space_t as)
{
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;

  /* Two different non-generic address spaces cannot be combined.  */
  bool conflicting = (!ADDR_SPACE_GENERIC_P (specs->address_space)
		      && specs->address_space != as);

  if (conflicting)
    error ("incompatible address space qualifiers %qs and %qs",
	   c_addr_space_name (as),
	   c_addr_space_name (specs->address_space));
  else
    {
      specs->address_space = as;
      specs->locations[cdw_address_space] = location;
    }
  return specs;
}
/* Add the type qualifier QUAL to the declaration specifiers SPECS,
   returning SPECS.  LOC is the qualifier's location; duplicates draw
   a C90 pedwarn or a -Wduplicate-decl-specifier warning.  */

struct c_declspecs *
declspecs_add_qual (source_location loc,
		    struct c_declspecs *specs, tree qual)
{
  enum rid i;
  bool dupe = false;
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  /* QUAL must be one of the reserved qualifier keywords.  */
  gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (qual));
  i = C_RID_CODE (qual);
  location_t prev_loc = UNKNOWN_LOCATION;
  /* For each qualifier: note whether it was already present, set its
     flag, and remember both the previous and the new location.  */
  switch (i)
    {
    case RID_CONST:
      dupe = specs->const_p;
      specs->const_p = true;
      prev_loc = specs->locations[cdw_const];
      specs->locations[cdw_const] = loc;
      break;
    case RID_VOLATILE:
      dupe = specs->volatile_p;
      specs->volatile_p = true;
      prev_loc = specs->locations[cdw_volatile];
      specs->locations[cdw_volatile] = loc;
      break;
    case RID_RESTRICT:
      dupe = specs->restrict_p;
      specs->restrict_p = true;
      prev_loc = specs->locations[cdw_restrict];
      specs->locations[cdw_restrict] = loc;
      break;
    case RID_ATOMIC:
      dupe = specs->atomic_p;
      specs->atomic_p = true;
      prev_loc = specs->locations[cdw_atomic];
      specs->locations[cdw_atomic] = loc;
      break;
    default:
      gcc_unreachable ();
    }
  if (dupe)
    {
      /* C90 rejects duplicate qualifiers; in later dialects emit the
	 optional -Wduplicate-decl-specifier warning instead, but only
	 when neither occurrence comes from a macro expansion (where
	 duplicates are usually intentional).  */
      bool warned = pedwarn_c90 (loc, OPT_Wpedantic,
				 "duplicate %qE declaration specifier", qual);
      if (!warned
	  && warn_duplicate_decl_specifier
	  && prev_loc >= RESERVED_LOCATION_COUNT
	  && !from_macro_expansion_at (prev_loc)
	  && !from_macro_expansion_at (loc))
	warning_at (loc, OPT_Wduplicate_decl_specifier,
		    "duplicate %qE declaration specifier", qual);
    }
  return specs;
}
/* Add the type specifier TYPE to the declaration specifiers SPECS,
   returning SPECS.  SPEC describes the specifier: TYPE may be a
   reserved-word IDENTIFIER_NODE ("int", "long", "_Bool", ...), a
   TYPE_DECL for a typedef name, a plain identifier that fails to name
   a type, a TYPE node (struct/union/enum/typeof) or an ERROR_MARK.
   All pairwise conflicts between type specifiers are diagnosed here,
   at the location LOC of the newly-seen specifier.  */

struct c_declspecs *
declspecs_add_type (location_t loc, struct c_declspecs *specs,
		    struct c_typespec spec)
{
  tree type = spec.spec;
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  specs->typespec_kind = spec.kind;
  if (TREE_DEPRECATED (type))
    specs->deprecated_p = true;

  /* Handle type specifier keywords.  */
  if (TREE_CODE (type) == IDENTIFIER_NODE
      && C_IS_RESERVED_WORD (type)
      && C_RID_CODE (type) != RID_CXX_COMPAT_WARN)
    {
      enum rid i = C_RID_CODE (type);
      if (specs->type)
	{
	  error_at (loc, "two or more data types in declaration specifiers");
	  return specs;
	}
      if ((int) i <= (int) RID_LAST_MODIFIER)
	{
	  /* "long", "short", "signed", "unsigned", "_Complex" or "_Sat".
	     These modify a (possibly still implicit) base type, so each
	     case checks it against every base type already recorded in
	     specs->typespec_word and against the conflicting modifiers.  */
	  bool dupe = false;
	  switch (i)
	    {
	    case RID_LONG:
	      /* A second "long" upgrades to "long long"; a third is an
		 error.  */
	      if (specs->long_long_p)
		{
		  error_at (loc, "%<long long long%> is too long for GCC");
		  break;
		}
	      if (specs->long_p)
		{
		  if (specs->typespec_word == cts_double)
		    {
		      error_at (loc,
				("both %<long long%> and %<double%> in "
				 "declaration specifiers"));
		      break;
		    }
		  pedwarn_c90 (loc, OPT_Wlong_long,
			       "ISO C90 does not support %<long long%>");
		  specs->long_long_p = 1;
		  specs->locations[cdw_long_long] = loc;
		  break;
		}
	      if (specs->short_p)
		error_at (loc,
			  ("both %<long%> and %<short%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<long%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<long%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_int_n)
		  error_at (loc,
			    ("both %<long%> and %<__int%d%> in "
			     "declaration specifiers"),
			    int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<long%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_char)
		error_at (loc,
			  ("both %<long%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<long%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_floatn_nx)
		error_at (loc,
			  ("both %<long%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<long%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<long%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<long%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->long_p = true;
		  specs->locations[cdw_long] = loc;
		}
	      break;
	    case RID_SHORT:
	      dupe = specs->short_p;
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<short%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<short%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<short%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_int_n)
		error_at (loc,
			  ("both %<short%> and %<__int%d%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<short%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_char)
		error_at (loc,
			  ("both %<short%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<short%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<short%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_floatn_nx)
		error_at (loc,
			  ("both %<short%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<short%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<short%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<short%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->short_p = true;
		  specs->locations[cdw_short] = loc;
		}
	      break;
	    case RID_SIGNED:
	      dupe = specs->signed_p;
	      if (specs->unsigned_p)
		error_at (loc,
			  ("both %<signed%> and %<unsigned%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<signed%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<signed%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<signed%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<signed%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<signed%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_floatn_nx)
		error_at (loc,
			  ("both %<signed%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<signed%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<signed%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<signed%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->signed_p = true;
		  specs->locations[cdw_signed] = loc;
		}
	      break;
	    case RID_UNSIGNED:
	      dupe = specs->unsigned_p;
	      if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<unsigned%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<unsigned%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<unsigned%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<unsigned%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<unsigned%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<unsigned%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_floatn_nx)
		error_at (loc,
			  ("both %<unsigned%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<unsigned%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<unsigned%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<unsigned%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->unsigned_p = true;
		  specs->locations[cdw_unsigned] = loc;
		}
	      break;
	    case RID_COMPLEX:
	      dupe = specs->complex_p;
	      if (!in_system_header_at (loc))
		pedwarn_c90 (loc, OPT_Wpedantic,
			     "ISO C90 does not support complex types");
	      if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<complex%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<complex%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<complex%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<complex%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<complex%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<complex%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_fract)
		error_at (loc,
			  ("both %<complex%> and %<_Fract%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_accum)
		error_at (loc,
			  ("both %<complex%> and %<_Accum%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<complex%> and %<_Sat%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->complex_p = true;
		  specs->locations[cdw_complex] = loc;
		}
	      break;
	    case RID_SAT:
	      dupe = specs->saturating_p;
	      pedwarn (loc, OPT_Wpedantic,
		       "ISO C does not support saturating types");
	      if (specs->typespec_word == cts_int_n)
		{
		  error_at (loc,
			    ("both %<_Sat%> and %<__int%d%> in "
			     "declaration specifiers"),
			    int_n_data[specs->int_n_idx].bitsize);
		}
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<_Sat%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<_Sat%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<_Sat%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_char)
		error_at (loc,
			  ("both %<_Sat%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_int)
		error_at (loc,
			  ("both %<_Sat%> and %<int%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<_Sat%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<_Sat%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_floatn_nx)
		error_at (loc,
			  ("both %<_Sat%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<_Sat%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<_Sat%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<_Sat%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<_Sat%> and %<complex%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->saturating_p = true;
		  specs->locations[cdw_saturating] = loc;
		}
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  if (dupe)
	    error_at (loc, "duplicate %qE", type);

	  return specs;
	}
      else
	{
	  /* "void", "_Bool", "char", "int", "float", "double",
	     "_FloatN", "_FloatNx", "_Decimal32", "__intN",
	     "_Decimal64", "_Decimal128", "_Fract", "_Accum" or
	     "__auto_type".  These are base type words; only one may
	     appear, and each is checked against the modifiers already
	     recorded.  */
	  if (specs->typespec_word != cts_none)
	    {
	      error_at (loc,
			"two or more data types in declaration specifiers");
	      return specs;
	    }
	  switch (i)
	    {
	    case RID_AUTO_TYPE:
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<complex%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_auto_type;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_INT_N_0:
	    case RID_INT_N_1:
	    case RID_INT_N_2:
	    case RID_INT_N_3:
	      specs->int_n_idx = i - RID_INT_N_0;
	      if (!in_system_header_at (input_location))
		pedwarn (loc, OPT_Wpedantic,
			 "ISO C does not support %<__int%d%> types",
			 int_n_data[specs->int_n_idx].bitsize);

	      if (specs->long_p)
		error_at (loc,
			  ("both %<__int%d%> and %<long%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<__int%d%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<__int%d%> and %<short%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (! int_n_enabled_p[specs->int_n_idx])
		{
		  /* Record the type word even though the target lacks
		     this __intN, to avoid cascading errors.  */
		  specs->typespec_word = cts_int_n;
		  error_at (loc,
			    "%<__int%d%> is not supported on this target",
			    int_n_data[specs->int_n_idx].bitsize);
		}
	      else
		{
		  specs->typespec_word = cts_int_n;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_VOID:
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<complex%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<void%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_void;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_BOOL:
	      if (!in_system_header_at (loc))
		pedwarn_c90 (loc, OPT_Wpedantic,
			     "ISO C90 does not support boolean types");
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<complex%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_bool;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_CHAR:
	      /* "signed"/"unsigned"/"complex" are legal with "char";
		 only length modifiers and "_Sat" conflict.  */
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<char%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_char;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_INT:
	      /* All modifiers except "_Sat" combine with "int".  */
	      if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<int%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_int;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_FLOAT:
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<float%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_float;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_DOUBLE:
	      /* A single "long" is valid ("long double"), so only
		 "long long" conflicts here.  */
	      if (specs->long_long_p)
		error_at (loc,
			  ("both %<long long%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<double%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_double;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    CASE_RID_FLOATN_NX:
	      specs->floatn_nx_idx = i - RID_FLOATN_NX_FIRST;
	      if (!in_system_header_at (input_location))
		pedwarn (loc, OPT_Wpedantic,
			 "ISO C does not support the %<_Float%d%s%> type",
			 floatn_nx_types[specs->floatn_nx_idx].n,
			 (floatn_nx_types[specs->floatn_nx_idx].extended
			  ? "x"
			  : ""));

	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<_Float%d%s%> in "
			   "declaration specifiers"),
			  floatn_nx_types[specs->floatn_nx_idx].n,
			  (floatn_nx_types[specs->floatn_nx_idx].extended
			   ? "x"
			   : ""));
	      else if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
		{
		  /* As for __intN: record the type word even when the
		     target lacks this _FloatN/_FloatNx.  */
		  specs->typespec_word = cts_floatn_nx;
		  error_at (loc,
			    "%<_Float%d%s%> is not supported on this target",
			    floatn_nx_types[specs->floatn_nx_idx].n,
			    (floatn_nx_types[specs->floatn_nx_idx].extended
			     ? "x"
			     : ""));
		}
	      else
		{
		  specs->typespec_word = cts_floatn_nx;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_DFLOAT32:
	    case RID_DFLOAT64:
	    case RID_DFLOAT128:
	      {
		const char *str;
		if (i == RID_DFLOAT32)
		  str = "_Decimal32";
		else if (i == RID_DFLOAT64)
		  str = "_Decimal64";
		else
		  str = "_Decimal128";
		if (specs->long_long_p)
		  error_at (loc,
			    ("both %<long long%> and %qs in "
			     "declaration specifiers"),
			    str);
		if (specs->long_p)
		  error_at (loc,
			    ("both %<long%> and %qs in "
			     "declaration specifiers"),
			    str);
		else if (specs->short_p)
		  error_at (loc,
			    ("both %<short%> and %qs in "
			     "declaration specifiers"),
			    str);
		else if (specs->signed_p)
		  error_at (loc,
			    ("both %<signed%> and %qs in "
			     "declaration specifiers"),
			    str);
		else if (specs->unsigned_p)
		  error_at (loc,
			    ("both %<unsigned%> and %qs in "
			     "declaration specifiers"),
			    str);
		else if (specs->complex_p)
		  error_at (loc,
			    ("both %<complex%> and %qs in "
			     "declaration specifiers"),
			    str);
		else if (specs->saturating_p)
		  error_at (loc,
			    ("both %<_Sat%> and %qs in "
			     "declaration specifiers"),
			    str);
		else if (i == RID_DFLOAT32)
		  specs->typespec_word = cts_dfloat32;
		else if (i == RID_DFLOAT64)
		  specs->typespec_word = cts_dfloat64;
		else
		  specs->typespec_word = cts_dfloat128;
		/* The location is recorded even when a conflict was
		   diagnosed above.  */
		specs->locations[cdw_typespec] = loc;
	      }
	      /* Target-support and pedantic diagnostics fire regardless
		 of any conflicts diagnosed above.  */
	      if (!targetm.decimal_float_supported_p ())
		error_at (loc,
			  ("decimal floating point not supported "
			   "for this target"));
	      pedwarn (loc, OPT_Wpedantic,
		       "ISO C does not support decimal floating point");
	      return specs;
	    case RID_FRACT:
	    case RID_ACCUM:
	      {
		const char *str;
		if (i == RID_FRACT)
		  str = "_Fract";
		else
		  str = "_Accum";
		if (specs->complex_p)
		  error_at (loc,
			    ("both %<complex%> and %qs in "
			     "declaration specifiers"),
			    str);
		else if (i == RID_FRACT)
		    specs->typespec_word = cts_fract;
		else
		    specs->typespec_word = cts_accum;
		specs->locations[cdw_typespec] = loc;
	      }
	      if (!targetm.fixed_point_supported_p ())
		error_at (loc,
			  "fixed-point types not supported for this target");
	      pedwarn (loc, OPT_Wpedantic,
		       "ISO C does not support fixed-point types");
	      return specs;
	    default:
	      /* ObjC reserved word "id", handled below.  */
	      break;
	    }
	}
    }

  /* Now we have a typedef (a TYPE_DECL node), an identifier (some
     form of ObjC type, cases such as "int" and "long" being handled
     above), a TYPE (struct, union, enum and typeof specifiers) or an
     ERROR_MARK.  In none of these cases may there have previously
     been any type specifiers.  */
  if (specs->type || specs->typespec_word != cts_none
      || specs->long_p || specs->short_p || specs->signed_p
      || specs->unsigned_p || specs->complex_p)
    error_at (loc, "two or more data types in declaration specifiers");
  else if (TREE_CODE (type) == TYPE_DECL)
    {
      if (TREE_TYPE (type) == error_mark_node)
	; /* Allow the type to default to int to avoid cascading errors.  */
      else
	{
	  specs->type = TREE_TYPE (type);
	  specs->decl_attr = DECL_ATTRIBUTES (type);
	  specs->typedef_p = true;
	  specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type);
	  specs->locations[cdw_typedef] = loc;

	  /* If this typedef name is defined in a struct, then a C++
	     lookup would return a different value.  */
	  if (warn_cxx_compat
	      && I_SYMBOL_BINDING (DECL_NAME (type))->in_struct)
	    warning_at (loc, OPT_Wc___compat,
			"C++ lookup of %qD would return a field, not a type",
			type);

	  /* If we are parsing a struct, record that a struct field
	     used a typedef.  */
	  if (warn_cxx_compat && struct_parse_info != NULL)
	    struct_parse_info->typedefs_seen.safe_push (type);
	}
    }
  else if (TREE_CODE (type) == IDENTIFIER_NODE)
    {
      /* An identifier that is not a reserved type word and not a
	 typedef name: diagnose unless it names a TYPE_DECL.  */
      tree t = lookup_name (type);
      if (!t || TREE_CODE (t) != TYPE_DECL)
	error_at (loc, "%qE fails to be a typedef or built in type", type);
      else if (TREE_TYPE (t) == error_mark_node)
	;
      else
	{
	  specs->type = TREE_TYPE (t);
	  specs->locations[cdw_typespec] = loc;
	}
    }
  else
    {
      if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof)
	{
	  specs->typedef_p = true;
	  specs->locations[cdw_typedef] = loc;
	  /* Accumulate typeof operands so their side effects are
	     evaluated, and track whether they are constant.  */
	  if (spec.expr)
	    {
	      if (specs->expr)
		specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr),
				      specs->expr, spec.expr);
	      else
		specs->expr = spec.expr;
	      specs->expr_const_operands &= spec.expr_const_operands;
	    }
	}
      specs->type = type;
    }

  return specs;
}
/* Add the storage class specifier or function specifier SCSPEC
   (an IDENTIFIER_NODE for a reserved word such as "static", "inline"
   or "_Thread_local") to the declaration specifiers SPECS, returning
   SPECS.  Diagnoses duplicates, conflicting storage classes, and
   invalid combinations with thread-local storage.  */

struct c_declspecs *
declspecs_add_scspec (source_location loc,
		      struct c_declspecs *specs,
		      tree scspec)
{
  enum rid i;
  enum c_storage_class n = csc_none;
  bool dupe = false;
  specs->declspecs_seen_p = true;
  gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (scspec));
  i = C_RID_CODE (scspec);
  /* C90 required storage classes to come first in a declaration.  */
  if (specs->non_sc_seen_p)
    warning (OPT_Wold_style_declaration,
	     "%qE is not at beginning of declaration", scspec);
  switch (i)
    {
    case RID_INLINE:
      /* C99 permits duplicate inline.  Although of doubtful utility,
	 it seems simplest to permit it in gnu89 mode as well, as
	 there is also little utility in maintaining this as a
	 difference between gnu89 and C99 inline.  */
      dupe = false;
      specs->inline_p = true;
      specs->locations[cdw_inline] = loc;
      break;
    case RID_NORETURN:
      /* Duplicate _Noreturn is permitted.  */
      dupe = false;
      specs->noreturn_p = true;
      specs->locations[cdw_noreturn] = loc;
      break;
    case RID_THREAD:
      /* "_Thread_local" or GNU "__thread"; valid only with extern,
	 static or no storage class.  */
      dupe = specs->thread_p;
      if (specs->storage_class == csc_auto)
	error ("%qE used with %<auto%>", scspec);
      else if (specs->storage_class == csc_register)
	error ("%qE used with %<register%>", scspec);
      else if (specs->storage_class == csc_typedef)
	error ("%qE used with %<typedef%>", scspec);
      else
	{
	  specs->thread_p = true;
	  specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec),
					 "__thread") == 0);
	  /* A diagnostic is not required for the use of this
	     identifier in the implementation namespace; only diagnose
	     it for the C11 spelling because of existing code using
	     the other spelling.  */
	  if (!specs->thread_gnu_p)
	    {
	      if (flag_isoc99)
		pedwarn_c99 (loc, OPT_Wpedantic,
			     "ISO C99 does not support %qE", scspec);
	      else
		pedwarn_c99 (loc, OPT_Wpedantic,
			     "ISO C90 does not support %qE", scspec);
	    }
	  specs->locations[cdw_thread] = loc;
	}
      break;
    case RID_AUTO:
      n = csc_auto;
      break;
    case RID_EXTERN:
      n = csc_extern;
      /* Diagnose "__thread extern".  */
      if (specs->thread_p && specs->thread_gnu_p)
	error ("%<__thread%> before %<extern%>");
      break;
    case RID_REGISTER:
      n = csc_register;
      break;
    case RID_STATIC:
      n = csc_static;
      /* Diagnose "__thread static".  */
      if (specs->thread_p && specs->thread_gnu_p)
	error ("%<__thread%> before %<static%>");
      break;
    case RID_TYPEDEF:
      n = csc_typedef;
      break;
    default:
      gcc_unreachable ();
    }
  /* A repeated storage class word ("static static") is a duplicate;
     a different one is the "multiple storage classes" error below.  */
  if (n != csc_none && n == specs->storage_class)
    dupe = true;
  if (dupe)
    {
      if (i == RID_THREAD)
	error ("duplicate %<_Thread_local%> or %<__thread%>");
      else
	error ("duplicate %qE", scspec);
    }
  if (n != csc_none)
    {
      if (specs->storage_class != csc_none && n != specs->storage_class)
	{
	  error ("multiple storage classes in declaration specifiers");
	}
      else
	{
	  specs->storage_class = n;
	  specs->locations[cdw_storage_class] = loc;
	  /* Thread-local storage requires extern or static; drop the
	     thread flag after diagnosing so later code stays sane.  */
	  if (n != csc_extern && n != csc_static && specs->thread_p)
	    {
	      error ("%qs used with %qE",
		     specs->thread_gnu_p ? "__thread" : "_Thread_local",
		     scspec);
	      specs->thread_p = false;
	    }
	}
    }
  return specs;
}
/* Attach the attribute list ATTRS to the declaration specifiers
   SPECS, returning SPECS.  The newly-seen attributes are chained in
   front of any attributes recorded earlier.  */

struct c_declspecs *
declspecs_add_attrs (source_location loc, struct c_declspecs *specs, tree attrs)
{
  specs->declspecs_seen_p = true;
  specs->locations[cdw_attributes] = loc;
  /* Prepend the new attributes to the existing chain.  */
  specs->attrs = chainon (attrs, specs->attrs);
  return specs;
}
/* Record an _Alignas specifier (expression ALIGN, or a type whose
   alignment is ALIGN) in the declaration specifiers SPECS, returning
   SPECS.  When several _Alignas specifiers appear, the strictest
   (largest) alignment wins.  */

struct c_declspecs *
declspecs_add_alignas (source_location loc,
		       struct c_declspecs *specs, tree align)
{
  specs->alignas_p = true;
  specs->locations[cdw_alignas] = loc;
  if (align == error_mark_node)
    return specs;

  /* check_user_alignment diagnoses bad values and yields the base-2
     logarithm of the requested alignment.  */
  int log2_align = check_user_alignment (align, true);
  if (specs->align_log < log2_align)
    specs->align_log = log2_align;
  return specs;
}
/* Combine "long", "short", "signed", "unsigned" and "_Complex" type
   specifiers with any other type specifier to determine the resulting
   type.  This is where ISO C checks on complex types are made, since
   "_Complex long" is a prefix of the valid ISO C type "_Complex long
   double".  On return SPECS->type holds the resolved type (with
   dummy fallbacks chosen after errors so parsing can continue).  */

struct c_declspecs *
finish_declspecs (struct c_declspecs *specs)
{
  /* If a type was specified as a whole, we have no modifiers and are
     done.  */
  if (specs->type != NULL_TREE)
    {
      gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);

      /* Set a dummy type.  */
      if (TREE_CODE (specs->type) == ERROR_MARK)
        specs->type = integer_type_node;
      return specs;
    }

  /* If none of "void", "_Bool", "char", "int", "float" or "double"
     has been specified, treat it as "int" unless "_Complex" is
     present and there are no other specifiers.  If we just have
     "_Complex", it is equivalent to "_Complex double", but e.g.
     "_Complex short" is equivalent to "_Complex short int".  */
  if (specs->typespec_word == cts_none)
    {
      if (specs->saturating_p)
	{
	  /* "_Sat" alone: diagnose, then pretend "_Fract" so a
	     plausible type still results.  */
	  error_at (specs->locations[cdw_saturating],
		    "%<_Sat%> is used without %<_Fract%> or %<_Accum%>");
	  if (!targetm.fixed_point_supported_p ())
	    error_at (specs->locations[cdw_saturating],
		      "fixed-point types not supported for this target");
	  specs->typespec_word = cts_fract;
	}
      else if (specs->long_p || specs->short_p
	       || specs->signed_p || specs->unsigned_p)
	{
	  specs->typespec_word = cts_int;
	}
      else if (specs->complex_p)
	{
	  specs->typespec_word = cts_double;
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support plain %<complex%> meaning "
		   "%<double complex%>");
	}
      else
	{
	  specs->typespec_word = cts_int;
	  specs->default_int_p = true;
	  /* We don't diagnose this here because grokdeclarator will
	     give more specific diagnostics according to whether it is
	     a function definition.  */
	}
    }

  /* If "signed" was specified, record this to distinguish "int" and
     "signed int" in the case of a bit-field with
     -funsigned-bitfields.  */
  specs->explicit_signed_p = specs->signed_p;

  /* Now compute the actual type.  The asserts in each case restate
     the combinations that declspecs_add_type already rejected.  */
  switch (specs->typespec_word)
    {
    case cts_auto_type:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      /* Type to be filled in later.  */
      break;
    case cts_void:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      specs->type = void_type_node;
      break;
    case cts_bool:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      specs->type = boolean_type_node;
      break;
    case cts_char:
      gcc_assert (!specs->long_p && !specs->short_p);
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      if (specs->signed_p)
	specs->type = signed_char_type_node;
      else if (specs->unsigned_p)
	specs->type = unsigned_char_type_node;
      else
	specs->type = char_type_node;
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_int_n:
      gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p);
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      /* Fall back to int when the target lacks this __intN (already
	 diagnosed in declspecs_add_type).  */
      if (! int_n_enabled_p[specs->int_n_idx])
	specs->type = integer_type_node;
      else
	specs->type = (specs->unsigned_p
		       ? int_n_trees[specs->int_n_idx].unsigned_type
		       : int_n_trees[specs->int_n_idx].signed_type);
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_int:
      gcc_assert (!(specs->long_p && specs->short_p));
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      if (specs->long_long_p)
	specs->type = (specs->unsigned_p
		       ? long_long_unsigned_type_node
		       : long_long_integer_type_node);
      else if (specs->long_p)
	specs->type = (specs->unsigned_p
		       ? long_unsigned_type_node
		       : long_integer_type_node);
      else if (specs->short_p)
	specs->type = (specs->unsigned_p
		       ? short_unsigned_type_node
		       : short_integer_type_node);
      else
	specs->type = (specs->unsigned_p
		       ? unsigned_type_node
		       : integer_type_node);
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_float:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      specs->type = (specs->complex_p
		     ? complex_float_type_node
		     : float_type_node);
      break;
    case cts_double:
      gcc_assert (!specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      if (specs->long_p)
	{
	  specs->type = (specs->complex_p
			 ? complex_long_double_type_node
			 : long_double_type_node);
	}
      else
	{
	  specs->type = (specs->complex_p
			 ? complex_double_type_node
			 : double_type_node);
	}
      break;
    case cts_floatn_nx:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      /* Fall back to int when the target lacks this _FloatN/_FloatNx
	 (already diagnosed in declspecs_add_type).  */
      if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
	specs->type = integer_type_node;
      else if (specs->complex_p)
	specs->type = COMPLEX_FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
      else
	specs->type = FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
      break;
    case cts_dfloat32:
    case cts_dfloat64:
    case cts_dfloat128:
      gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p && !specs->complex_p);
      if (specs->typespec_word == cts_dfloat32)
	specs->type = dfloat32_type_node;
      else if (specs->typespec_word == cts_dfloat64)
	specs->type = dfloat64_type_node;
      else
	specs->type = dfloat128_type_node;
      break;
    case cts_fract:
      gcc_assert (!specs->complex_p);
      /* Each _Fract variant selects one of 8 nodes by the cross of
	 saturation, length modifier and signedness.  */
      if (!targetm.fixed_point_supported_p ())
	specs->type = integer_type_node;
      else if (specs->saturating_p)
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_long_fract_type_node
			  : sat_long_long_fract_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_fract_type_node
			  : sat_long_fract_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_short_fract_type_node
			  : sat_short_fract_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_fract_type_node
			  : sat_fract_type_node;
	}
      else
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_long_fract_type_node
			  : long_long_fract_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_fract_type_node
			  : long_fract_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_short_fract_type_node
			  : short_fract_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? unsigned_fract_type_node
			  : fract_type_node;
	}
      break;
    case cts_accum:
      gcc_assert (!specs->complex_p);
      /* Mirror of the cts_fract selection above, for _Accum.  */
      if (!targetm.fixed_point_supported_p ())
	specs->type = integer_type_node;
      else if (specs->saturating_p)
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_long_accum_type_node
			  : sat_long_long_accum_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_accum_type_node
			  : sat_long_accum_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_short_accum_type_node
			  : sat_short_accum_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_accum_type_node
			  : sat_accum_type_node;
	}
      else
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_long_accum_type_node
			  : long_long_accum_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_accum_type_node
			  : long_accum_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_short_accum_type_node
			  : short_accum_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? unsigned_accum_type_node
			  : accum_type_node;
	}
      break;
    default:
      gcc_unreachable ();
    }

  return specs;
}
/* Perform final processing on one file scope's declarations (or the
   external scope's declarations), GLOBALS: diagnose used-but-undefined
   and unused static functions, then run the two-phase wrapup that
   decides which declarations must actually be output.  */

static void
c_write_global_declarations_1 (tree globals)
{
  tree decl;
  bool reconsider;

  /* Process the decls in the order they were written.  */
  for (decl = globals; decl; decl = DECL_CHAIN (decl))
    {
      /* Check for used but undefined static functions using the C
	 standard's definition of "used", and set TREE_NO_WARNING so
	 that check_global_declaration doesn't repeat the check.  */
      if (TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_INITIAL (decl) == NULL_TREE
	  && DECL_EXTERNAL (decl)
	  && !TREE_PUBLIC (decl))
	{
	  if (C_DECL_USED (decl))
	    {
	      pedwarn (input_location, 0, "%q+F used but never defined", decl);
	      TREE_NO_WARNING (decl) = 1;
	    }
	  /* For -Wunused-function warn about unused static prototypes.  */
	  else if (warn_unused_function
		   && ! DECL_ARTIFICIAL (decl)
		   && ! TREE_NO_WARNING (decl))
	    {
	      warning (OPT_Wunused_function,
		       "%q+F declared %<static%> but never defined", decl);
	      TREE_NO_WARNING (decl) = 1;
	    }
	}

      wrapup_global_declaration_1 (decl);
    }

  /* Phase 2 may mark additional declarations as needed (e.g. an
     inline function newly referenced), so iterate to a fixed point.  */
  do
    {
      reconsider = false;
      for (decl = globals; decl; decl = DECL_CHAIN (decl))
	reconsider |= wrapup_global_declaration_2 (decl);
    }
  while (reconsider);
}
/* Callback to collect a source_ref from a DECL.  */

static void
collect_source_ref_cb (tree decl)
{
  /* Compiler-generated builtins carry no user source location.  */
  if (DECL_IS_BUILTIN (decl))
    return;

  collect_source_ref (LOCATION_FILE (decl_sloc (decl, false)));
}
/* Preserve the external declarations scope across a garbage collect. */
static GTY(()) tree ext_block;
/* Collect all references relevant to SOURCE_FILE. */
static void
collect_all_refs (const char *source_file)
{
tree t;
unsigned i;
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file);
collect_ada_nodes (BLOCK_VARS (ext_block), source_file);
}
/* Iterate over all global declarations and call CALLBACK.  */

static void
for_each_global_decl (void (*callback) (tree decl))
{
  unsigned ix;
  tree tu;
  tree d;

  /* File-scope decls of each translation unit.  */
  FOR_EACH_VEC_ELT (*all_translation_units, ix, tu)
    for (d = BLOCK_VARS (DECL_INITIAL (tu)); d; d = TREE_CHAIN (d))
      callback (d);

  /* Then the external scope's decls.  */
  for (d = BLOCK_VARS (ext_block); d; d = TREE_CHAIN (d))
    callback (d);
}
/* Perform any final parser cleanups and generate initial debugging
   information.  */

void
c_parse_final_cleanups (void)
{
  tree t;
  unsigned i;

  /* We don't want to do this if generating a PCH.  */
  if (pch_file)
    return;

  /* Attribute the remaining work to the deferred phase rather than to
     parsing proper.  */
  timevar_stop (TV_PHASE_PARSING);
  timevar_start (TV_PHASE_DEFERRED);

  /* Do the Objective-C stuff.  This is where all the Objective-C
     module stuff gets generated (symtab, class/protocol/selector
     lists etc).  */
  if (c_dialect_objc ())
    objc_write_global_declarations ();

  /* Close the external scope.  ext_block is a GC root, so its
     contents stay live for the uses below.  */
  ext_block = pop_scope ();
  external_scope = 0;
  gcc_assert (!current_scope);

  /* Handle -fdump-ada-spec[-slim]. */
  if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
    {
      /* Build a table of files to generate specs for */
      if (flag_dump_ada_spec_slim)
	collect_source_ref (main_input_filename);
      else
	for_each_global_decl (collect_source_ref_cb);

      dump_ada_specs (collect_all_refs, NULL);
    }

  /* Process all file scopes in this compilation, and the external_scope,
     through wrapup_global_declarations.  */
  FOR_EACH_VEC_ELT (*all_translation_units, i, t)
    c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t)));
  c_write_global_declarations_1 (BLOCK_VARS (ext_block));

  timevar_stop (TV_PHASE_DEFERRED);
  timevar_start (TV_PHASE_PARSING);

  /* Allow the external scope to be garbage-collected now.  */
  ext_block = NULL;
}
/* Register reserved keyword WORD as qualifier for address space AS.  */

void
c_register_addr_space (const char *word, addr_space_t as)
{
  /* Address space qualifiers are only supported
     in C with GNU extensions enabled.  */
  if (c_dialect_objc () || flag_no_asm)
    return;

  int rid = RID_FIRST_ADDR_SPACE + as;
  tree id = get_identifier (word);

  C_SET_RID_CODE (id, rid);
  C_IS_RESERVED_WORD (id) = 1;
  ridpointers[rid] = id;
}
/* Return identifier to look up for omp declare reduction.  */

tree
c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id)
{
  const char *op = NULL;

  /* Builtin reduction operators have fixed spellings.  */
  switch (reduction_code)
    {
    case PLUS_EXPR: op = "+"; break;
    case MULT_EXPR: op = "*"; break;
    case MINUS_EXPR: op = "-"; break;
    case BIT_AND_EXPR: op = "&"; break;
    case BIT_XOR_EXPR: op = "^"; break;
    case BIT_IOR_EXPR: op = "|"; break;
    case TRUTH_ANDIF_EXPR: op = "&&"; break;
    case TRUTH_ORIF_EXPR: op = "||"; break;
    case MIN_EXPR: op = "min"; break;
    case MAX_EXPR: op = "max"; break;
    default: break;
    }

  if (op == NULL)
    {
      /* A user-defined reduction is named by an identifier.  */
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      op = IDENTIFIER_POINTER (reduction_id);
    }

  /* Mangle as "omp declare reduction <op>" so the name cannot clash
     with any ordinary identifier.  */
  const char prefix[] = "omp declare reduction ";
  size_t prefix_len = sizeof (prefix) - 1;
  size_t op_len = strlen (op);
  char *name = XALLOCAVEC (char, prefix_len + op_len + 1);
  memcpy (name, prefix, prefix_len);
  memcpy (name + prefix_len, op, op_len + 1);
  return get_identifier (name);
}
/* Lookup REDUCTION_ID in the current scope, or create an artificial
   VAR_DECL, bind it into the current scope and return it.  */

tree
c_omp_reduction_decl (tree reduction_id)
{
  struct c_binding *binding = I_SYMBOL_BINDING (reduction_id);

  /* Reuse an existing binding if this very scope already has one.  */
  if (binding != NULL && B_IN_CURRENT_SCOPE (binding))
    return binding->decl;

  /* Otherwise manufacture a placeholder variable to hang the
     reduction information off, and bind it here.  */
  tree placeholder = build_decl (BUILTINS_LOCATION, VAR_DECL,
				 reduction_id, integer_type_node);
  DECL_ARTIFICIAL (placeholder) = 1;
  DECL_EXTERNAL (placeholder) = 1;
  TREE_STATIC (placeholder) = 1;
  TREE_PUBLIC (placeholder) = 0;
  bind (reduction_id, placeholder, current_scope, true, false,
	BUILTINS_LOCATION);
  return placeholder;
}
/* Lookup REDUCTION_ID in the first scope where it has entry for TYPE.  */

tree
c_omp_reduction_lookup (tree reduction_id, tree type)
{
  /* Walk outwards through the chain of shadowed bindings; within each
     binding, DECL_INITIAL holds a TREE_LIST of (type, decl) pairs.  */
  for (struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
       b != NULL; b = b->shadowed)
    for (tree entry = DECL_INITIAL (b->decl); entry;
	 entry = TREE_CHAIN (entry))
      if (comptypes (TREE_PURPOSE (entry), type))
	return TREE_VALUE (entry);

  return error_mark_node;
}
/* Helper function called via walk_tree, to diagnose invalid
   #pragma omp declare reduction combiners or initializers.  */

tree
c_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  tree *vars = (tree *) data;

  /* Anything that is not a user variable, or that is one of the two
     permitted placeholder variables, is fine: keep walking.  */
  if (!SSA_VAR_P (*tp)
      || DECL_ARTIFICIAL (*tp)
      || *tp == vars[0]
      || *tp == vars[1])
    return NULL_TREE;

  /* Which message to issue depends on whether vars[] holds the
     combiner placeholders (omp_out/omp_in) or the initializer ones.  */
  location_t loc = DECL_SOURCE_LOCATION (vars[0]);
  if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0)
    error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
	      "variable %qD which is not %<omp_out%> nor %<omp_in%>",
	      *tp);
  else
    error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
	      "to variable %qD which is not %<omp_priv%> nor "
	      "%<omp_orig%>",
	      *tp);
  return *tp;
}
#include "gt-c-c-decl.h"
|
absgradMEX_test.c | /**************************************************************************
MEX function to compute the approximate gradient of the absolute value
Author: R. Marc Lebel
Contact: mlebel@gmail.com
Date: 11/2010
Useage: wc2 = absgradMEX(wc,smooth)
Input:
wc: numeric array (single/double; real/complex)
smooth: small smoothing factor to prevent Inf
Output:
wc2: numeric array
**************************************************************************/
#include <stdio.h>
#include "mex.h"
#include <omp.h>
#include <math.h>
#include <string.h>
#include "fast_mxArray_setup.c"
/* Fast approximate 1/sqrt(x) for positive finite x (the classic
   "magic constant" method).  Two Newton-Raphson steps give roughly
   5-6 significant digits.
   FIX: the original punned the float through `long`, which (a) is
   8 bytes on LP64 systems such as 64-bit Ubuntu -- the likely cause of
   the "Not working on ubuntu?!" notes below -- and (b) violates strict
   aliasing.  Punning is now done with memcpy through a 32-bit
   unsigned integer (unsigned int is 32 bits on all supported MATLAB
   platforms).  */
float Q_rsqrt( float number )
{
    unsigned int i;                    /* 32-bit image of the float's bits */
    float x2, y;
    const float threehalfs = 1.5F;

    x2 = number * 0.5F;
    y  = number;
    memcpy(&i, &y, sizeof i);          /* well-defined bit reinterpretation */
    i = 0x5f375a86 - ( i >> 1 );       /* initial guess via exponent trick */
    memcpy(&y, &i, sizeof y);
    y = y * ( threehalfs - ( x2 * y * y ) );   /* Newton-Raphson step 1 */
    y = y * ( threehalfs - ( x2 * y * y ) );   /* Newton-Raphson step 2 */
    return y;
}
/* Fast approximate 1/sqrt(x) computed in double precision (three
   Newton-Raphson steps), returned narrowed to float as before.
   FIXES over the original:
   - `1.5D` / `0.5D` are not valid C literals; plain double literals
     are used instead.
   - the bits were punned through `long`, which is only 4 bytes on
     LLP64 (Windows) targets; memcpy through unsigned long long is
     portable and aliasing-safe.
   - the 64-bit magic constant needs the ULL suffix to avoid
     truncation warnings/errors on 32-bit long targets.  */
float Q_dsqrt( float number )
{
    unsigned long long i;              /* 64-bit image of the double's bits */
    double x2, y;
    const double threehalfs = 1.5;

    x2 = number * 0.5;
    y  = number;
    memcpy(&i, &y, sizeof i);
    i = 0x5fe6eb50c7b537a9ULL - ( i >> 1 );   /* initial guess */
    memcpy(&y, &i, sizeof y);
    y = y * ( threehalfs - ( x2 * y * y ) );  /* Newton-Raphson step 1 */
    y = y * ( threehalfs - ( x2 * y * y ) );  /* Newton-Raphson step 2 */
    y = y * ( threehalfs - ( x2 * y * y ) );  /* Newton-Raphson step 3 */
    return y;                                 /* implicit double->float */
}
/* Gateway for: wc2 = absgradMEX(wc, smooth).
   Computes, elementwise, x / sqrt(|x|^2 + smooth) -- a smoothed
   gradient of the absolute value -- for real or complex input in
   single or double precision.  The smoothing term keeps the
   denominator away from zero.  */
void mexFunction(int nlhs, mxArray *left[], int nrhs, const mxArray *right[]) {
    
    /* Declare variables */
    mwSize nD, elem, cmplx, *size2;
    long long i;                       /* loop index (signed for OpenMP) */
    mxClassID precision;
    const mwSize *size;
    mxComplexity cmplx2;
    mxArray *X, *Y;                    /* NOTE(review): unused */
    double *pXr, *pXi, *pYi, *pYr, *pS, Sd, denom;
    float *pXrf, *pXif, *pYif, *pYrf, *pSf, Sf, denomf;
    
    /* Get size of the input array (first right-hand argument) */
    nD = mxGetNumberOfDimensions(right[0]);
    size = mxGetDimensions(right[0]);
    elem = mxGetNumberOfElements(right[0]);
    
    /*mexPrintf("nD: %i\n",nD);
    mexPrintf("size: %i\n",size[0]);
    mexPrintf("elem: %i\n",elem);*/
    
    /* Perform strange memory copy to replicate the size (needed for create_array_d/f) */
    size2 = (mwSize *)mxMalloc(nD*sizeof(mwSize));
    memcpy(size2,size,nD*sizeof(mwSize));
    
    /* Test for complex and obtain data class */
    cmplx = mxIsComplex(right[0]);
    precision = mxGetClassID(right[0]);
    cmplx2 = cmplx ? mxCOMPLEX:mxREAL;
    
    /* Test to ensure smoothing factor is real */
    if (mxIsComplex(right[1]))
        mexErrMsgTxt("Inputs 1 is complex");
    
    /* Get pointers to input array and create output.
       Only the pointers matching the input's class get initialised;
       the code below must branch on `precision`/`cmplx` the same way
       before touching them.  */
    if (precision == mxDOUBLE_CLASS) {
        pXr = mxGetPr(right[0]);
        if (cmplx)
            pXi = mxGetPi(right[0]);
        /* Create output and assign pointers */
        create_array_d(&(left[0]), &pYr, &pYi, nD, size2, cmplx2, 0);
    }
    else {
        pXrf = mxGetData(right[0]);
        if (cmplx)
            pXif = mxGetImagData(right[0]);
        /* Create output and assign pointers */
        create_array_f(&(left[0]), &pYrf, &pYif, nD, size2, cmplx2, 0);
    }
    
    /* Get pointer to input scalar (smoothing factor) */
    if (mxGetClassID(right[1]) == mxDOUBLE_CLASS)
        pS = mxGetData(right[1]);
    else
        pSf = mxGetData(right[1]);
    
    /* Convert smoothing factor to the class of the data array */
    if (precision == mxDOUBLE_CLASS) {
        if (mxGetClassID(right[1]) == mxDOUBLE_CLASS)
            Sd = pS[0];
        else
            Sd = (double) pSf[0];
    }
    else {
        if (mxGetClassID(right[1]) == mxDOUBLE_CLASS)
            Sf = (float) pS[0];
        else
            Sf = pSf[0];
    }
    
    /* Loop through and compute the gradient of the absolute value.
       Each element is independent, hence the simple parallel-for.  */
    if (precision == mxDOUBLE_CLASS) {
        if (cmplx) {
            #pragma omp parallel for private(i,denom)
            for (i=0; i<elem; i++) {
                denom = 1.0/sqrt(pXr[i]*pXr[i] + pXi[i]*pXi[i] + Sd);
                pYr[i] = pXr[i] * denom;
                pYi[i] = pXi[i] * denom;
            }
        }
        else {
            #pragma omp parallel for private(i)
            for (i=0; i<elem; i++) {
                pYr[i] = pXr[i]/sqrt(pXr[i]*pXr[i] + Sd);
            }
        }
    }
    else {
        /* Single precision uses the fast inverse-sqrt helper.
           NOTE(review): the "Not working on ubuntu?!" remarks are
           consistent with Q_rsqrt's bit-punning through `long`
           (8 bytes on 64-bit Linux) -- verify against that helper.  */
        if (cmplx) {
            #pragma omp parallel for private(i,denomf)
            for (i=0; i<elem; i++) {
                denomf = Q_rsqrt(pXrf[i]*pXrf[i] + pXif[i]*pXif[i] + Sf); /* Not working on ubuntu?! */
                /*denomf = 1.0/sqrt(pXrf[i]*pXrf[i] + pXif[i]*pXif[i] + Sf);*/
                pYrf[i] = pXrf[i] * denomf;
                pYif[i] = pXif[i] * denomf;
            }
        }
        else {
            #pragma omp parallel for private(i,denomf)
            for (i=0; i<elem; i++) {
                /*pYrf[i] = pXrf[i]/sqrt(pXrf[i]*pXrf[i] + Sf);*/
                denomf = Q_rsqrt(pXrf[i]*pXrf[i] + Sf);
                pYrf[i] = pXrf[i] * denomf; /* Not working on ubuntu?! */
            }
        }
    }
    
    /* Free memory */
    mxFree(size2);
}
|
target_x86.h | /*****************************************************************************
*
* target_x86.h
*
* Edinburgh Soft Matter and Statistical Physics Group and
* Edinburgh Parallel Computing Centre
*
* (c) 2018-2021 The University of Edinburgh
*
* Contributing authors:
* Alan Gray (alang@epcc.ed.ac.uk)
* Kevin Stratford (kevin@epcc.ed.ac.uk)
*
*****************************************************************************/
#ifndef LUDWIG_TARGET_X86_H
#define LUDWIG_TARGET_X86_H
/* Cache-configuration preferences; placeholders on the x86 target
   (values presumably mirror CUDA's cudaFuncCache -- confirm). */
typedef enum tdpFuncCache_enum {
  tdpFuncCachePreferNone = 0,
  tdpFuncCachePreferShared = 1,
  tdpFuncCachePreferL1 = 2,
  tdpFuncCachePreferEqual = 3}
  tdpFuncCache;

/* Direction argument for memory copies (cf. cudaMemcpyKind). */
typedef enum tdpMemcpyKind_enum {
  tdpMemcpyHostToHost = 0,
  tdpMemcpyHostToDevice = 1,
  tdpMemcpyDeviceToHost = 2,
  tdpMemcpyDeviceToDevice = 3,
  tdpMemcpyDefault = 4}
  tdpMemcpyKind;

/* Device attributes (potentially a lot of them) */
/* Numbering is sparse; apparently chosen to match the corresponding
   CUDA attribute enumerators -- confirm against cudaDeviceAttr. */
typedef enum tdpDeviceAttr_enum {
  tdpDevAttrMaxThreadsPerBlock = 1,
  tdpDevAttrMaxBlockDimX = 2,
  tdpDevAttrMaxBlockDimY = 3,
  tdpDevAttrMaxBlockDimZ = 4,
  tdpDevAttrMaxGridDimX = 5,
  tdpDevAttrMaxGridDimY = 6,
  tdpDevAttrMaxGridDimZ = 7,
  tdpDevAttrManagedMemory = 83
} tdpDeviceAttr;

/* tdpGetLastError() can return... */
enum tdpError {
  tdpSuccess = 0,
  tdpErrorMissingConfiguration = 1,
  tdpErrorMemoryAllocation = 2,
  tdpErrorInitializationError = 3,
  tdpErrorLaunchFailure = 4,
  tdpErrorLaunchTimeout = 6,
  tdpErrorLaunchOutOfResources = 7,
  tdpErrorInvalidDeviceFunction = 8,
  tdpErrorInvalidConfiguration = 9,
  tdpErrorInvalidDevice = 10,
  tdpErrorInvalidValue = 11,
  tdpErrorInvalidPitchValue = 12,
  tdpErrorInvalidSymbol = 13,
  tdpErrorUnmapBufferObjectFailed = 15,
  tdpErrorInvalidHostPointer = 16,
  tdpErrorInvalidDevicePointer = 17,
  tdpErrorInvalidTexture = 18,
  tdpErrorInvalidTextureBinding = 19,
  tdpErrorInvalidChannelDescriptor = 20,
  tdpErrorInvalidMemcpyDirection = 21,
  tdpErrorInvalidFilterSetting = 26,
  tdpErrorUnknown = 30,
  tdpErrorInvalidResourceHandle = 33,
  tdpErrorInsufficientDriver = 35,
  tdpErrorSetOnActiveProcess = 36,
  tdpErrorInvalidSurface = 37,
  tdpErrorNoDevice = 38,
  tdpErrorStartupFailure = 0x7f
};

/* Flag bits for host allocation / managed-memory attachment. */
#define tdpHostAllocDefault       0x00
#define tdpHostAllocMapped        0x02
#define tdpHostAllocPortable      0x01
#define tdpHostAllocWriteCombined 0x04

#define tdpMemAttachGlobal        0x01
#define tdpMemAttachHost          0x02
#define tdpMemAttachSingle        0x04
/* Device memory qualifiers / execution space qualifiers.
   On the x86 target these expand to nothing (or `static` for
   __shared__, giving the variable per-compilation-unit persistence). */

#define __host__
#define __global__
#define __shared__ static
#define __device__
#define __constant__

/* FIX: the original test spelt the macro __STDC__VERSION__ (double
   underscore before VERSION) and compared against 19901; the standard
   macro is __STDC_VERSION__ and C99 defines it as 199901L.  Both
   branches are currently identical, so behaviour is unchanged, but the
   condition now actually tests what it claims to.  */
#if (__STDC_VERSION__ >= 199901L)
#define __forceinline__
#define __noinline__
#else
#define __forceinline__
#define __noinline__
#endif
/* Built-in variable implementation. */
typedef struct tdp_uint3_s uint3;
typedef struct tdp_dim3_s dim3;
struct tdp_uint3_s {
unsigned int x;
unsigned int y;
unsigned int z;
};
struct tdp_dim3_s {
int x;
int y;
int z;
};
extern dim3 gridDim;
extern dim3 blockDim;
extern dim3 threadIdx;
extern dim3 blockIdx;
/* Other vector types (as required) */
typedef struct tdp_double3_s double3;
struct tdp_double3_s {
double x;
double y;
double z;
};
#ifdef _OPENMP
/* These names are reserved and must be ... */
#pragma omp threadprivate(gridDim, blockDim, threadIdx, blockIdx)
#endif
typedef enum tdpError tdpError_t; /* an enum type */
typedef int * tdpStream_t; /* an opaque handle */
/* Incomplete. */
struct tdpDeviceProp {
int maxThreadsPerBlock;
int maxThreadsDim[3];
};
#define tdpSymbol(x) &(x)
void tdp_x86_prelaunch(dim3 nblocks, dim3 nthreads);
void tdp_x86_postlaunch(void);
#ifdef _OPENMP

/* Help to expand OpenMP clauses which need to be retained as strings */
#define xstr(a) str(a)
#define str(a) #a

/* Have OpenMP */
#include <omp.h>

#define TARGET_MAX_THREADS_PER_BLOCK 256
#define TARGET_PAD 8

/* A "block" is one OpenMP parallel region, so a team barrier stands in
   for CUDA's __syncthreads(). */
#define __syncthreads() _Pragma("omp barrier")
#define __threadfence() /* only __syncthreads() is a barrier */

/* Kernel launch is a __VA_ARGS__ macro, thus: */
#define tdpLaunchKernel(kernel, nblocks, nthreads, shmem, stream, ...) \
  _Pragma("omp parallel") \
  { \
    tdp_x86_prelaunch(nblocks, nthreads); \
    kernel(__VA_ARGS__); \
    tdp_x86_postlaunch(); \
  }

/* OpenMP work sharing */
#define for_simt_parallel(index, ndata, stride) \
  _Pragma("omp for nowait") \
  for (index = 0; index < (ndata); index += (stride))

/* SIMD safe loops */
#define for_simd_v(iv, nsimdvl) \
  _Pragma("omp simd") \
  for (iv = 0; iv < (nsimdvl); ++iv)

/* The reduction clause must survive as a string, hence xstr(). */
#define for_simd_v_reduction(iv, nsimdvl, clause) \
  _Pragma(xstr(omp simd reduction(clause))) \
  for (iv = 0; iv < nsimdvl; ++iv)

#else /* Not OPENMP */

#define TARGET_MAX_THREADS_PER_BLOCK 1
#define TARGET_PAD 1

/* Serial stand-ins for the OpenMP runtime API. */
#define omp_get_num_threads() 1
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#define omp_set_num_threads(n)

/* Serial build: no other threads, so these are no-ops. */
#define __syncthreads()
#define __threadfence()

/* NULL implementation */
/* NOTE(review): unlike the OpenMP variant this expansion is not
   brace-wrapped, so e.g. "if (cond) tdpLaunchKernel(...);" would guard
   only the prelaunch call -- confirm call sites use it only as a bare
   statement. */
/* Kernel launch is a __VA_ARGS__ macro, thus: */
#define tdpLaunchKernel(kernel, nblocks, nthreads, shmem, stream, ...) \
  tdp_x86_prelaunch(nblocks, nthreads); \
  kernel(__VA_ARGS__); \
  tdp_x86_postlaunch();

/* "Worksharing" is provided by a loop */
#define for_simt_parallel(index, ndata, stride) \
  for (index = 0; index < (ndata); index += (stride))

/* Vectorised loops */
#define for_simd_v(iv, nsimdvl) for (iv = 0; iv < (nsimdvl); iv++)
#define for_simd_v_reduction(iv, nsimdvl, clause) \
  for (iv = 0; iv < nsimdvl; iv++)

#endif /* _OPENMP */

#define tdp_get_max_threads() omp_get_max_threads()

/* For "critical section" it's handy to use atomicCAS() and atomicExch()
 * in place (togther with __threadfence()); until some better mechanism
 * is available */
/* NOTE(review): these are no-op stand-ins -- atomicCAS just yields the
   old value and atomicExch expands to nothing; they provide no
   atomicity on the host. */
#define atomicCAS(address, old, new) (old)
#define atomicExch(address, val)

#endif /* LUDWIG_TARGET_X86_H */
|
my_lib_nd.c | #ifndef no_openmp
#include <omp.h>
#endif
#include <TH/TH.h>
#include <THTensor.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#define real float
/* 1-D linear sampling, tensor layout Batch x X x Channel.
   inputImages (B, X_in, C) is sampled at the normalised coordinates in
   grids (B, X_out, .), writing output (B, X_out, C).
   zero_boundary == 1: samples falling outside the image read as zero;
   otherwise the sample index is clamped to the valid range.
   Always returns 1.  */
int BilinearSamplerBXC_updateOutput_1D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int zero_boundary)
{
  // *B*atch, *X*-coors, *C*hannel
  //inputImages->(size|stride).(.).
  //THTensor_$1(inputImages,$2)
  int batchsize = THFloatTensor_size(inputImages,0);
  int inputImages_X = THFloatTensor_size(inputImages,1);
  int inputImages_C = THFloatTensor_size(inputImages,2);
  int output_X = THFloatTensor_size(output,1);
  int output_C = THFloatTensor_size(output,2);  // NOTE(review): unused
  bool zero_boundary_bool = zero_boundary == 1;
  int output_strideBatch = THFloatTensor_stride(output,0);
  int output_stride_X = THFloatTensor_stride(output,1);
  int output_stride_C = THFloatTensor_stride(output,2);  // NOTE(review): unused
  int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
  int inputImages_stride_X = THFloatTensor_stride(inputImages,1);
  int inputImages_stride_C = THFloatTensor_stride(inputImages,2);  // NOTE(review): unused
  int grids_strideBatch = THFloatTensor_stride(grids,0);
  int grids_stride_X = THFloatTensor_stride(grids,1);
  int grids_stride_C = THFloatTensor_stride(grids,2);  // NOTE(review): unused
  real *inputImages_data, *output_data, *grids_data;
  inputImages_data = THFloatTensor_data(inputImages);
  output_data = THFloatTensor_data(output);
  grids_data = THFloatTensor_data(grids);
  int b, xOut;
  // Parallel over output positions; everything declared inside the
  // loop body is per-iteration and hence implicitly private.
  for(b=0; b < batchsize; b++)
  {
    #pragma omp parallel for
    for(xOut=0; xOut < output_X; xOut++)
    {
      //read the grid
      real xf = grids_data[b*grids_strideBatch + xOut*grids_stride_X];
      // get the weights for interpolation
      int xLow;
      real xWeightLow;
      real xcoord = (xf + 1) * (inputImages_X - 1) / 2; // map [-1,1] to [0, X_in-1]
      xLow = floor(xcoord);
      xWeightLow = 1 - (xcoord - xLow);
      bool xBeyondLow = xLow < 0;
      // xLow+1 is also read, so "beyond" starts one sample earlier.
      bool xBeyondHigh = xLow+1 > inputImages_X-1;
      /////////////// using non zero border condition
      if (!zero_boundary_bool) {
        if (xBeyondLow)
          xLow = 0;
        if (xBeyondHigh)
          xLow = inputImages_X-2;
      }
      const int outAddress = output_strideBatch * b + output_stride_X * xOut;
      const int inLowAddress = inputImages_strideBatch * b + inputImages_stride_X * xLow;
      const int inHighAddress = inLowAddress + inputImages_stride_X;
      real v=0;
      real inLow=0;
      real inHigh=0;
      int t;
      // interpolation happens here
      // NOTE(review): channel offset is "+ t", i.e. this assumes the
      // channel stride is 1 (contiguous last dimension) -- TODO confirm.
      for(t=0; t<inputImages_C; t++)
      {
        // In zero-boundary mode, out-of-range samples keep inLow/inHigh
        // at 0, so the interpolated value is 0.
        if (!zero_boundary_bool || (! (xBeyondLow || xBeyondHigh ))){
          inLow = inputImages_data[inLowAddress + t];
          inHigh = inputImages_data[inHighAddress + t];
        }
        v = xWeightLow * inLow + (1 - xWeightLow) * inHigh;
        output_data[outAddress + t] = v;
      }
    }
  }
  return 1;
}
/* 1-D linear sampling, tensor layout Batch x Channel x X.
   Same algorithm as BilinearSamplerBXC_updateOutput_1D, but the
   channel dimension is addressed through its explicit stride, so any
   (B, C, X) memory layout is supported.  Always returns 1.  */
int BilinearSamplerBCX_updateOutput_1D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int zero_boundary)
{
  // *B*atch, *C*hannel, *X*-coors
  int batchsize = THFloatTensor_size(inputImages,0);
  int inputImages_X = THFloatTensor_size(inputImages,2);
  int inputImages_C = THFloatTensor_size(inputImages,1);
  int output_X = THFloatTensor_size(output,2);
  int output_C = THFloatTensor_size(output,1);  // NOTE(review): unused
  bool zero_boundary_bool = zero_boundary == 1;
  int output_strideBatch = THFloatTensor_stride(output,0);
  int output_stride_X = THFloatTensor_stride(output,2);
  int output_stride_C = THFloatTensor_stride(output,1);
  int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
  int inputImages_stride_X = THFloatTensor_stride(inputImages,2);
  int inputImages_stride_C = THFloatTensor_stride(inputImages,1);
  int grids_strideBatch = THFloatTensor_stride(grids,0);
  int grids_stride_X = THFloatTensor_stride(grids,2);
  int grids_stride_C = THFloatTensor_stride(grids,1);  // NOTE(review): unused
  real *inputImages_data, *output_data, *grids_data;
  inputImages_data = THFloatTensor_data(inputImages);
  output_data = THFloatTensor_data(output);
  grids_data = THFloatTensor_data(grids);
  int b, xOut;
  // Parallel over output positions; per-iteration locals are private.
  for(b=0; b < batchsize; b++)
  {
    #pragma omp parallel for
    for(xOut=0; xOut < output_X; xOut++)
    {
      //read the grid
      real xf = grids_data[b*grids_strideBatch + xOut*grids_stride_X];
      // get the weights for interpolation
      int xLow;
      real xWeightLow;
      real xcoord = (xf + 1) * (inputImages_X - 1) / 2; // map [-1,1] to [0, X_in-1]
      xLow = floor(xcoord);
      xWeightLow = 1 - (xcoord - xLow);
      bool xBeyondLow = xLow < 0;
      bool xBeyondHigh = xLow+1 > inputImages_X-1;
      /////////////// using non zero border condition
      if (!zero_boundary_bool) {
        if (xBeyondLow)
          xLow = 0;
        if (xBeyondHigh)
          xLow = inputImages_X-2;
      }
      const int outAddress = output_strideBatch * b + output_stride_X * xOut;
      const int inLowAddress = inputImages_strideBatch * b + inputImages_stride_X * xLow;
      const int inHighAddress = inLowAddress + inputImages_stride_X;
      real v=0;
      real inLow=0;
      real inHigh=0;
      int t;
      // interpolation happens here; channels stepped by their stride
      for(t=0; t<inputImages_C; t++)
      {
        if (!zero_boundary_bool || (! (xBeyondLow || xBeyondHigh ))){
          inLow = inputImages_data[inLowAddress + t*inputImages_stride_C];
          inHigh = inputImages_data[inHighAddress + t*inputImages_stride_C];
        }
        v = xWeightLow * inLow + (1 - xWeightLow) * inHigh;
        output_data[outAddress + t*output_stride_C] = v;
      }
    }
  }
  return 1;
}
/* 2-D bilinear sampling, tensor layout Batch x X x Y x Channel.
   inputImages (B, X_in, Y_in, C) is sampled at normalised (x, y)
   coordinates stored in the last dimension of grids, writing
   output (B, X_out, Y_out, C).  zero_boundary == 1 -> out-of-range
   samples read as zero; otherwise indices are clamped.
   Always returns 1.  */
int BilinearSamplerBXYC_updateOutput_2D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output , int zero_boundary)
{
  // This is actua
  int batchsize = THFloatTensor_size(inputImages,0);
  int inputImages_X = THFloatTensor_size(inputImages,1);
  int inputImages_Y = THFloatTensor_size(inputImages,2);
  int inputImages_C = THFloatTensor_size(inputImages,3);
  int output_X = THFloatTensor_size(output,1);
  int output_Y = THFloatTensor_size(output,2);
  bool zero_boundary_bool = zero_boundary == 1;
  int output_strideBatch = THFloatTensor_stride(output,0);
  int output_stride_X = THFloatTensor_stride(output,1);
  int output_stride_Y = THFloatTensor_stride(output,2);
  int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
  int inputImages_stride_X = THFloatTensor_stride(inputImages,1);
  int inputImages_stride_Y = THFloatTensor_stride(inputImages,2);
  int grids_strideBatch = THFloatTensor_stride(grids,0);
  int grids_stride_X = THFloatTensor_stride(grids,1);
  int grids_stride_Y = THFloatTensor_stride(grids,2);
  real *inputImages_data, *output_data, *grids_data;
  inputImages_data = THFloatTensor_data(inputImages);
  output_data = THFloatTensor_data(output);
  grids_data = THFloatTensor_data(grids);
  int b, yOut, xOut;
  for(b=0; b < batchsize; b++)
  {
    #pragma omp parallel for
    for(xOut=0; xOut < output_X; xOut++)
    {
      for(yOut=0; yOut < output_Y; yOut++)
      {
        //read the grid
        // NOTE(review): yf is read at coordinate offset "+1", i.e. the
        // last grid dimension is assumed contiguous -- TODO confirm.
        real xf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X];
        real yf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X+1];
        // get the weights for interpolation
        int yInLowLow, xInLowLow;
        real yWeightLowLow, xWeightLowLow;
        real xcoord = (xf + 1) * (inputImages_X - 1) / 2;  // [-1,1] -> [0, X_in-1]
        xInLowLow = floor(xcoord);
        xWeightLowLow = 1 - (xcoord - xInLowLow);
        real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;  // [-1,1] -> [0, Y_in-1]
        yInLowLow = floor(ycoord);
        yWeightLowLow = 1 - (ycoord - yInLowLow);
        // NOTE(review): unlike the 1-D samplers this tests only the low
        // corner, yet the high neighbours (+1) are still read below; when
        // xInLowLow == X_in-1 the high read is past the row (its weight is
        // 0 only for exact-integer coordinates) -- verify intended.
        bool xBeyondLow = xInLowLow < 0;
        bool yBeyondLow = yInLowLow < 0;
        bool xBeyondHigh = xInLowLow > inputImages_X-1;
        bool yBeyondHigh = yInLowLow > inputImages_Y-1;
        /////////////// using non zero border condition
        if (!zero_boundary_bool) {
          if (xBeyondLow)
            xInLowLow = 0;
          if (xBeyondHigh)
            xInLowLow = inputImages_X-1;
          if (yBeyondLow)
            yInLowLow = 0;
          if (yBeyondHigh)
            yInLowLow = inputImages_Y-1;
        }
        const int outAddress = output_strideBatch * b + output_stride_Y * yOut + output_stride_X * xOut;
        const int inLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Y * yInLowLow + inputImages_stride_X * xInLowLow;
        const int inLowHighAddress = inLowLowAddress + inputImages_stride_Y;
        const int inHighLowAddress = inLowLowAddress + inputImages_stride_X;
        const int inHighHighAddress = inHighLowAddress + inputImages_stride_Y;
        real v=0;
        real inLowLow=0;
        real inLowHigh=0;
        real inHighLow=0;
        real inHighHigh=0;
        int t;
        // interpolation happens here
        // NOTE(review): channel offset is "+ t" (assumes channel stride 1).
        for(t=0; t<inputImages_C; t++)
        {
          // if the first is for non zero condition and the second is for zero condition
          if (!zero_boundary_bool || (! (xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh))){
            inLowLow = inputImages_data[inLowLowAddress + t];
            inLowHigh = inputImages_data[inLowHighAddress + t];
            inHighLow = inputImages_data[inHighLowAddress + t];
            inHighHigh = inputImages_data[inHighHighAddress + t];
          }
          v = xWeightLowLow * yWeightLowLow * inLowLow
            + (1 - xWeightLowLow) * yWeightLowLow * inHighLow
            + xWeightLowLow * (1 - yWeightLowLow) * inLowHigh
            + (1 - xWeightLowLow) * (1 - yWeightLowLow) * inHighHigh;
          output_data[outAddress + t] = v;
        }
      }
    }
  }
  return 1;
}
/* 2-D bilinear sampling, layout Batch x X x Y x Channel ("new"
   variant).  Differs from BilinearSamplerBXYC_updateOutput_2D in two
   ways: the boundary flags are computed from the raw grid values xf/yf
   rather than from the integer indices, and the high-neighbour
   addresses are redirected back onto in-range cells when a coordinate
   is beyond the high edge.  Always returns 1.  */
int BilinearSamplerBXYC_updateOutput_2D_new(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output , int zero_boundary)
{
  // This is actua
  int batchsize = THFloatTensor_size(inputImages,0);
  int inputImages_X = THFloatTensor_size(inputImages,1);
  int inputImages_Y = THFloatTensor_size(inputImages,2);
  int inputImages_C = THFloatTensor_size(inputImages,3);
  int output_X = THFloatTensor_size(output,1);
  int output_Y = THFloatTensor_size(output,2);
  bool zero_boundary_bool = zero_boundary == 1;
  int output_strideBatch = THFloatTensor_stride(output,0);
  int output_stride_X = THFloatTensor_stride(output,1);
  int output_stride_Y = THFloatTensor_stride(output,2);
  int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
  int inputImages_stride_X = THFloatTensor_stride(inputImages,1);
  int inputImages_stride_Y = THFloatTensor_stride(inputImages,2);
  int grids_strideBatch = THFloatTensor_stride(grids,0);
  int grids_stride_X = THFloatTensor_stride(grids,1);
  int grids_stride_Y = THFloatTensor_stride(grids,2);
  real *inputImages_data, *output_data, *grids_data;
  inputImages_data = THFloatTensor_data(inputImages);
  output_data = THFloatTensor_data(output);
  grids_data = THFloatTensor_data(grids);
  int b, yOut, xOut;
  for(b=0; b < batchsize; b++)
  {
    #pragma omp parallel for
    for(xOut=0; xOut < output_X; xOut++)
    {
      for(yOut=0; yOut < output_Y; yOut++)
      {
        //read the grid
        real xf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X];
        real yf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X+1];
        // get the weights for interpolation
        int yInLowLow, xInLowLow;
        real yWeightLowLow, xWeightLowLow;
        real xcoord = (xf + 1) * (inputImages_X - 1) / 2;  // [-1,1] -> [0, X_in-1]
        xInLowLow = floor(xcoord);
        xWeightLowLow = 1 - (xcoord - xInLowLow);
        real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
        yInLowLow = floor(ycoord);
        yWeightLowLow = 1 - (ycoord - yInLowLow);
        // NOTE(review): these boundary tests compare xf/yf against
        // 0.0 and 1.0, although the mapping above treats the grid as
        // normalised to [-1,1] (xf < 0 is the middle of the image).
        // This is inconsistent with the _2D variant -- verify which
        // grid convention the callers of this "new" variant use.
        bool xBeyondLow = xf < 0.0;
        bool yBeyondLow = yf < 0.0;
        bool xBeyondHigh = xf >= 1.0;
        bool yBeyondHigh = yf >= 1.0;
        /////////////// using non zero border condition
        if (!zero_boundary_bool) {
          if (xBeyondLow)
            xInLowLow = 0;
          if (xBeyondHigh)
            xInLowLow = inputImages_X-1;
          if (yBeyondLow)
            yInLowLow = 0;
          if (yBeyondHigh)
            yInLowLow = inputImages_Y-1;
        }
        int outAddress = output_strideBatch * b + output_stride_Y * yOut + output_stride_X * xOut;
        int inLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Y * yInLowLow + inputImages_stride_X * xInLowLow;
        int inLowHighAddress = inLowLowAddress + inputImages_stride_Y;
        int inHighLowAddress = inLowLowAddress + inputImages_stride_X;
        int inHighHighAddress = inHighLowAddress + inputImages_stride_Y;
        real v=0;
        real inLowLow=0;
        real inLowHigh=0;
        real inHighLow=0;
        real inHighHigh=0;
        // Redirect the high-neighbour reads back in range at the edges
        // so no address runs past the image.
        if (xBeyondHigh)
          inHighLowAddress = inLowLowAddress;
        if (yBeyondHigh)
          inLowHighAddress = inLowLowAddress;
        if (xBeyondHigh)
          inHighHighAddress = inLowHighAddress;
        if (yBeyondHigh)
          inHighHighAddress = inHighLowAddress;
        int t;
        // interpolation happens here
        // NOTE(review): channel offset is "+ t" (assumes channel stride 1).
        for(t=0; t<inputImages_C; t++)
        {
          // if the first is for non zero condition and the second is for zero condition
          if (!zero_boundary_bool || (! (xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh))){
            inLowLow = inputImages_data[inLowLowAddress + t];
            inLowHigh = inputImages_data[inLowHighAddress + t];
            inHighLow = inputImages_data[inHighLowAddress + t];
            inHighHigh = inputImages_data[inHighHighAddress + t];
          }
          v = xWeightLowLow * yWeightLowLow * inLowLow
            + (1 - xWeightLowLow) * yWeightLowLow * inHighLow
            + xWeightLowLow * (1 - yWeightLowLow) * inLowHigh
            + (1 - xWeightLowLow) * (1 - yWeightLowLow) * inHighHigh;
          output_data[outAddress + t] = v;
        }
      }
    }
  }
  return 1;
}
/* 2-D bilinear sampling, tensor layout Batch x Channel x X x Y.
   Same algorithm as the BXYC variant, but channels are addressed via
   their explicit stride and the grid's coordinate pair is separated by
   grids_stride_C.  Boundary flags here test the *pair* of neighbours
   (xInLowLow+1), matching the 1-D samplers, and clamp to X-2/Y-2 so
   both taps stay in range.  Always returns 1.  */
int BilinearSamplerBCXY_updateOutput_2D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int zero_boundary)
{
  // This is actua
  int batchsize = THFloatTensor_size(inputImages,0);
  int inputImages_X = THFloatTensor_size(inputImages,2);
  int inputImages_Y = THFloatTensor_size(inputImages,3);
  int inputImages_C = THFloatTensor_size(inputImages,1);
  int output_X = THFloatTensor_size(output,2);
  int output_Y = THFloatTensor_size(output,3);
  bool zero_boundary_bool = zero_boundary == 1;
  int output_strideBatch = THFloatTensor_stride(output,0);
  int output_stride_X = THFloatTensor_stride(output,2);
  int output_stride_Y = THFloatTensor_stride(output,3);
  int output_stride_C = THFloatTensor_stride(output,1);
  int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
  int inputImages_stride_X = THFloatTensor_stride(inputImages,2);
  int inputImages_stride_Y = THFloatTensor_stride(inputImages,3);
  int inputImages_stride_C = THFloatTensor_stride(inputImages,1);
  int grids_strideBatch = THFloatTensor_stride(grids,0);
  int grids_stride_C = THFloatTensor_stride(grids,1);
  int grids_stride_X = THFloatTensor_stride(grids,2);
  int grids_stride_Y = THFloatTensor_stride(grids,3);
  real *inputImages_data, *output_data, *grids_data;
  inputImages_data = THFloatTensor_data(inputImages);
  output_data = THFloatTensor_data(output);
  grids_data = THFloatTensor_data(grids);
  int b, yOut, xOut;
  for(b=0; b < batchsize; b++)
  {
    #pragma omp parallel for
    for(xOut=0; xOut < output_X; xOut++)
    {
      for(yOut=0; yOut < output_Y; yOut++)
      {
        //read the grid: x and y components separated by the C stride
        real xf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X];
        real yf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X+grids_stride_C];
        // get the weights for interpolation
        int yInLowLow, xInLowLow;
        real yWeightLowLow, xWeightLowLow;
        real xcoord = (xf + 1) * (inputImages_X - 1) / 2;  // [-1,1] -> [0, X_in-1]
        xInLowLow = floor(xcoord);
        xWeightLowLow = 1 - (xcoord - xInLowLow);
        real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
        yInLowLow = floor(ycoord);
        yWeightLowLow = 1 - (ycoord - yInLowLow);
        bool xBeyondLow = xInLowLow < 0;
        bool yBeyondLow = yInLowLow < 0;
        bool xBeyondHigh = xInLowLow+1 > inputImages_X-1;
        bool yBeyondHigh = yInLowLow+1 > inputImages_Y-1;
        /////////////// using non zero border condition
        if (!zero_boundary_bool) {
          if (xBeyondLow)
            xInLowLow = 0;
          if (xBeyondHigh)
            xInLowLow = inputImages_X-2;
          if (yBeyondLow)
            yInLowLow = 0;
          if (yBeyondHigh)
            yInLowLow = inputImages_Y-2;
        }
        const int outAddress = output_strideBatch * b + output_stride_Y * yOut + output_stride_X * xOut;
        const int inLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Y * yInLowLow + inputImages_stride_X * xInLowLow;
        const int inLowHighAddress = inLowLowAddress + inputImages_stride_Y;
        const int inHighLowAddress = inLowLowAddress + inputImages_stride_X;
        const int inHighHighAddress = inHighLowAddress + inputImages_stride_Y;
        real v=0;
        real inLowLow=0;
        real inLowHigh=0;
        real inHighLow=0;
        real inHighHigh=0;
        int t;
        // interpolation happens here
        for(t=0; t<inputImages_C; t++)
        {
          // if the first is for non zero condition and the second is for zero condition
          if (!zero_boundary_bool || (! (xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh))){
            inLowLow = inputImages_data[inLowLowAddress + t*inputImages_stride_C];
            inLowHigh = inputImages_data[inLowHighAddress + t*inputImages_stride_C];
            inHighLow = inputImages_data[inHighLowAddress + t*inputImages_stride_C];
            inHighHigh = inputImages_data[inHighHighAddress + t*inputImages_stride_C];
          }
          v = xWeightLowLow * yWeightLowLow * inLowLow
            + (1 - xWeightLowLow) * yWeightLowLow * inHighLow
            + xWeightLowLow * (1 - yWeightLowLow) * inLowHigh
            + (1 - xWeightLowLow) * (1 - yWeightLowLow) * inHighHigh;
          output_data[outAddress + t*output_stride_C] = v;
        }
      }
    }
  }
  return 1;
}
int BilinearSamplerBCXY_updateOutput_2D_old(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int zero_boundary)
{
    /* Bilinear sampling of a batch of 2D images in BCXY (batch, channel, x, y)
     * layout -- old variant.
     *
     * For each output site (b, xOut, yOut) the grid supplies a normalized
     * (x, y) coordinate in [-1, 1] (x at component 0, y at component 1);
     * the four neighbouring input pixels are blended with bilinear weights
     * and written to output, channel by channel.
     *
     * NOTE: this old variant always behaves as a zero-padding sampler:
     * corners that fall outside the image keep their initial value 0, and
     * the zero_boundary argument is ignored (kept for interface parity
     * with the other samplers).
     *
     * Returns 1 on completion.
     */
    int batchsize = THFloatTensor_size(inputImages,0);
    int inputImages_X = THFloatTensor_size(inputImages,2);
    int inputImages_Y = THFloatTensor_size(inputImages,3);
    int inputImages_C = THFloatTensor_size(inputImages,1);
    int output_X = THFloatTensor_size(output,2);
    int output_Y = THFloatTensor_size(output,3);
    (void)zero_boundary; /* unused: see note above */

    int output_strideBatch = THFloatTensor_stride(output,0);
    int output_stride_X = THFloatTensor_stride(output,2);
    int output_stride_Y = THFloatTensor_stride(output,3);
    int output_stride_C = THFloatTensor_stride(output,1);

    int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
    int inputImages_stride_X = THFloatTensor_stride(inputImages,2);
    int inputImages_stride_Y = THFloatTensor_stride(inputImages,3);
    int inputImages_stride_C = THFloatTensor_stride(inputImages,1);

    int grids_strideBatch = THFloatTensor_stride(grids,0);
    int grids_stride_C = THFloatTensor_stride(grids,1);
    int grids_stride_X = THFloatTensor_stride(grids,2);
    int grids_stride_Y = THFloatTensor_stride(grids,3);

    real *inputImages_data = THFloatTensor_data(inputImages);
    real *output_data = THFloatTensor_data(output);
    real *grids_data = THFloatTensor_data(grids);

    int b;
    for(b=0; b < batchsize; b++)
    {
        /* BUGFIX: loop counters are declared in the loop headers so each
         * OpenMP thread gets private copies.  Previously yOut was declared
         * at function scope and therefore shared between threads, a data
         * race that corrupts results when built with -fopenmp. */
        #pragma omp parallel for
        for(int xOut=0; xOut < output_X; xOut++)
        {
            for(int yOut=0; yOut < output_Y; yOut++)
            {
                /* normalized sampling coordinates for this output pixel */
                real xf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X];
                real yf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X+grids_stride_C];

                /* map [-1,1] -> [0, size-1]; LowLow is the floor corner and
                 * {x,y}WeightLowLow are its bilinear weights */
                real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
                int xInLowLow = floor(xcoord);
                real xWeightLowLow = 1 - (xcoord - xInLowLow);

                real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
                int yInLowLow = floor(ycoord);
                real yWeightLowLow = 1 - (ycoord - yInLowLow);

                const int outAddress = output_strideBatch * b + output_stride_Y * yOut + output_stride_X * xOut;
                const int inLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Y * yInLowLow + inputImages_stride_X * xInLowLow;
                const int inLowHighAddress = inLowLowAddress + inputImages_stride_Y;
                const int inHighLowAddress = inLowLowAddress + inputImages_stride_X;
                const int inHighHighAddress = inHighLowAddress + inputImages_stride_Y;

                real v=0;
                real inLowLow=0;
                real inLowHigh=0;
                real inHighLow=0;
                real inHighHigh=0;

                /* per-corner bounds checks: out-of-image corners contribute 0 */
                bool lowLowIsIn = xInLowLow >= 0 && xInLowLow <= inputImages_X-1 && yInLowLow >= 0 && yInLowLow <= inputImages_Y-1;
                bool lowHighIsIn = xInLowLow >= 0 && xInLowLow <= inputImages_X-1 && yInLowLow+1 >= 0 && yInLowLow+1 <= inputImages_Y-1;
                bool highLowIsIn = xInLowLow+1 >= 0 && xInLowLow+1 <= inputImages_X-1 && yInLowLow >= 0 && yInLowLow <= inputImages_Y-1;
                bool highHighIsIn = xInLowLow+1 >= 0 && xInLowLow+1 <= inputImages_X-1 && yInLowLow+1 >= 0 && yInLowLow+1 <= inputImages_Y-1;

                int t;
                /* bilinear blend, independently per channel */
                for(t=0; t<inputImages_C; t++)
                {
                    if(lowLowIsIn) inLowLow = inputImages_data[inLowLowAddress + t*inputImages_stride_C];
                    if(lowHighIsIn) inLowHigh = inputImages_data[inLowHighAddress + t*inputImages_stride_C];
                    if(highLowIsIn) inHighLow = inputImages_data[inHighLowAddress + t*inputImages_stride_C];
                    if(highHighIsIn) inHighHigh = inputImages_data[inHighHighAddress + t*inputImages_stride_C];

                    v = xWeightLowLow * yWeightLowLow * inLowLow
                      + (1 - xWeightLowLow) * yWeightLowLow * inHighLow
                      + xWeightLowLow * (1 - yWeightLowLow) * inLowHigh
                      + (1 - xWeightLowLow) * (1 - yWeightLowLow) * inHighHigh;

                    output_data[outAddress + t*output_stride_C] = v;
                }
            }
        }
    }
    return 1;
}
int BilinearSamplerBXYZC_updateOutput_3D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int zero_boundary)
{
    /* Trilinear sampling of a batch of 3D volumes in BXYZC
     * (batch, x, y, z, channel) layout.
     *
     * For each output voxel (b, xOut, yOut, zOut) the grid supplies a
     * normalized (x, y, z) coordinate in [-1, 1]; the eight neighbouring
     * voxels are blended with trilinear weights.
     *
     * Boundary handling:
     *   zero_boundary == 1 -> samples with any corner outside the volume
     *                         produce 0 (zero padding);
     *   otherwise          -> the corner index is clamped to the border
     *                         (replicate padding).
     *
     * NOTE(review): channels are addressed as `address + t` and grid
     * components as `+1`/`+2`, i.e. this layout assumes the last (channel /
     * component) dimension is contiguous with stride 1 -- confirm against
     * the callers.
     *
     * Returns 1 on completion.
     */
    int batchsize = THFloatTensor_size(inputImages,0);
    int inputImages_X = THFloatTensor_size(inputImages,1);
    int inputImages_Y = THFloatTensor_size(inputImages,2);
    int inputImages_Z = THFloatTensor_size(inputImages,3);
    int inputImages_C = THFloatTensor_size(inputImages,4);
    int output_X = THFloatTensor_size(output,1);
    int output_Y = THFloatTensor_size(output,2);
    int output_Z = THFloatTensor_size(output,3);
    bool zero_boundary_bool = zero_boundary == 1;

    int output_strideBatch = THFloatTensor_stride(output,0);
    int output_stride_X = THFloatTensor_stride(output,1);
    int output_stride_Y = THFloatTensor_stride(output,2);
    int output_stride_Z = THFloatTensor_stride(output,3);

    int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
    int inputImages_stride_X = THFloatTensor_stride(inputImages,1);
    int inputImages_stride_Y = THFloatTensor_stride(inputImages,2);
    int inputImages_stride_Z = THFloatTensor_stride(inputImages,3);

    int grids_strideBatch = THFloatTensor_stride(grids,0);
    int grids_stride_X = THFloatTensor_stride(grids,1);
    int grids_stride_Y = THFloatTensor_stride(grids,2);
    int grids_stride_Z = THFloatTensor_stride(grids,3);

    real *inputImages_data = THFloatTensor_data(inputImages);
    real *output_data = THFloatTensor_data(output);
    real *grids_data = THFloatTensor_data(grids);

    int b;
    for(b=0; b < batchsize; b++)
    {
        /* BUGFIX: yOut and zOut are declared in their loop headers so each
         * OpenMP thread gets private copies.  Previously they were declared
         * at function scope and therefore shared between threads, a data
         * race that corrupts results when built with -fopenmp. */
        #pragma omp parallel for
        for(int xOut=0; xOut < output_X; xOut++)
        {
            for(int yOut=0; yOut < output_Y; yOut++)
            {
                for(int zOut=0; zOut < output_Z; zOut++)
                {
                    /* normalized sampling coordinates for this output voxel */
                    real xf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X];
                    real yf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X+1];
                    real zf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X+2];

                    /* map [-1,1] -> [0, size-1]; LowLowLow is the floor corner */
                    real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
                    int xInLowLowLow = floor(xcoord);
                    real xWeightLowLowLow = 1 - (xcoord - xInLowLowLow);

                    real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
                    int yInLowLowLow = floor(ycoord);
                    real yWeightLowLowLow = 1 - (ycoord - yInLowLowLow);

                    real zcoord = (zf + 1) * (inputImages_Z - 1) / 2;
                    int zInLowLowLow = floor(zcoord);
                    real zWeightLowLowLow = 1 - (zcoord - zInLowLowLow);

                    bool xBeyondLow = xInLowLowLow < 0;
                    bool yBeyondLow = yInLowLowLow < 0;
                    bool zBeyondLow = zInLowLowLow < 0;
                    bool xBeyondHigh = xInLowLowLow+1 > inputImages_X-1;
                    bool yBeyondHigh = yInLowLowLow+1 > inputImages_Y-1;
                    bool zBeyondHigh = zInLowLowLow+1 > inputImages_Z-1;

                    /* replicate (border) padding: clamp the corner into the volume */
                    if (!zero_boundary_bool) {
                        if (xBeyondLow)
                            xInLowLowLow = 0;
                        if (xBeyondHigh)
                            xInLowLowLow = inputImages_X-2;
                        if (yBeyondLow)
                            yInLowLowLow = 0;
                        if (yBeyondHigh)
                            yInLowLowLow = inputImages_Y-2;
                        if (zBeyondLow)
                            zInLowLowLow = 0;
                        if (zBeyondHigh)
                            zInLowLowLow = inputImages_Z-2;
                    }

                    const int outAddress = output_strideBatch * b + output_stride_Z * zOut + output_stride_Y * yOut + output_stride_X * xOut;
                    const int inLowLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Z * zInLowLowLow + inputImages_stride_Y * yInLowLowLow + inputImages_stride_X * xInLowLowLow;
                    const int inLowLowHighAddress = inLowLowLowAddress + inputImages_stride_Z;
                    const int inLowHighLowAddress = inLowLowLowAddress + inputImages_stride_Y;
                    const int inLowHighHighAddress = inLowLowLowAddress + inputImages_stride_Y + inputImages_stride_Z;
                    const int inHighLowLowAddress = inLowLowLowAddress + inputImages_stride_X;
                    const int inHighLowHighAddress = inLowLowLowAddress + inputImages_stride_X + inputImages_stride_Z;
                    const int inHighHighLowAddress = inLowLowLowAddress + inputImages_stride_X + inputImages_stride_Y;
                    const int inHighHighHighAddress = inLowLowLowAddress + inputImages_stride_X + inputImages_stride_Y + inputImages_stride_Z;

                    real v=0;
                    real inLowLowLow=0;
                    real inLowLowHigh=0;
                    real inLowHighLow=0;
                    real inLowHighHigh=0;
                    real inHighLowLow=0;
                    real inHighLowHigh=0;
                    real inHighHighLow=0;
                    real inHighHighHigh=0;

                    int t;
                    /* trilinear blend, independently per channel */
                    for(t=0; t<inputImages_C; t++)
                    {
                        /* zero padding: skip the reads (corners stay 0) when
                         * any corner falls outside the volume */
                        if (!zero_boundary_bool || (! (xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh || zBeyondLow || zBeyondHigh))){
                            inLowLowLow = inputImages_data[inLowLowLowAddress + t];
                            inLowLowHigh = inputImages_data[inLowLowHighAddress + t];
                            inLowHighLow = inputImages_data[inLowHighLowAddress + t];
                            inLowHighHigh = inputImages_data[inLowHighHighAddress + t];
                            inHighLowLow = inputImages_data[inHighLowLowAddress + t];
                            inHighLowHigh = inputImages_data[inHighLowHighAddress + t];
                            inHighHighLow = inputImages_data[inHighHighLowAddress + t];
                            inHighHighHigh = inputImages_data[inHighHighHighAddress + t];
                        }

                        v = xWeightLowLowLow * yWeightLowLowLow * zWeightLowLowLow * inLowLowLow
                          + xWeightLowLowLow * yWeightLowLowLow * (1-zWeightLowLowLow) * inLowLowHigh
                          + xWeightLowLowLow * (1-yWeightLowLowLow) * zWeightLowLowLow * inLowHighLow
                          + xWeightLowLowLow * (1-yWeightLowLowLow) * (1-zWeightLowLowLow) * inLowHighHigh
                          + (1-xWeightLowLowLow) * yWeightLowLowLow * zWeightLowLowLow * inHighLowLow
                          + (1-xWeightLowLowLow) * yWeightLowLowLow * (1-zWeightLowLowLow) * inHighLowHigh
                          + (1-xWeightLowLowLow) * (1-yWeightLowLowLow) * zWeightLowLowLow * inHighHighLow
                          + (1-xWeightLowLowLow) * (1-yWeightLowLowLow) * (1-zWeightLowLowLow) * inHighHighHigh;

                        output_data[outAddress + t] = v;
                    }
                }
            }
        }
    }
    return 1;
}
int BilinearSamplerBCXYZ_updateOutput_3D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int zero_boundary)
{
    /* Trilinear sampling of a batch of 3D volumes in BCXYZ
     * (batch, channel, x, y, z) layout.
     *
     * For each output voxel (b, xOut, yOut, zOut) the grid supplies a
     * normalized (x, y, z) coordinate in [-1, 1] (components addressed via
     * grids_stride_C); the eight neighbouring voxels are blended with
     * trilinear weights.
     *
     * Boundary handling:
     *   zero_boundary == 1 -> samples with any corner outside the volume
     *                         produce 0 (zero padding);
     *   otherwise          -> the corner index is clamped to the border
     *                         (replicate padding).
     *
     * Returns 1 on completion.
     */
    int batchsize = THFloatTensor_size(inputImages,0);
    int inputImages_X = THFloatTensor_size(inputImages,2);
    int inputImages_Y = THFloatTensor_size(inputImages,3);
    int inputImages_Z = THFloatTensor_size(inputImages,4);
    int inputImages_C = THFloatTensor_size(inputImages,1);
    int output_X = THFloatTensor_size(output,2);
    int output_Y = THFloatTensor_size(output,3);
    int output_Z = THFloatTensor_size(output,4);
    bool zero_boundary_bool = zero_boundary == 1;

    int output_strideBatch = THFloatTensor_stride(output,0);
    int output_stride_X = THFloatTensor_stride(output,2);
    int output_stride_Y = THFloatTensor_stride(output,3);
    int output_stride_Z = THFloatTensor_stride(output,4);
    int output_stride_C = THFloatTensor_stride(output,1);

    int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
    int inputImages_stride_X = THFloatTensor_stride(inputImages,2);
    int inputImages_stride_Y = THFloatTensor_stride(inputImages,3);
    int inputImages_stride_Z = THFloatTensor_stride(inputImages,4);
    int inputImages_stride_C = THFloatTensor_stride(inputImages,1);

    int grids_strideBatch = THFloatTensor_stride(grids,0);
    int grids_stride_X = THFloatTensor_stride(grids,2);
    int grids_stride_Y = THFloatTensor_stride(grids,3);
    int grids_stride_Z = THFloatTensor_stride(grids,4);
    int grids_stride_C = THFloatTensor_stride(grids,1);

    real *inputImages_data = THFloatTensor_data(inputImages);
    real *output_data = THFloatTensor_data(output);
    real *grids_data = THFloatTensor_data(grids);

    int b;
    for(b=0; b < batchsize; b++)
    {
        /* BUGFIX: yOut and zOut are declared in their loop headers so each
         * OpenMP thread gets private copies.  Previously they were declared
         * at function scope and therefore shared between threads, a data
         * race that corrupts results when built with -fopenmp. */
        #pragma omp parallel for
        for(int xOut=0; xOut < output_X; xOut++)
        {
            for(int yOut=0; yOut < output_Y; yOut++)
            {
                for(int zOut=0; zOut < output_Z; zOut++)
                {
                    /* normalized sampling coordinates for this output voxel */
                    real xf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X];
                    real yf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X+grids_stride_C];
                    real zf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X+2*grids_stride_C];

                    /* map [-1,1] -> [0, size-1]; LowLowLow is the floor corner */
                    real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
                    int xInLowLowLow = floor(xcoord);
                    real xWeightLowLowLow = 1 - (xcoord - xInLowLowLow);

                    real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
                    int yInLowLowLow = floor(ycoord);
                    real yWeightLowLowLow = 1 - (ycoord - yInLowLowLow);

                    real zcoord = (zf + 1) * (inputImages_Z - 1) / 2;
                    int zInLowLowLow = floor(zcoord);
                    real zWeightLowLowLow = 1 - (zcoord - zInLowLowLow);

                    bool xBeyondLow = xInLowLowLow < 0;
                    bool yBeyondLow = yInLowLowLow < 0;
                    bool zBeyondLow = zInLowLowLow < 0;
                    bool xBeyondHigh = xInLowLowLow+1 > inputImages_X-1;
                    bool yBeyondHigh = yInLowLowLow+1 > inputImages_Y-1;
                    bool zBeyondHigh = zInLowLowLow+1 > inputImages_Z-1;

                    /* replicate (border) padding: clamp the corner into the volume */
                    if (!zero_boundary_bool) {
                        if (xBeyondLow)
                            xInLowLowLow = 0;
                        if (xBeyondHigh)
                            xInLowLowLow = inputImages_X-2;
                        if (yBeyondLow)
                            yInLowLowLow = 0;
                        if (yBeyondHigh)
                            yInLowLowLow = inputImages_Y-2;
                        if (zBeyondLow)
                            zInLowLowLow = 0;
                        if (zBeyondHigh)
                            zInLowLowLow = inputImages_Z-2;
                    }

                    const int outAddress = output_strideBatch * b + output_stride_Z * zOut + output_stride_Y * yOut + output_stride_X * xOut;
                    const int inLowLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Z * zInLowLowLow + inputImages_stride_Y * yInLowLowLow + inputImages_stride_X * xInLowLowLow;
                    const int inLowLowHighAddress = inLowLowLowAddress + inputImages_stride_Z;
                    const int inLowHighLowAddress = inLowLowLowAddress + inputImages_stride_Y;
                    const int inLowHighHighAddress = inLowLowLowAddress + inputImages_stride_Y + inputImages_stride_Z;
                    const int inHighLowLowAddress = inLowLowLowAddress + inputImages_stride_X;
                    const int inHighLowHighAddress = inLowLowLowAddress + inputImages_stride_X + inputImages_stride_Z;
                    const int inHighHighLowAddress = inLowLowLowAddress + inputImages_stride_X + inputImages_stride_Y;
                    const int inHighHighHighAddress = inLowLowLowAddress + inputImages_stride_X + inputImages_stride_Y + inputImages_stride_Z;

                    real v=0;
                    real inLowLowLow=0;
                    real inLowLowHigh=0;
                    real inLowHighLow=0;
                    real inLowHighHigh=0;
                    real inHighLowLow=0;
                    real inHighLowHigh=0;
                    real inHighHighLow=0;
                    real inHighHighHigh=0;

                    int t;
                    /* trilinear blend, independently per channel */
                    for(t=0; t<inputImages_C; t++)
                    {
                        /* zero padding: skip the reads (corners stay 0) when
                         * any corner falls outside the volume */
                        if (!zero_boundary_bool || (! (xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh || zBeyondLow || zBeyondHigh))){
                            inLowLowLow = inputImages_data[inLowLowLowAddress + t*inputImages_stride_C];
                            inLowLowHigh = inputImages_data[inLowLowHighAddress + t*inputImages_stride_C];
                            inLowHighLow = inputImages_data[inLowHighLowAddress + t*inputImages_stride_C];
                            inLowHighHigh = inputImages_data[inLowHighHighAddress + t*inputImages_stride_C];
                            inHighLowLow = inputImages_data[inHighLowLowAddress + t*inputImages_stride_C];
                            inHighLowHigh = inputImages_data[inHighLowHighAddress + t*inputImages_stride_C];
                            inHighHighLow = inputImages_data[inHighHighLowAddress + t*inputImages_stride_C];
                            inHighHighHigh = inputImages_data[inHighHighHighAddress + t*inputImages_stride_C];
                        }

                        v = xWeightLowLowLow * yWeightLowLowLow * zWeightLowLowLow * inLowLowLow
                          + xWeightLowLowLow * yWeightLowLowLow * (1-zWeightLowLowLow) * inLowLowHigh
                          + xWeightLowLowLow * (1-yWeightLowLowLow) * zWeightLowLowLow * inLowHighLow
                          + xWeightLowLowLow * (1-yWeightLowLowLow) * (1-zWeightLowLowLow) * inLowHighHigh
                          + (1-xWeightLowLowLow) * yWeightLowLowLow * zWeightLowLowLow * inHighLowLow
                          + (1-xWeightLowLowLow) * yWeightLowLowLow * (1-zWeightLowLowLow) * inHighLowHigh
                          + (1-xWeightLowLowLow) * (1-yWeightLowLowLow) * zWeightLowLowLow * inHighHighLow
                          + (1-xWeightLowLowLow) * (1-yWeightLowLowLow) * (1-zWeightLowLowLow) * inHighHighHigh;

                        output_data[outAddress + t*output_stride_C] = v;
                    }
                }
            }
        }
    }
    return 1;
}
int BilinearSamplerBHWD_updateOutput(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int zero_boundary)
{
    /* Bilinear sampling of a batch of images in BHWD
     * (batch, height, width, depth/channel) layout.
     *
     * Each output pixel (b, yOut, xOut) reads a normalized (y, x)
     * coordinate pair in [-1, 1] from the grid and blends the four
     * neighbouring input pixels with bilinear weights.  Corners outside
     * the image contribute zero.  The zero_boundary flag is accepted for
     * interface parity but does not alter this function's behaviour.
     *
     * Returns 1 on completion.
     */
    const int batchsize = THFloatTensor_size(inputImages, 0);
    const int inH = THFloatTensor_size(inputImages, 1);
    const int inW = THFloatTensor_size(inputImages, 2);
    const int outH = THFloatTensor_size(output, 1);
    const int outW = THFloatTensor_size(output, 2);
    const int channels = THFloatTensor_size(inputImages, 3);

    const int oSB = THFloatTensor_stride(output, 0);
    const int oSH = THFloatTensor_stride(output, 1);
    const int oSW = THFloatTensor_stride(output, 2);

    const int iSB = THFloatTensor_stride(inputImages, 0);
    const int iSH = THFloatTensor_stride(inputImages, 1);
    const int iSW = THFloatTensor_stride(inputImages, 2);

    const int gSB = THFloatTensor_stride(grids, 0);
    const int gSH = THFloatTensor_stride(grids, 1);
    const int gSW = THFloatTensor_stride(grids, 2);

    real *in_data = THFloatTensor_data(inputImages);
    real *out_data = THFloatTensor_data(output);
    real *grid_data = THFloatTensor_data(grids);

    for (int b = 0; b < batchsize; b++)
    {
        for (int yOut = 0; yOut < outH; yOut++)
        {
            for (int xOut = 0; xOut < outW; xOut++)
            {
                /* the grid packs (y, x): y at offset 0, x at offset 1 */
                const int gBase = b * gSB + yOut * gSH + xOut * gSW;
                real yf = grid_data[gBase];
                real xf = grid_data[gBase + 1];

                /* map normalized [-1,1] coords to pixel space; the "top
                 * left" corner is the floor, with its bilinear weight */
                real xcoord = (xf + 1) * (inW - 1) / 2;
                int xTL = floor(xcoord);
                real wX = 1 - (xcoord - xTL);

                real ycoord = (yf + 1) * (inH - 1) / 2;
                int yTL = floor(ycoord);
                real wY = 1 - (ycoord - yTL);

                const int outAddr = b * oSB + yOut * oSH + xOut * oSW;
                const int addrTL = b * iSB + yTL * iSH + xTL * iSW;
                const int addrTR = addrTL + iSW;
                const int addrBL = addrTL + iSH;
                const int addrBR = addrBL + iSW;

                /* validity of each corner: out-of-image corners read as 0 */
                const bool xLOk = (xTL >= 0) && (xTL <= inW - 1);
                const bool xROk = (xTL + 1 >= 0) && (xTL + 1 <= inW - 1);
                const bool yTOk = (yTL >= 0) && (yTL <= inH - 1);
                const bool yBOk = (yTL + 1 >= 0) && (yTL + 1 <= inH - 1);
                const bool tlIn = xLOk && yTOk;
                const bool trIn = xROk && yTOk;
                const bool blIn = xLOk && yBOk;
                const bool brIn = xROk && yBOk;

                real vTL = 0, vTR = 0, vBL = 0, vBR = 0;
                /* bilinear blend, independently per channel */
                for (int c = 0; c < channels; c++)
                {
                    if (tlIn) vTL = in_data[addrTL + c];
                    if (trIn) vTR = in_data[addrTR + c];
                    if (blIn) vBL = in_data[addrBL + c];
                    if (brIn) vBR = in_data[addrBR + c];

                    out_data[outAddr + c] = wX * wY * vTL
                                          + (1 - wX) * wY * vTR
                                          + wX * (1 - wY) * vBL
                                          + (1 - wX) * (1 - wY) * vBR;
                }
            }
        }
    }
    return 1;
}
int BilinearSamplerBXYZC_updateOutput_ND(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int ndim, int zero_boundary)
{
    /* Dispatch to the dimension-specific BXYZC-layout forward sampler.
     * Returns the sampler's result, or -1 for an unsupported ndim. */
    if (ndim == 1)
        return BilinearSamplerBXC_updateOutput_1D(inputImages, grids, output, zero_boundary);
    if (ndim == 2)
        return BilinearSamplerBXYC_updateOutput_2D(inputImages, grids, output, zero_boundary);
    if (ndim == 3)
        return BilinearSamplerBXYZC_updateOutput_3D(inputImages, grids, output, zero_boundary);
    return -1;
}
int BilinearSamplerBCXYZ_updateOutput_ND(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output, int ndim, int zero_boundary)
{
    /* Dispatch to the dimension-specific BCXYZ-layout forward sampler.
     * Returns the sampler's result, or -1 for an unsupported ndim. */
    if (ndim == 1)
        return BilinearSamplerBCX_updateOutput_1D(inputImages, grids, output, zero_boundary);
    if (ndim == 2)
        return BilinearSamplerBCXY_updateOutput_2D(inputImages, grids, output, zero_boundary);
    if (ndim == 3)
        return BilinearSamplerBCXYZ_updateOutput_3D(inputImages, grids, output, zero_boundary);
    return -1;
}
int BilinearSamplerBXC_updateGradInput_1D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
THFloatTensor *gradGrids, THFloatTensor *gradOutput, int zero_boundary)
{
    /* Backward pass of the 1D BXC (batch, x, channel) linear sampler.
     *
     * Accumulates the gradient w.r.t. the input image into gradInputImages
     * and writes the gradient w.r.t. the sampling grid into gradGrids.
     * zero_boundary == 1: out-of-image samples receive zero gradient;
     * otherwise the sample index is clamped to the border (replicate).
     *
     * Returns 1 on completion.
     */
    bool onlyGrid=false; /* fixed: image gradients are always accumulated */
    bool zero_boundary_bool= zero_boundary == 1;

    int batchsize = THFloatTensor_size(inputImages,0);
    int inputImages_X = THFloatTensor_size(inputImages,1);
    int gradOutput_X = THFloatTensor_size(gradOutput,1);
    int inputImages_C = THFloatTensor_size(inputImages,2);

    int gradOutput_strideBatch = THFloatTensor_stride(gradOutput,0);
    int gradOutput_stride_X = THFloatTensor_stride(gradOutput,1);
    int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
    int inputImages_stride_X = THFloatTensor_stride(inputImages,1);
    int gradInputImages_strideBatch = THFloatTensor_stride(gradInputImages,0);
    int gradInputImages_stride_X = THFloatTensor_stride(gradInputImages,1);
    int grids_strideBatch = THFloatTensor_stride(grids,0);
    int grids_stride_X = THFloatTensor_stride(grids,1);
    int gradGrids_strideBatch = THFloatTensor_stride(gradGrids,0);
    int gradGrids_stride_X = THFloatTensor_stride(gradGrids,1);

    real *inputImages_data = THFloatTensor_data(inputImages);
    real *gradOutput_data = THFloatTensor_data(gradOutput);
    real *grids_data = THFloatTensor_data(grids);
    real *gradGrids_data = THFloatTensor_data(gradGrids);
    real *gradInputImages_data = THFloatTensor_data(gradInputImages);

    int b;
    for(b=0; b < batchsize; b++)
    {
        #pragma omp parallel for
        for(int xOut=0; xOut < gradOutput_X; xOut++)
        {
            /* normalized sampling coordinate for this output element */
            real xf = grids_data[b*grids_strideBatch + xOut*grids_stride_X];

            /* map [-1,1] -> [0, X-1]; xInLow is the floor neighbour and
             * xWeightLow its interpolation weight */
            real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
            int xInLow = floor(xcoord);
            real xWeightLow = 1 - (xcoord - xInLow);

            bool xBeyondLow = xInLow < 0;
            bool xBeyondHigh = xInLow+1 > inputImages_X-1;

            /* replicate (border) padding: clamp the sample into the image */
            if (!zero_boundary_bool) {
                if (xBeyondLow)
                    xInLow = 0;
                if (xBeyondHigh)
                    xInLow = inputImages_X-2;
            }

            const int inLowAddress = inputImages_strideBatch * b + inputImages_stride_X * xInLow;
            const int inHighAddress = inLowAddress + inputImages_stride_X;
            const int gradInputImagesLowAddress = gradInputImages_strideBatch * b + gradInputImages_stride_X * xInLow;
            const int gradInputImagesHighAddress = gradInputImagesLowAddress + gradInputImages_stride_X;
            const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_stride_X * xOut;

            real lowDotProduct = 0;
            real highDotProduct = 0;

            int t;
            for(t=0; t<inputImages_C; t++)
            {
                real gradOutValue = gradOutput_data[gradOutputAddress + t];
                if (!zero_boundary_bool || (! (xBeyondLow || xBeyondHigh ))){
                    real inLow = inputImages_data[inLowAddress + t];
                    lowDotProduct += inLow * gradOutValue;
                    /* BUGFIX: several xOut iterations run concurrently by
                     * OpenMP can resolve (after clamping) to the same input
                     * cell, so the += into gradInputImages must be atomic
                     * to avoid lost updates. */
                    if(!onlyGrid) {
                        #pragma omp atomic
                        gradInputImages_data[gradInputImagesLowAddress + t] += xWeightLow * gradOutValue;
                    }
                    real inHigh = inputImages_data[inHighAddress + t];
                    highDotProduct += inHigh * gradOutValue;
                    if(!onlyGrid) {
                        #pragma omp atomic
                        gradInputImages_data[gradInputImagesHighAddress + t] += (1-xWeightLow) * gradOutValue;
                    }
                }
            }

            /* d(output)/d(xf): the low neighbour's weight falls with xf and
             * the high neighbour's rises; (X-1)/2 undoes the [-1,1] ->
             * pixel-space mapping. */
            xf = - lowDotProduct + highDotProduct;
            gradGrids_data[b*gradGrids_strideBatch + xOut*gradGrids_stride_X] = xf * (inputImages_X-1) / 2;
        }
    }
    return 1;
}
int BilinearSamplerBCX_updateGradInput_1D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
THFloatTensor *gradGrids, THFloatTensor *gradOutput, int zero_boundary)
{
    /* Backward pass of the 1D BCX (batch, channel, x) linear sampler.
     *
     * Accumulates the gradient w.r.t. the input image into gradInputImages
     * and writes the gradient w.r.t. the sampling grid into gradGrids.
     * zero_boundary == 1: out-of-image samples receive zero gradient;
     * otherwise the sample index is clamped to the border (replicate).
     *
     * Returns 1 on completion.
     */
    bool onlyGrid=false; /* fixed: image gradients are always accumulated */
    bool zero_boundary_bool= zero_boundary == 1;

    int batchsize = THFloatTensor_size(inputImages,0);
    int inputImages_X = THFloatTensor_size(inputImages,2);
    int gradOutput_X = THFloatTensor_size(gradOutput,2);
    int inputImages_C = THFloatTensor_size(inputImages,1);

    int gradOutput_strideBatch = THFloatTensor_stride(gradOutput,0);
    int gradOutput_stride_X = THFloatTensor_stride(gradOutput,2);
    int gradOutput_stride_C = THFloatTensor_stride(gradOutput,1);
    int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
    int inputImages_stride_X = THFloatTensor_stride(inputImages,2);
    int inputImages_stride_C = THFloatTensor_stride(inputImages,1);
    int gradInputImages_strideBatch = THFloatTensor_stride(gradInputImages,0);
    int gradInputImages_stride_X = THFloatTensor_stride(gradInputImages,2);
    int gradInputImages_stride_C = THFloatTensor_stride(gradInputImages,1);
    int grids_strideBatch = THFloatTensor_stride(grids,0);
    int grids_stride_X = THFloatTensor_stride(grids,2);
    int gradGrids_strideBatch = THFloatTensor_stride(gradGrids,0);
    int gradGrids_stride_X = THFloatTensor_stride(gradGrids,2);

    real *inputImages_data = THFloatTensor_data(inputImages);
    real *gradOutput_data = THFloatTensor_data(gradOutput);
    real *grids_data = THFloatTensor_data(grids);
    real *gradGrids_data = THFloatTensor_data(gradGrids);
    real *gradInputImages_data = THFloatTensor_data(gradInputImages);

    int b;
    for(b=0; b < batchsize; b++)
    {
        #pragma omp parallel for
        for(int xOut=0; xOut < gradOutput_X; xOut++)
        {
            /* normalized sampling coordinate for this output element */
            real xf = grids_data[b*grids_strideBatch + xOut*grids_stride_X];

            /* map [-1,1] -> [0, X-1]; xInLow is the floor neighbour and
             * xWeightLow its interpolation weight */
            real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
            int xInLow = floor(xcoord);
            real xWeightLow = 1 - (xcoord - xInLow);

            bool xBeyondLow = xInLow < 0;
            bool xBeyondHigh = xInLow+1 > inputImages_X-1;

            /* replicate (border) padding: clamp the sample into the image */
            if (!zero_boundary_bool) {
                if (xBeyondLow)
                    xInLow = 0;
                if (xBeyondHigh)
                    xInLow = inputImages_X-2;
            }

            const int inLowAddress = inputImages_strideBatch * b + inputImages_stride_X * xInLow;
            const int inHighAddress = inLowAddress + inputImages_stride_X;
            const int gradInputImagesLowAddress = gradInputImages_strideBatch * b + gradInputImages_stride_X * xInLow;
            const int gradInputImagesHighAddress = gradInputImagesLowAddress + gradInputImages_stride_X;
            const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_stride_X * xOut;

            real lowDotProduct = 0;
            real highDotProduct = 0;

            int t;
            for(t=0; t<inputImages_C; t++)
            {
                real gradOutValue = gradOutput_data[gradOutputAddress + t*gradOutput_stride_C];
                if (!zero_boundary_bool || (! (xBeyondLow || xBeyondHigh ))){
                    real inLow = inputImages_data[inLowAddress + t*inputImages_stride_C];
                    lowDotProduct += inLow * gradOutValue;
                    /* BUGFIX: several xOut iterations run concurrently by
                     * OpenMP can resolve (after clamping) to the same input
                     * cell, so the += into gradInputImages must be atomic
                     * to avoid lost updates. */
                    if(!onlyGrid) {
                        #pragma omp atomic
                        gradInputImages_data[gradInputImagesLowAddress + t*gradInputImages_stride_C] += xWeightLow * gradOutValue;
                    }
                    real inHigh = inputImages_data[inHighAddress + t*inputImages_stride_C];
                    highDotProduct += inHigh * gradOutValue;
                    if(!onlyGrid) {
                        #pragma omp atomic
                        gradInputImages_data[gradInputImagesHighAddress + t*gradInputImages_stride_C] += (1-xWeightLow) * gradOutValue;
                    }
                }
            }

            /* d(output)/d(xf): the low neighbour's weight falls with xf and
             * the high neighbour's rises; (X-1)/2 undoes the [-1,1] ->
             * pixel-space mapping. */
            xf = - lowDotProduct + highDotProduct;
            gradGrids_data[b*gradGrids_strideBatch + xOut*gradGrids_stride_X] = xf * (inputImages_X-1) / 2;
        }
    }
    return 1;
}
int BilinearSamplerBXYC_updateGradInput_2D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
THFloatTensor *gradGrids, THFloatTensor *gradOutput, int zero_boundary)
{
    /* Backward pass of the 2D BXYC (batch, x, y, channel) bilinear sampler.
     *
     * Accumulates the gradient w.r.t. the input image into gradInputImages
     * and writes the gradient w.r.t. the sampling grid (x at component 0,
     * y at component 1) into gradGrids.
     * zero_boundary == 1: out-of-image samples receive zero gradient;
     * otherwise the sample index is clamped to the border (replicate).
     *
     * NOTE(review): channels are addressed as `address + t`, i.e. this
     * assumes the channel dimension is contiguous (stride 1) -- confirm
     * against the callers.
     *
     * Returns 1 on completion.
     */
    bool onlyGrid=false; /* fixed: image gradients are always accumulated */
    bool zero_boundary_bool= zero_boundary == 1;

    int batchsize = THFloatTensor_size(inputImages,0);
    int inputImages_X = THFloatTensor_size(inputImages,1);
    int inputImages_Y = THFloatTensor_size(inputImages,2);
    int gradOutput_X = THFloatTensor_size(gradOutput,1);
    int gradOutput_Y = THFloatTensor_size(gradOutput,2);
    int inputImages_C = THFloatTensor_size(inputImages,3);

    int gradOutput_strideBatch = THFloatTensor_stride(gradOutput,0);
    int gradOutput_stride_X = THFloatTensor_stride(gradOutput,1);
    int gradOutput_stride_Y = THFloatTensor_stride(gradOutput,2);
    int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
    int inputImages_stride_X = THFloatTensor_stride(inputImages,1);
    int inputImages_stride_Y = THFloatTensor_stride(inputImages,2);
    int gradInputImages_strideBatch = THFloatTensor_stride(gradInputImages,0);
    int gradInputImages_stride_X = THFloatTensor_stride(gradInputImages,1);
    int gradInputImages_stride_Y = THFloatTensor_stride(gradInputImages,2);
    int grids_strideBatch = THFloatTensor_stride(grids,0);
    int grids_stride_X = THFloatTensor_stride(grids,1);
    int grids_stride_Y = THFloatTensor_stride(grids,2);
    int gradGrids_strideBatch = THFloatTensor_stride(gradGrids,0);
    int gradGrids_stride_X = THFloatTensor_stride(gradGrids,1);
    int gradGrids_stride_Y = THFloatTensor_stride(gradGrids,2);

    real *inputImages_data = THFloatTensor_data(inputImages);
    real *gradOutput_data = THFloatTensor_data(gradOutput);
    real *grids_data = THFloatTensor_data(grids);
    real *gradGrids_data = THFloatTensor_data(gradGrids);
    real *gradInputImages_data = THFloatTensor_data(gradInputImages);

    int b;
    for(b=0; b < batchsize; b++)
    {
        /* BUGFIX 1: yOut is declared in its loop header so each OpenMP
         * thread gets a private copy.  Previously it was declared at
         * function scope and therefore shared between threads (data race). */
        #pragma omp parallel for
        for(int xOut=0; xOut < gradOutput_X; xOut++)
        {
            for(int yOut=0; yOut < gradOutput_Y; yOut++)
            {
                /* normalized sampling coordinates for this output pixel */
                real xf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X];
                real yf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X + 1];

                /* map [-1,1] -> [0, size-1]; LowLow is the floor corner */
                real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
                int xInLowLow = floor(xcoord);
                real xWeightLowLow = 1 - (xcoord - xInLowLow);

                real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
                int yInLowLow = floor(ycoord);
                real yWeightLowLow = 1 - (ycoord - yInLowLow);

                bool xBeyondLow = xInLowLow < 0;
                bool yBeyondLow = yInLowLow < 0;
                bool xBeyondHigh = xInLowLow+1 > inputImages_X-1;
                bool yBeyondHigh = yInLowLow+1 > inputImages_Y-1;

                /* replicate (border) padding: clamp the corner into the image */
                if (!zero_boundary_bool) {
                    if (xBeyondLow)
                        xInLowLow = 0;
                    if (xBeyondHigh)
                        xInLowLow = inputImages_X-2;
                    if (yBeyondLow)
                        yInLowLow = 0;
                    if (yBeyondHigh)
                        yInLowLow = inputImages_Y-2;
                }

                const int inLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Y * yInLowLow + inputImages_stride_X * xInLowLow;
                const int inLowHighAddress = inLowLowAddress + inputImages_stride_Y;
                const int inHighLowAddress = inLowLowAddress + inputImages_stride_X;
                const int inHighHighAddress = inHighLowAddress + inputImages_stride_Y;
                const int gradInputImagesLowLowAddress = gradInputImages_strideBatch * b + gradInputImages_stride_Y * yInLowLow + gradInputImages_stride_X * xInLowLow;
                const int gradInputImagesLowHighAddress = gradInputImagesLowLowAddress + gradInputImages_stride_Y;
                const int gradInputImagesHighLowAddress = gradInputImagesLowLowAddress + gradInputImages_stride_X;
                const int gradInputImagesHighHighAddress = gradInputImagesHighLowAddress + gradInputImages_stride_Y;
                const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_stride_Y * yOut + gradOutput_stride_X * xOut;

                real lowLowDotProduct = 0;
                real lowHighDotProduct = 0;
                real highLowDotProduct = 0;
                real highHighDotProduct = 0;

                int t;
                for(t=0; t<inputImages_C; t++)
                {
                    real gradOutValue = gradOutput_data[gradOutputAddress + t];
                    if (!zero_boundary_bool || (! (xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh))){
                        /* BUGFIX 2: several (xOut, yOut) sites processed by
                         * different OpenMP threads can resolve to the same
                         * input cell, so each += into gradInputImages must
                         * be atomic to avoid lost updates.  The corner
                         * weights mirror the forward pass (LowHigh pairs
                         * x-low with y-high, etc.). */
                        real inLowLow = inputImages_data[inLowLowAddress + t];
                        lowLowDotProduct += inLowLow * gradOutValue;
                        if(!onlyGrid) {
                            #pragma omp atomic
                            gradInputImages_data[gradInputImagesLowLowAddress + t] += xWeightLowLow * yWeightLowLow * gradOutValue;
                        }
                        real inLowHigh = inputImages_data[inLowHighAddress + t];
                        lowHighDotProduct += inLowHigh * gradOutValue;
                        if(!onlyGrid) {
                            #pragma omp atomic
                            gradInputImages_data[gradInputImagesLowHighAddress + t] += xWeightLowLow * (1-yWeightLowLow) * gradOutValue;
                        }
                        real inHighLow = inputImages_data[inHighLowAddress + t];
                        highLowDotProduct += inHighLow * gradOutValue;
                        if(!onlyGrid) {
                            #pragma omp atomic
                            gradInputImages_data[gradInputImagesHighLowAddress + t] += (1-xWeightLowLow) * yWeightLowLow * gradOutValue;
                        }
                        real inHighHigh = inputImages_data[inHighHighAddress + t];
                        highHighDotProduct += inHighHigh * gradOutValue;
                        if(!onlyGrid) {
                            #pragma omp atomic
                            gradInputImages_data[gradInputImagesHighHighAddress + t] += (1 - xWeightLowLow) * (1 - yWeightLowLow) * gradOutValue;
                        }
                    }
                }

                /* grid gradients: each (xOut, yOut) writes distinct gradGrids
                 * cells, so no atomics are needed here.  The (size-1)/2
                 * factor undoes the [-1,1] -> pixel-space mapping. */
                xf = - yWeightLowLow * lowLowDotProduct + yWeightLowLow * highLowDotProduct
                     - (1-yWeightLowLow) * lowHighDotProduct + (1-yWeightLowLow) * highHighDotProduct;
                yf = - xWeightLowLow * lowLowDotProduct + xWeightLowLow * lowHighDotProduct
                     - (1-xWeightLowLow) * highLowDotProduct + (1-xWeightLowLow) * highHighDotProduct;

                gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X] = xf * (inputImages_X-1) / 2;
                gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X + 1] = yf * (inputImages_Y-1) / 2;
            }
        }
    }
    return 1;
}
/* Backward pass of 2-D bilinear grid sampling for BCXY tensors
 * (dim 0 = batch, dim 1 = channel, dim 2 = x, dim 3 = y).
 *
 * Given gradOutput (gradient w.r.t. the sampled output), accumulates
 *   - gradInputImages: scatter-add of the four bilinear corner weights
 *     times gradOutput at each sampled location, and
 *   - gradGrids: gradient w.r.t. the normalized [-1,1] sampling grid,
 *     built from per-corner dot products of the input with gradOutput.
 *
 * zero_boundary == 1: samples whose 2x2 stencil leaves the image
 * contribute zero gradient everywhere. Otherwise indices are clamped to
 * the border (replicate padding).
 *
 * Always returns 1.
 *
 * Fix: the loop counters are now declared inside the for-headers. The
 * previous function-scope `yOut` was *shared* between the threads of the
 * `omp parallel for` over xOut, a data race that corrupted the iteration.
 *
 * NOTE(review): distinct (xOut, yOut) threads may still scatter-add into
 * the same gradInputImages element when grid points coincide; those `+=`
 * updates are not atomic. Confirm this matches the accuracy requirements
 * or serialize/atomicize if exact gradients are needed.
 */
int BilinearSamplerBCXY_updateGradInput_2D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
THFloatTensor *gradGrids, THFloatTensor *gradOutput, int zero_boundary)
{
    bool onlyGrid = false;  /* kept for parity with the other sampler variants: when true, skip the image gradient */
    bool zero_boundary_bool = zero_boundary == 1;

    /* sizes: dim 0 = batch, dim 1 = channel, dim 2 = x, dim 3 = y */
    int batchsize     = THFloatTensor_size(inputImages, 0);
    int inputImages_X = THFloatTensor_size(inputImages, 2);
    int inputImages_Y = THFloatTensor_size(inputImages, 3);
    int gradOutput_X  = THFloatTensor_size(gradOutput, 2);
    int gradOutput_Y  = THFloatTensor_size(gradOutput, 3);
    int inputImages_C = THFloatTensor_size(inputImages, 1);

    int gradOutput_strideBatch = THFloatTensor_stride(gradOutput, 0);
    int gradOutput_stride_X    = THFloatTensor_stride(gradOutput, 2);
    int gradOutput_stride_Y    = THFloatTensor_stride(gradOutput, 3);
    int gradOutput_stride_C    = THFloatTensor_stride(gradOutput, 1);

    int inputImages_strideBatch = THFloatTensor_stride(inputImages, 0);
    int inputImages_stride_X    = THFloatTensor_stride(inputImages, 2);
    int inputImages_stride_Y    = THFloatTensor_stride(inputImages, 3);
    int inputImages_stride_C    = THFloatTensor_stride(inputImages, 1);

    int gradInputImages_strideBatch = THFloatTensor_stride(gradInputImages, 0);
    int gradInputImages_stride_X    = THFloatTensor_stride(gradInputImages, 2);
    int gradInputImages_stride_Y    = THFloatTensor_stride(gradInputImages, 3);
    int gradInputImages_stride_C    = THFloatTensor_stride(gradInputImages, 1);

    int grids_strideBatch = THFloatTensor_stride(grids, 0);
    int grids_stride_X    = THFloatTensor_stride(grids, 2);
    int grids_stride_Y    = THFloatTensor_stride(grids, 3);
    int grids_stride_C    = THFloatTensor_stride(grids, 1);

    int gradGrids_strideBatch = THFloatTensor_stride(gradGrids, 0);
    int gradGrids_stride_X    = THFloatTensor_stride(gradGrids, 2);
    int gradGrids_stride_Y    = THFloatTensor_stride(gradGrids, 3);
    int gradGrids_stride_C    = THFloatTensor_stride(gradGrids, 1);

    real *inputImages_data     = THFloatTensor_data(inputImages);
    real *gradOutput_data      = THFloatTensor_data(gradOutput);
    real *grids_data           = THFloatTensor_data(grids);
    real *gradGrids_data       = THFloatTensor_data(gradGrids);
    real *gradInputImages_data = THFloatTensor_data(gradInputImages);

    for (int b = 0; b < batchsize; b++)
    {
        /* Counters declared in the loop headers are private per OpenMP thread. */
        #pragma omp parallel for
        for (int xOut = 0; xOut < gradOutput_X; xOut++)
        {
            for (int yOut = 0; yOut < gradOutput_Y; yOut++)
            {
                /* read the normalized sample position; channel 1 of the grid holds y */
                real xf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X];
                real yf = grids_data[b*grids_strideBatch + yOut*grids_stride_Y + xOut*grids_stride_X + grids_stride_C];

                /* map [-1,1] -> pixel coordinates; split into the low ("LowLow")
                   corner index and its bilinear weight */
                real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
                int xInLowLow = floor(xcoord);
                real xWeightLowLow = 1 - (xcoord - xInLowLow);

                real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
                int yInLowLow = floor(ycoord);
                real yWeightLowLow = 1 - (ycoord - yInLowLow);

                bool xBeyondLow  = xInLowLow < 0;
                bool yBeyondLow  = yInLowLow < 0;
                bool xBeyondHigh = xInLowLow + 1 > inputImages_X - 1;
                bool yBeyondHigh = yInLowLow + 1 > inputImages_Y - 1;

                /* replicate-border condition: clamp so the 2x2 stencil stays inside */
                if (!zero_boundary_bool) {
                    if (xBeyondLow)  xInLowLow = 0;
                    if (xBeyondHigh) xInLowLow = inputImages_X - 2;
                    if (yBeyondLow)  yInLowLow = 0;
                    if (yBeyondHigh) yInLowLow = inputImages_Y - 2;
                }

                const int inLowLowAddress   = inputImages_strideBatch * b + inputImages_stride_Y * yInLowLow + inputImages_stride_X * xInLowLow;
                const int inLowHighAddress  = inLowLowAddress + inputImages_stride_Y;
                const int inHighLowAddress  = inLowLowAddress + inputImages_stride_X;
                const int inHighHighAddress = inHighLowAddress + inputImages_stride_Y;

                const int gradInputImagesLowLowAddress   = gradInputImages_strideBatch * b + gradInputImages_stride_Y * yInLowLow + gradInputImages_stride_X * xInLowLow;
                const int gradInputImagesLowHighAddress  = gradInputImagesLowLowAddress + gradInputImages_stride_Y;
                const int gradInputImagesHighLowAddress  = gradInputImagesLowLowAddress + gradInputImages_stride_X;
                const int gradInputImagesHighHighAddress = gradInputImagesHighLowAddress + gradInputImages_stride_Y;

                const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_stride_Y * yOut + gradOutput_stride_X * xOut;

                /* per-corner dot products <input_corner, gradOutput> over channels;
                   they feed the grid gradient below */
                real lowLowDotProduct   = 0;
                real lowHighDotProduct  = 0;
                real highLowDotProduct  = 0;
                real highHighDotProduct = 0;

                for (int t = 0; t < inputImages_C; t++)
                {
                    real gradOutValue = gradOutput_data[gradOutputAddress + t*gradOutput_stride_C];
                    /* with zero boundary, an out-of-range sample contributes nothing:
                       dot products stay 0 and the image gradient is untouched */
                    if (!zero_boundary_bool || !(xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh)) {
                        real inLowLow = inputImages_data[inLowLowAddress + t*inputImages_stride_C];
                        lowLowDotProduct += inLowLow * gradOutValue;
                        if (!onlyGrid) gradInputImages_data[gradInputImagesLowLowAddress + t*gradInputImages_stride_C] += xWeightLowLow * yWeightLowLow * gradOutValue;

                        real inLowHigh = inputImages_data[inLowHighAddress + t*inputImages_stride_C];
                        lowHighDotProduct += inLowHigh * gradOutValue;
                        if (!onlyGrid) gradInputImages_data[gradInputImagesLowHighAddress + t*gradInputImages_stride_C] += xWeightLowLow * (1 - yWeightLowLow) * gradOutValue;

                        real inHighLow = inputImages_data[inHighLowAddress + t*inputImages_stride_C];
                        highLowDotProduct += inHighLow * gradOutValue;
                        if (!onlyGrid) gradInputImages_data[gradInputImagesHighLowAddress + t*gradInputImages_stride_C] += (1 - xWeightLowLow) * yWeightLowLow * gradOutValue;

                        real inHighHigh = inputImages_data[inHighHighAddress + t*inputImages_stride_C];
                        highHighDotProduct += inHighHigh * gradOutValue;
                        if (!onlyGrid) gradInputImages_data[gradInputImagesHighHighAddress + t*gradInputImages_stride_C] += (1 - xWeightLowLow) * (1 - yWeightLowLow) * gradOutValue;
                    }
                }

                /* derivative of the bilinear interpolant w.r.t. the pixel
                   coordinates (xf/yf reused as scratch for d/dx, d/dy) */
                xf = - yWeightLowLow * lowLowDotProduct + yWeightLowLow * highLowDotProduct
                     - (1 - yWeightLowLow) * lowHighDotProduct + (1 - yWeightLowLow) * highHighDotProduct;
                yf = - xWeightLowLow * lowLowDotProduct + xWeightLowLow * lowHighDotProduct
                     - (1 - xWeightLowLow) * highLowDotProduct + (1 - xWeightLowLow) * highHighDotProduct;

                /* chain rule through the [-1,1] -> pixel mapping */
                gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X] = xf * (inputImages_X - 1) / 2;
                gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X + gradGrids_stride_C] = yf * (inputImages_Y - 1) / 2;
            }
        }
    }
    return 1;
}
/* Backward pass of 3-D trilinear grid sampling for BXYZC tensors
 * (dim 0 = batch, dims 1-3 = x, y, z, dim 4 = channel).
 *
 * Accumulates gradInputImages (scatter-add of the eight trilinear corner
 * weights times gradOutput) and gradGrids (gradient w.r.t. the normalized
 * [-1,1] grid, from per-corner dot products of input with gradOutput).
 *
 * zero_boundary == 1: samples whose 2x2x2 stencil leaves the volume
 * contribute zero gradient; otherwise indices are clamped to the border.
 *
 * Always returns 1.
 *
 * Fix: loop counters are now declared inside the for-headers. The previous
 * function-scope `yOut`/`zOut` were *shared* between the threads of the
 * `omp parallel for` over xOut — a data race corrupting the iteration.
 *
 * NOTE(review): channel access uses `+ t` and the grid/gradGrids channels
 * use `+ 1`/`+ 2`, i.e. this variant assumes the last dimension has
 * stride 1 (contiguous tensors) — confirm with the callers.
 * NOTE(review): the `+=` scatter-adds into gradInputImages are not atomic
 * and can race when different output voxels sample the same input voxel.
 */
int BilinearSamplerBXYZC_updateGradInput_3D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
THFloatTensor *gradGrids, THFloatTensor *gradOutput, int zero_boundary)
{
    bool onlyGrid = false;  /* kept for parity with the other sampler variants: when true, skip the image gradient */
    bool zero_boundary_bool = zero_boundary == 1;

    /* sizes: dim 0 = batch, dims 1-3 = x,y,z, dim 4 = channel */
    int batchsize     = THFloatTensor_size(inputImages, 0);
    int inputImages_X = THFloatTensor_size(inputImages, 1);
    int inputImages_Y = THFloatTensor_size(inputImages, 2);
    int inputImages_Z = THFloatTensor_size(inputImages, 3);
    int gradOutput_X  = THFloatTensor_size(gradOutput, 1);
    int gradOutput_Y  = THFloatTensor_size(gradOutput, 2);
    int gradOutput_Z  = THFloatTensor_size(gradOutput, 3);
    int inputImages_C = THFloatTensor_size(inputImages, 4);

    int gradOutput_strideBatch = THFloatTensor_stride(gradOutput, 0);
    int gradOutput_stride_X    = THFloatTensor_stride(gradOutput, 1);
    int gradOutput_stride_Y    = THFloatTensor_stride(gradOutput, 2);
    int gradOutput_stride_Z    = THFloatTensor_stride(gradOutput, 3);

    int inputImages_strideBatch = THFloatTensor_stride(inputImages, 0);
    int inputImages_stride_X    = THFloatTensor_stride(inputImages, 1);
    int inputImages_stride_Y    = THFloatTensor_stride(inputImages, 2);
    int inputImages_stride_Z    = THFloatTensor_stride(inputImages, 3);

    int gradInputImages_strideBatch = THFloatTensor_stride(gradInputImages, 0);
    int gradInputImages_stride_X    = THFloatTensor_stride(gradInputImages, 1);
    int gradInputImages_stride_Y    = THFloatTensor_stride(gradInputImages, 2);
    int gradInputImages_stride_Z    = THFloatTensor_stride(gradInputImages, 3);

    int grids_strideBatch = THFloatTensor_stride(grids, 0);
    int grids_stride_X    = THFloatTensor_stride(grids, 1);
    int grids_stride_Y    = THFloatTensor_stride(grids, 2);
    int grids_stride_Z    = THFloatTensor_stride(grids, 3);

    int gradGrids_strideBatch = THFloatTensor_stride(gradGrids, 0);
    int gradGrids_stride_X    = THFloatTensor_stride(gradGrids, 1);
    int gradGrids_stride_Y    = THFloatTensor_stride(gradGrids, 2);
    int gradGrids_stride_Z    = THFloatTensor_stride(gradGrids, 3);

    real *inputImages_data     = THFloatTensor_data(inputImages);
    real *gradOutput_data      = THFloatTensor_data(gradOutput);
    real *grids_data           = THFloatTensor_data(grids);
    real *gradGrids_data       = THFloatTensor_data(gradGrids);
    real *gradInputImages_data = THFloatTensor_data(gradInputImages);

    for (int b = 0; b < batchsize; b++)
    {
        /* Counters declared in the loop headers are private per OpenMP thread. */
        #pragma omp parallel for
        for (int xOut = 0; xOut < gradOutput_X; xOut++)
        {
            for (int yOut = 0; yOut < gradOutput_Y; yOut++)
            {
                for (int zOut = 0; zOut < gradOutput_Z; zOut++)
                {
                    /* read the normalized sample position (x, y, z in grid channels 0..2) */
                    real xf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X];
                    real yf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X + 1];
                    real zf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X + 2];

                    /* map [-1,1] -> voxel coordinates; split into the low
                       ("LowLowLow") corner index and its trilinear weight */
                    real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
                    int xInLowLowLow = floor(xcoord);
                    real xWeightLowLowLow = 1 - (xcoord - xInLowLowLow);

                    real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
                    int yInLowLowLow = floor(ycoord);
                    real yWeightLowLowLow = 1 - (ycoord - yInLowLowLow);

                    real zcoord = (zf + 1) * (inputImages_Z - 1) / 2;
                    int zInLowLowLow = floor(zcoord);
                    real zWeightLowLowLow = 1 - (zcoord - zInLowLowLow);

                    bool xBeyondLow  = xInLowLowLow < 0;
                    bool yBeyondLow  = yInLowLowLow < 0;
                    bool zBeyondLow  = zInLowLowLow < 0;
                    bool xBeyondHigh = xInLowLowLow + 1 > inputImages_X - 1;
                    bool yBeyondHigh = yInLowLowLow + 1 > inputImages_Y - 1;
                    bool zBeyondHigh = zInLowLowLow + 1 > inputImages_Z - 1;

                    /* replicate-border condition: clamp so the 2x2x2 stencil stays inside */
                    if (!zero_boundary_bool) {
                        if (xBeyondLow)  xInLowLowLow = 0;
                        if (xBeyondHigh) xInLowLowLow = inputImages_X - 2;
                        if (yBeyondLow)  yInLowLowLow = 0;
                        if (yBeyondHigh) yInLowLowLow = inputImages_Y - 2;
                        if (zBeyondLow)  zInLowLowLow = 0;
                        if (zBeyondHigh) zInLowLowLow = inputImages_Z - 2;
                    }

                    /* corner naming is in{X}{Y}{Z}: High on an axis adds that axis' stride */
                    const int inLowLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Z * zInLowLowLow +
                        inputImages_stride_Y * yInLowLowLow + inputImages_stride_X * xInLowLowLow;
                    const int inLowLowHighAddress   = inLowLowLowAddress + inputImages_stride_Z;
                    const int inLowHighLowAddress   = inLowLowLowAddress + inputImages_stride_Y;
                    const int inLowHighHighAddress  = inLowLowLowAddress + inputImages_stride_Y + inputImages_stride_Z;
                    const int inHighLowLowAddress   = inLowLowLowAddress + inputImages_stride_X;
                    const int inHighLowHighAddress  = inHighLowLowAddress + inputImages_stride_Z;
                    const int inHighHighLowAddress  = inHighLowLowAddress + inputImages_stride_Y;
                    const int inHighHighHighAddress = inHighLowLowAddress + inputImages_stride_Y + inputImages_stride_Z;

                    const int gradInputImagesLowLowLowAddress = gradInputImages_strideBatch * b + gradInputImages_stride_Z * zInLowLowLow +
                        gradInputImages_stride_Y * yInLowLowLow + gradInputImages_stride_X * xInLowLowLow;
                    const int gradInputImagesLowLowHighAddress   = gradInputImagesLowLowLowAddress + gradInputImages_stride_Z;
                    const int gradInputImagesLowHighLowAddress   = gradInputImagesLowLowLowAddress + gradInputImages_stride_Y;
                    const int gradInputImagesLowHighHighAddress  = gradInputImagesLowLowLowAddress + gradInputImages_stride_Y + gradInputImages_stride_Z;
                    const int gradInputImagesHighLowLowAddress   = gradInputImagesLowLowLowAddress + gradInputImages_stride_X;
                    const int gradInputImagesHighLowHighAddress  = gradInputImagesHighLowLowAddress + gradInputImages_stride_Z;
                    const int gradInputImagesHighHighLowAddress  = gradInputImagesHighLowLowAddress + gradInputImages_stride_Y;
                    const int gradInputImagesHighHighHighAddress = gradInputImagesHighLowLowAddress + gradInputImages_stride_Y + gradInputImages_stride_Z;

                    const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_stride_Z * zOut + gradOutput_stride_Y * yOut + gradOutput_stride_X * xOut;

                    /* per-corner dot products <input_corner, gradOutput> over channels */
                    real lowLowLowDotProduct    = 0;
                    real lowLowHighDotProduct   = 0;
                    real lowHighLowDotProduct   = 0;
                    real lowHighHighDotProduct  = 0;
                    real highLowLowDotProduct   = 0;
                    real highLowHighDotProduct  = 0;
                    real highHighLowDotProduct  = 0;
                    real highHighHighDotProduct = 0;

                    for (int t = 0; t < inputImages_C; t++)
                    {
                        real gradOutValue = gradOutput_data[gradOutputAddress + t];
                        /* with zero boundary, an out-of-range sample contributes nothing */
                        if (!zero_boundary_bool || !(xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh || zBeyondLow || zBeyondHigh)) {
                            real inLowLowLow = inputImages_data[inLowLowLowAddress + t];
                            lowLowLowDotProduct += inLowLowLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowLowLowAddress + t] += xWeightLowLowLow * yWeightLowLowLow * zWeightLowLowLow * gradOutValue;

                            real inLowLowHigh = inputImages_data[inLowLowHighAddress + t];
                            lowLowHighDotProduct += inLowLowHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowLowHighAddress + t] += xWeightLowLowLow * yWeightLowLowLow * (1 - zWeightLowLowLow) * gradOutValue;

                            real inLowHighLow = inputImages_data[inLowHighLowAddress + t];
                            lowHighLowDotProduct += inLowHighLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowHighLowAddress + t] += xWeightLowLowLow * (1 - yWeightLowLowLow) * zWeightLowLowLow * gradOutValue;

                            real inLowHighHigh = inputImages_data[inLowHighHighAddress + t];
                            lowHighHighDotProduct += inLowHighHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowHighHighAddress + t] += xWeightLowLowLow * (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * gradOutValue;

                            real inHighLowLow = inputImages_data[inHighLowLowAddress + t];
                            highLowLowDotProduct += inHighLowLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighLowLowAddress + t] += (1 - xWeightLowLowLow) * yWeightLowLowLow * zWeightLowLowLow * gradOutValue;

                            real inHighLowHigh = inputImages_data[inHighLowHighAddress + t];
                            highLowHighDotProduct += inHighLowHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighLowHighAddress + t] += (1 - xWeightLowLowLow) * yWeightLowLowLow * (1 - zWeightLowLowLow) * gradOutValue;

                            real inHighHighLow = inputImages_data[inHighHighLowAddress + t];
                            highHighLowDotProduct += inHighHighLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighHighLowAddress + t] += (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * zWeightLowLowLow * gradOutValue;

                            real inHighHighHigh = inputImages_data[inHighHighHighAddress + t];
                            highHighHighDotProduct += inHighHighHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighHighHighAddress + t] += (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * gradOutValue;
                        }
                    }

                    /* derivative of the trilinear interpolant w.r.t. the voxel
                       coordinates (xf/yf/zf reused as scratch for d/dx, d/dy, d/dz) */
                    xf = - yWeightLowLowLow * zWeightLowLowLow * lowLowLowDotProduct + yWeightLowLowLow * zWeightLowLowLow * highLowLowDotProduct
                         - yWeightLowLowLow * (1 - zWeightLowLowLow) * lowLowHighDotProduct + yWeightLowLowLow * (1 - zWeightLowLowLow) * highLowHighDotProduct
                         - (1 - yWeightLowLowLow) * zWeightLowLowLow * lowHighLowDotProduct + (1 - yWeightLowLowLow) * zWeightLowLowLow * highHighLowDotProduct
                         - (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * lowHighHighDotProduct + (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * highHighHighDotProduct;
                    yf = - xWeightLowLowLow * zWeightLowLowLow * lowLowLowDotProduct + xWeightLowLowLow * zWeightLowLowLow * lowHighLowDotProduct
                         - xWeightLowLowLow * (1 - zWeightLowLowLow) * lowLowHighDotProduct + xWeightLowLowLow * (1 - zWeightLowLowLow) * lowHighHighDotProduct
                         - (1 - xWeightLowLowLow) * zWeightLowLowLow * highLowLowDotProduct + (1 - xWeightLowLowLow) * zWeightLowLowLow * highHighLowDotProduct
                         - (1 - xWeightLowLowLow) * (1 - zWeightLowLowLow) * highLowHighDotProduct + (1 - xWeightLowLowLow) * (1 - zWeightLowLowLow) * highHighHighDotProduct;
                    zf = - xWeightLowLowLow * yWeightLowLowLow * lowLowLowDotProduct + xWeightLowLowLow * yWeightLowLowLow * lowLowHighDotProduct
                         - (1 - xWeightLowLowLow) * yWeightLowLowLow * highLowLowDotProduct + (1 - xWeightLowLowLow) * yWeightLowLowLow * highLowHighDotProduct
                         - xWeightLowLowLow * (1 - yWeightLowLowLow) * lowHighLowDotProduct + xWeightLowLowLow * (1 - yWeightLowLowLow) * lowHighHighDotProduct
                         - (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * highHighLowDotProduct + (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * highHighHighDotProduct;

                    /* chain rule through the [-1,1] -> voxel mapping */
                    gradGrids_data[b*gradGrids_strideBatch + zOut*gradGrids_stride_Z + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X] = xf * (inputImages_X - 1) / 2;
                    gradGrids_data[b*gradGrids_strideBatch + zOut*gradGrids_stride_Z + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X + 1] = yf * (inputImages_Y - 1) / 2;
                    gradGrids_data[b*gradGrids_strideBatch + zOut*gradGrids_stride_Z + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X + 2] = zf * (inputImages_Z - 1) / 2;
                }
            }
        }
    }
    return 1;
}
/* Backward pass of 3-D trilinear grid sampling for BCXYZ tensors
 * (dim 0 = batch, dim 1 = channel, dims 2-4 = x, y, z).
 *
 * Accumulates gradInputImages (scatter-add of the eight trilinear corner
 * weights times gradOutput) and gradGrids (gradient w.r.t. the normalized
 * [-1,1] grid, from per-corner dot products of input with gradOutput).
 * Unlike the BXYZC variant, all channel accesses use the explicit channel
 * stride, so non-contiguous layouts are handled.
 *
 * zero_boundary == 1: samples whose 2x2x2 stencil leaves the volume
 * contribute zero gradient; otherwise indices are clamped to the border.
 *
 * Always returns 1.
 *
 * Fix: loop counters are now declared inside the for-headers. The previous
 * function-scope `yOut`/`zOut` were *shared* between the threads of the
 * `omp parallel for` over xOut — a data race corrupting the iteration.
 *
 * NOTE(review): the `+=` scatter-adds into gradInputImages are not atomic
 * and can race when different output voxels sample the same input voxel.
 */
int BilinearSamplerBCXYZ_updateGradInput_3D(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
THFloatTensor *gradGrids, THFloatTensor *gradOutput, int zero_boundary)
{
    bool onlyGrid = false;  /* kept for parity with the other sampler variants: when true, skip the image gradient */
    bool zero_boundary_bool = zero_boundary == 1;

    /* sizes: dim 0 = batch, dim 1 = channel, dims 2-4 = x,y,z */
    int batchsize     = THFloatTensor_size(inputImages, 0);
    int inputImages_X = THFloatTensor_size(inputImages, 2);
    int inputImages_Y = THFloatTensor_size(inputImages, 3);
    int inputImages_Z = THFloatTensor_size(inputImages, 4);
    int gradOutput_X  = THFloatTensor_size(gradOutput, 2);
    int gradOutput_Y  = THFloatTensor_size(gradOutput, 3);
    int gradOutput_Z  = THFloatTensor_size(gradOutput, 4);
    int inputImages_C = THFloatTensor_size(inputImages, 1);

    int gradOutput_strideBatch = THFloatTensor_stride(gradOutput, 0);
    int gradOutput_stride_X    = THFloatTensor_stride(gradOutput, 2);
    int gradOutput_stride_Y    = THFloatTensor_stride(gradOutput, 3);
    int gradOutput_stride_Z    = THFloatTensor_stride(gradOutput, 4);
    int gradOutput_stride_C    = THFloatTensor_stride(gradOutput, 1);

    int inputImages_strideBatch = THFloatTensor_stride(inputImages, 0);
    int inputImages_stride_X    = THFloatTensor_stride(inputImages, 2);
    int inputImages_stride_Y    = THFloatTensor_stride(inputImages, 3);
    int inputImages_stride_Z    = THFloatTensor_stride(inputImages, 4);
    int inputImages_stride_C    = THFloatTensor_stride(inputImages, 1);

    int gradInputImages_strideBatch = THFloatTensor_stride(gradInputImages, 0);
    int gradInputImages_stride_X    = THFloatTensor_stride(gradInputImages, 2);
    int gradInputImages_stride_Y    = THFloatTensor_stride(gradInputImages, 3);
    int gradInputImages_stride_Z    = THFloatTensor_stride(gradInputImages, 4);
    int gradInputImages_stride_C    = THFloatTensor_stride(gradInputImages, 1);

    int grids_strideBatch = THFloatTensor_stride(grids, 0);
    int grids_stride_X    = THFloatTensor_stride(grids, 2);
    int grids_stride_Y    = THFloatTensor_stride(grids, 3);
    int grids_stride_Z    = THFloatTensor_stride(grids, 4);
    int grids_stride_C    = THFloatTensor_stride(grids, 1);

    int gradGrids_strideBatch = THFloatTensor_stride(gradGrids, 0);
    int gradGrids_stride_X    = THFloatTensor_stride(gradGrids, 2);
    int gradGrids_stride_Y    = THFloatTensor_stride(gradGrids, 3);
    int gradGrids_stride_Z    = THFloatTensor_stride(gradGrids, 4);
    int gradGrids_stride_C    = THFloatTensor_stride(gradGrids, 1);

    real *inputImages_data     = THFloatTensor_data(inputImages);
    real *gradOutput_data      = THFloatTensor_data(gradOutput);
    real *grids_data           = THFloatTensor_data(grids);
    real *gradGrids_data       = THFloatTensor_data(gradGrids);
    real *gradInputImages_data = THFloatTensor_data(gradInputImages);

    for (int b = 0; b < batchsize; b++)
    {
        /* Counters declared in the loop headers are private per OpenMP thread. */
        #pragma omp parallel for
        for (int xOut = 0; xOut < gradOutput_X; xOut++)
        {
            for (int yOut = 0; yOut < gradOutput_Y; yOut++)
            {
                for (int zOut = 0; zOut < gradOutput_Z; zOut++)
                {
                    /* read the normalized sample position (x, y, z in grid channels 0..2) */
                    real xf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X];
                    real yf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X + grids_stride_C];
                    real zf = grids_data[b*grids_strideBatch + zOut*grids_stride_Z + yOut*grids_stride_Y + xOut*grids_stride_X + 2*grids_stride_C];

                    /* map [-1,1] -> voxel coordinates; split into the low
                       ("LowLowLow") corner index and its trilinear weight */
                    real xcoord = (xf + 1) * (inputImages_X - 1) / 2;
                    int xInLowLowLow = floor(xcoord);
                    real xWeightLowLowLow = 1 - (xcoord - xInLowLowLow);

                    real ycoord = (yf + 1) * (inputImages_Y - 1) / 2;
                    int yInLowLowLow = floor(ycoord);
                    real yWeightLowLowLow = 1 - (ycoord - yInLowLowLow);

                    real zcoord = (zf + 1) * (inputImages_Z - 1) / 2;
                    int zInLowLowLow = floor(zcoord);
                    real zWeightLowLowLow = 1 - (zcoord - zInLowLowLow);

                    bool xBeyondLow  = xInLowLowLow < 0;
                    bool yBeyondLow  = yInLowLowLow < 0;
                    bool zBeyondLow  = zInLowLowLow < 0;
                    bool xBeyondHigh = xInLowLowLow + 1 > inputImages_X - 1;
                    bool yBeyondHigh = yInLowLowLow + 1 > inputImages_Y - 1;
                    bool zBeyondHigh = zInLowLowLow + 1 > inputImages_Z - 1;

                    /* replicate-border condition: clamp so the 2x2x2 stencil stays inside */
                    if (!zero_boundary_bool) {
                        if (xBeyondLow)  xInLowLowLow = 0;
                        if (xBeyondHigh) xInLowLowLow = inputImages_X - 2;
                        if (yBeyondLow)  yInLowLowLow = 0;
                        if (yBeyondHigh) yInLowLowLow = inputImages_Y - 2;
                        if (zBeyondLow)  zInLowLowLow = 0;
                        if (zBeyondHigh) zInLowLowLow = inputImages_Z - 2;
                    }

                    /* corner naming is in{X}{Y}{Z}: High on an axis adds that axis' stride */
                    const int inLowLowLowAddress = inputImages_strideBatch * b + inputImages_stride_Z * zInLowLowLow +
                        inputImages_stride_Y * yInLowLowLow + inputImages_stride_X * xInLowLowLow;
                    const int inLowLowHighAddress   = inLowLowLowAddress + inputImages_stride_Z;
                    const int inLowHighLowAddress   = inLowLowLowAddress + inputImages_stride_Y;
                    const int inLowHighHighAddress  = inLowLowLowAddress + inputImages_stride_Y + inputImages_stride_Z;
                    const int inHighLowLowAddress   = inLowLowLowAddress + inputImages_stride_X;
                    const int inHighLowHighAddress  = inHighLowLowAddress + inputImages_stride_Z;
                    const int inHighHighLowAddress  = inHighLowLowAddress + inputImages_stride_Y;
                    const int inHighHighHighAddress = inHighLowLowAddress + inputImages_stride_Y + inputImages_stride_Z;

                    const int gradInputImagesLowLowLowAddress = gradInputImages_strideBatch * b + gradInputImages_stride_Z * zInLowLowLow +
                        gradInputImages_stride_Y * yInLowLowLow + gradInputImages_stride_X * xInLowLowLow;
                    const int gradInputImagesLowLowHighAddress   = gradInputImagesLowLowLowAddress + gradInputImages_stride_Z;
                    const int gradInputImagesLowHighLowAddress   = gradInputImagesLowLowLowAddress + gradInputImages_stride_Y;
                    const int gradInputImagesLowHighHighAddress  = gradInputImagesLowLowLowAddress + gradInputImages_stride_Y + gradInputImages_stride_Z;
                    const int gradInputImagesHighLowLowAddress   = gradInputImagesLowLowLowAddress + gradInputImages_stride_X;
                    const int gradInputImagesHighLowHighAddress  = gradInputImagesHighLowLowAddress + gradInputImages_stride_Z;
                    const int gradInputImagesHighHighLowAddress  = gradInputImagesHighLowLowAddress + gradInputImages_stride_Y;
                    const int gradInputImagesHighHighHighAddress = gradInputImagesHighLowLowAddress + gradInputImages_stride_Y + gradInputImages_stride_Z;

                    const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_stride_Z * zOut + gradOutput_stride_Y * yOut + gradOutput_stride_X * xOut;

                    /* per-corner dot products <input_corner, gradOutput> over channels */
                    real lowLowLowDotProduct    = 0;
                    real lowLowHighDotProduct   = 0;
                    real lowHighLowDotProduct   = 0;
                    real lowHighHighDotProduct  = 0;
                    real highLowLowDotProduct   = 0;
                    real highLowHighDotProduct  = 0;
                    real highHighLowDotProduct  = 0;
                    real highHighHighDotProduct = 0;

                    for (int t = 0; t < inputImages_C; t++)
                    {
                        real gradOutValue = gradOutput_data[gradOutputAddress + t*gradOutput_stride_C];
                        /* with zero boundary, an out-of-range sample contributes nothing */
                        if (!zero_boundary_bool || !(xBeyondLow || yBeyondLow || xBeyondHigh || yBeyondHigh || zBeyondLow || zBeyondHigh)) {
                            real inLowLowLow = inputImages_data[inLowLowLowAddress + t*inputImages_stride_C];
                            lowLowLowDotProduct += inLowLowLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowLowLowAddress + t*gradInputImages_stride_C] += xWeightLowLowLow * yWeightLowLowLow * zWeightLowLowLow * gradOutValue;

                            real inLowLowHigh = inputImages_data[inLowLowHighAddress + t*inputImages_stride_C];
                            lowLowHighDotProduct += inLowLowHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowLowHighAddress + t*gradInputImages_stride_C] += xWeightLowLowLow * yWeightLowLowLow * (1 - zWeightLowLowLow) * gradOutValue;

                            real inLowHighLow = inputImages_data[inLowHighLowAddress + t*inputImages_stride_C];
                            lowHighLowDotProduct += inLowHighLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowHighLowAddress + t*gradInputImages_stride_C] += xWeightLowLowLow * (1 - yWeightLowLowLow) * zWeightLowLowLow * gradOutValue;

                            real inLowHighHigh = inputImages_data[inLowHighHighAddress + t*inputImages_stride_C];
                            lowHighHighDotProduct += inLowHighHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesLowHighHighAddress + t*gradInputImages_stride_C] += xWeightLowLowLow * (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * gradOutValue;

                            real inHighLowLow = inputImages_data[inHighLowLowAddress + t*inputImages_stride_C];
                            highLowLowDotProduct += inHighLowLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighLowLowAddress + t*gradInputImages_stride_C] += (1 - xWeightLowLowLow) * yWeightLowLowLow * zWeightLowLowLow * gradOutValue;

                            real inHighLowHigh = inputImages_data[inHighLowHighAddress + t*inputImages_stride_C];
                            highLowHighDotProduct += inHighLowHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighLowHighAddress + t*gradInputImages_stride_C] += (1 - xWeightLowLowLow) * yWeightLowLowLow * (1 - zWeightLowLowLow) * gradOutValue;

                            real inHighHighLow = inputImages_data[inHighHighLowAddress + t*inputImages_stride_C];
                            highHighLowDotProduct += inHighHighLow * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighHighLowAddress + t*gradInputImages_stride_C] += (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * zWeightLowLowLow * gradOutValue;

                            real inHighHighHigh = inputImages_data[inHighHighHighAddress + t*inputImages_stride_C];
                            highHighHighDotProduct += inHighHighHigh * gradOutValue;
                            if (!onlyGrid) gradInputImages_data[gradInputImagesHighHighHighAddress + t*gradInputImages_stride_C] += (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * gradOutValue;
                        }
                    }

                    /* derivative of the trilinear interpolant w.r.t. the voxel
                       coordinates (xf/yf/zf reused as scratch for d/dx, d/dy, d/dz) */
                    xf = - yWeightLowLowLow * zWeightLowLowLow * lowLowLowDotProduct + yWeightLowLowLow * zWeightLowLowLow * highLowLowDotProduct
                         - yWeightLowLowLow * (1 - zWeightLowLowLow) * lowLowHighDotProduct + yWeightLowLowLow * (1 - zWeightLowLowLow) * highLowHighDotProduct
                         - (1 - yWeightLowLowLow) * zWeightLowLowLow * lowHighLowDotProduct + (1 - yWeightLowLowLow) * zWeightLowLowLow * highHighLowDotProduct
                         - (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * lowHighHighDotProduct + (1 - yWeightLowLowLow) * (1 - zWeightLowLowLow) * highHighHighDotProduct;
                    yf = - xWeightLowLowLow * zWeightLowLowLow * lowLowLowDotProduct + xWeightLowLowLow * zWeightLowLowLow * lowHighLowDotProduct
                         - xWeightLowLowLow * (1 - zWeightLowLowLow) * lowLowHighDotProduct + xWeightLowLowLow * (1 - zWeightLowLowLow) * lowHighHighDotProduct
                         - (1 - xWeightLowLowLow) * zWeightLowLowLow * highLowLowDotProduct + (1 - xWeightLowLowLow) * zWeightLowLowLow * highHighLowDotProduct
                         - (1 - xWeightLowLowLow) * (1 - zWeightLowLowLow) * highLowHighDotProduct + (1 - xWeightLowLowLow) * (1 - zWeightLowLowLow) * highHighHighDotProduct;
                    zf = - xWeightLowLowLow * yWeightLowLowLow * lowLowLowDotProduct + xWeightLowLowLow * yWeightLowLowLow * lowLowHighDotProduct
                         - (1 - xWeightLowLowLow) * yWeightLowLowLow * highLowLowDotProduct + (1 - xWeightLowLowLow) * yWeightLowLowLow * highLowHighDotProduct
                         - xWeightLowLowLow * (1 - yWeightLowLowLow) * lowHighLowDotProduct + xWeightLowLowLow * (1 - yWeightLowLowLow) * lowHighHighDotProduct
                         - (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * highHighLowDotProduct + (1 - xWeightLowLowLow) * (1 - yWeightLowLowLow) * highHighHighDotProduct;

                    /* chain rule through the [-1,1] -> voxel mapping */
                    gradGrids_data[b*gradGrids_strideBatch + zOut*gradGrids_stride_Z + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X] = xf * (inputImages_X - 1) / 2;
                    gradGrids_data[b*gradGrids_strideBatch + zOut*gradGrids_stride_Z + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X + gradGrids_stride_C] = yf * (inputImages_Y - 1) / 2;
                    gradGrids_data[b*gradGrids_strideBatch + zOut*gradGrids_stride_Z + yOut*gradGrids_stride_Y + xOut*gradGrids_stride_X + 2*gradGrids_stride_C] = zf * (inputImages_Z - 1) / 2;
                }
            }
        }
    }
    return 1;
}
/*
 * Backward pass of 2-D bilinear sampling for BHWD-layout tensors
 * (batch, height, width, channels).
 *
 * Inputs:
 *   inputImages     - images sampled in the forward pass
 *   grids           - normalized sampling coordinates in [-1, 1];
 *                     grid channel 0 holds y, channel 1 holds x
 *   gradOutput      - gradient w.r.t. the sampled output
 *   zero_boundary   - boundary-mode flag (see NOTE below)
 * Outputs:
 *   gradInputImages - gradient w.r.t. the input images (accumulated with +=)
 *   gradGrids       - gradient w.r.t. the grid coordinates (overwritten)
 * Returns 1 on completion.
 */
int BilinearSamplerBHWD_updateGradInput(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
                                        THFloatTensor *gradGrids, THFloatTensor *gradOutput, int zero_boundary)
{
  /* When true, only grid gradients would be produced; hard-wired to false here. */
  bool onlyGrid=false;
  /* NOTE(review): computed but never used below — out-of-bounds corners are
   * always skipped regardless of zero_boundary. Confirm intended semantics. */
  bool zero_boundary_bool = zero_boundary == 1;
  /* Tensor extents (BHWD: dim 0 = batch, 1 = height, 2 = width, 3 = channels). */
  int batchsize = THFloatTensor_size(inputImages,0);
  int inputImages_height = THFloatTensor_size(inputImages,1);
  int inputImages_width = THFloatTensor_size(inputImages,2);
  int gradOutput_height = THFloatTensor_size(gradOutput,1);
  int gradOutput_width = THFloatTensor_size(gradOutput,2);
  int inputImages_channels = THFloatTensor_size(inputImages,3);
  /* Element strides used for manual flat indexing into each tensor. */
  int gradOutput_strideBatch = THFloatTensor_stride(gradOutput,0);
  int gradOutput_strideHeight = THFloatTensor_stride(gradOutput,1);
  int gradOutput_strideWidth = THFloatTensor_stride(gradOutput,2);
  int inputImages_strideBatch = THFloatTensor_stride(inputImages,0);
  int inputImages_strideHeight = THFloatTensor_stride(inputImages,1);
  int inputImages_strideWidth = THFloatTensor_stride(inputImages,2);
  int gradInputImages_strideBatch = THFloatTensor_stride(gradInputImages,0);
  int gradInputImages_strideHeight = THFloatTensor_stride(gradInputImages,1);
  int gradInputImages_strideWidth = THFloatTensor_stride(gradInputImages,2);
  int grids_strideBatch = THFloatTensor_stride(grids,0);
  int grids_strideHeight = THFloatTensor_stride(grids,1);
  int grids_strideWidth = THFloatTensor_stride(grids,2);
  int gradGrids_strideBatch = THFloatTensor_stride(gradGrids,0);
  int gradGrids_strideHeight = THFloatTensor_stride(gradGrids,1);
  int gradGrids_strideWidth = THFloatTensor_stride(gradGrids,2);
  real *inputImages_data, *gradOutput_data, *grids_data, *gradGrids_data, *gradInputImages_data;
  inputImages_data = THFloatTensor_data(inputImages);
  gradOutput_data = THFloatTensor_data(gradOutput);
  grids_data = THFloatTensor_data(grids);
  gradGrids_data = THFloatTensor_data(gradGrids);
  gradInputImages_data = THFloatTensor_data(gradInputImages);
  int b, yOut, xOut;
  for(b=0; b < batchsize; b++)
  {
    for(yOut=0; yOut < gradOutput_height; yOut++)
    {
      for(xOut=0; xOut < gradOutput_width; xOut++)
      {
        //read the grid: channel 0 is the normalized y coordinate, channel 1 the x coordinate
        real yf = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth];
        real xf = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + 1];
        // get the weights for interpolation
        int yInTopLeft, xInTopLeft;
        real yWeightTopLeft, xWeightTopLeft;
        /* map the normalized x from [-1, 1] to pixel space [0, width-1] */
        real xcoord = (xf + 1) * (inputImages_width - 1) / 2;
        xInTopLeft = floor(xcoord);
        xWeightTopLeft = 1 - (xcoord - xInTopLeft);
        /* map the normalized y from [-1, 1] to pixel space [0, height-1] */
        real ycoord = (yf + 1) * (inputImages_height - 1) / 2;
        yInTopLeft = floor(ycoord);
        yWeightTopLeft = 1 - (ycoord - yInTopLeft);
        /* flat offsets of the four neighbouring input pixels */
        const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
        const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
        const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
        const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
        /* matching offsets in the image-gradient tensor */
        const int gradInputImagesTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeft + gradInputImages_strideWidth * xInTopLeft;
        const int gradInputImagesTopRightAddress = gradInputImagesTopLeftAddress + gradInputImages_strideWidth;
        const int gradInputImagesBottomLeftAddress = gradInputImagesTopLeftAddress + gradInputImages_strideHeight;
        const int gradInputImagesBottomRightAddress = gradInputImagesBottomLeftAddress + gradInputImages_strideWidth;
        const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;
        /* per-corner dot products of input pixel values with the output
         * gradient, accumulated over channels; used for the grid gradient */
        real topLeftDotProduct = 0;
        real topRightDotProduct = 0;
        real bottomLeftDotProduct = 0;
        real bottomRightDotProduct = 0;
        /* NOTE(review): v and the four in* locals below are never read —
         * the channel loop declares shadowing locals of the same names. */
        real v=0;
        real inTopLeft=0;
        real inTopRight=0;
        real inBottomLeft=0;
        real inBottomRight=0;
        // we are careful with the boundaries: corners outside the image are skipped
        bool topLeftIsIn = xInTopLeft >= 0 && xInTopLeft <= inputImages_width-1 && yInTopLeft >= 0 && yInTopLeft <= inputImages_height-1;
        bool topRightIsIn = xInTopLeft+1 >= 0 && xInTopLeft+1 <= inputImages_width-1 && yInTopLeft >= 0 && yInTopLeft <= inputImages_height-1;
        bool bottomLeftIsIn = xInTopLeft >= 0 && xInTopLeft <= inputImages_width-1 && yInTopLeft+1 >= 0 && yInTopLeft+1 <= inputImages_height-1;
        bool bottomRightIsIn = xInTopLeft+1 >= 0 && xInTopLeft+1 <= inputImages_width-1 && yInTopLeft+1 >= 0 && yInTopLeft+1 <= inputImages_height-1;
        int t;
        /* accumulate image gradients and corner dot products channel by channel */
        for(t=0; t<inputImages_channels; t++)
        {
          real gradOutValue = gradOutput_data[gradOutputAddress + t];
          if(topLeftIsIn)
          {
            real inTopLeft = inputImages_data[inTopLeftAddress + t];
            topLeftDotProduct += inTopLeft * gradOutValue;
            if(!onlyGrid) gradInputImages_data[gradInputImagesTopLeftAddress + t] += xWeightTopLeft * yWeightTopLeft * gradOutValue;
          }
          if(topRightIsIn)
          {
            real inTopRight = inputImages_data[inTopRightAddress + t];
            topRightDotProduct += inTopRight * gradOutValue;
            if(!onlyGrid) gradInputImages_data[gradInputImagesTopRightAddress + t] += (1 - xWeightTopLeft) * yWeightTopLeft * gradOutValue;
          }
          if(bottomLeftIsIn)
          {
            real inBottomLeft = inputImages_data[inBottomLeftAddress + t];
            bottomLeftDotProduct += inBottomLeft * gradOutValue;
            if(!onlyGrid) gradInputImages_data[gradInputImagesBottomLeftAddress + t] += xWeightTopLeft * (1 - yWeightTopLeft) * gradOutValue;
          }
          if(bottomRightIsIn)
          {
            real inBottomRight = inputImages_data[inBottomRightAddress + t];
            bottomRightDotProduct += inBottomRight * gradOutValue;
            if(!onlyGrid) gradInputImages_data[gradInputImagesBottomRightAddress + t] += (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * gradOutValue;
          }
        }
        /* chain rule through the bilinear weights: yf/xf are reused here as the
         * gradients w.r.t. the normalized y and x grid coordinates */
        yf = - xWeightTopLeft * topLeftDotProduct + xWeightTopLeft * bottomLeftDotProduct - (1-xWeightTopLeft) * topRightDotProduct + (1-xWeightTopLeft) * bottomRightDotProduct;
        xf = - yWeightTopLeft * topLeftDotProduct + yWeightTopLeft * topRightDotProduct - (1-yWeightTopLeft) * bottomLeftDotProduct + (1-yWeightTopLeft) * bottomRightDotProduct;
        /* undo the [-1,1] -> pixel-space scaling; grid channel 0 = y, channel 1 = x */
        gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth] = yf * (inputImages_height-1) / 2;
        gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + 1] = xf * (inputImages_width-1) / 2;
      }
    }
  }
  return 1;
}
/*
 * Dispatch the backward pass for BXYZC-layout (channels-last) tensors to the
 * 1-D, 2-D or 3-D implementation selected by ndim.
 * Returns the callee's status, or -1 when ndim is not 1, 2 or 3.
 */
int BilinearSamplerBXYZC_updateGradInput_ND(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
                                            THFloatTensor *gradGrids, THFloatTensor *gradOutput, int ndim, int zero_boundary)
{
  if (ndim == 1)
    return BilinearSamplerBXC_updateGradInput_1D(inputImages, grids, gradInputImages, gradGrids, gradOutput, zero_boundary);
  if (ndim == 2)
    return BilinearSamplerBXYC_updateGradInput_2D(inputImages, grids, gradInputImages, gradGrids, gradOutput, zero_boundary);
  if (ndim == 3)
    return BilinearSamplerBXYZC_updateGradInput_3D(inputImages, grids, gradInputImages, gradGrids, gradOutput, zero_boundary);
  return -1; /* unsupported dimensionality */
}
/*
 * Dispatch the backward pass for BCXYZ-layout (channels-first) tensors to the
 * 1-D, 2-D or 3-D implementation selected by ndim.
 * Returns the callee's status, or -1 when ndim is not 1, 2 or 3.
 */
int BilinearSamplerBCXYZ_updateGradInput_ND(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
                                            THFloatTensor *gradGrids, THFloatTensor *gradOutput, int ndim, int zero_boundary)
{
  if (ndim == 1)
    return BilinearSamplerBCX_updateGradInput_1D(inputImages, grids, gradInputImages, gradGrids, gradOutput, zero_boundary);
  if (ndim == 2)
    return BilinearSamplerBCXY_updateGradInput_2D(inputImages, grids, gradInputImages, gradGrids, gradOutput, zero_boundary);
  if (ndim == 3)
    return BilinearSamplerBCXYZ_updateGradInput_3D(inputImages, grids, gradInputImages, gradGrids, gradOutput, zero_boundary);
  return -1; /* unsupported dimensionality */
}
|
sum_simd.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/timeb.h>
#include <stdbool.h>
#include <string.h>
#include <omp.h>
#include "../constants.h"
/* read timer in second */
/* Return the current wall-clock time in seconds (millisecond resolution). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return ((double) now.millitm) / 1000.0 + (double) now.time;
}
/* read timer in ms */
/* Return the current wall-clock time in milliseconds. */
double read_timer_ms() {
    struct timeb now;
    ftime(&now);
    return ((double) now.millitm) + 1000.0 * (double) now.time;
}
/* initialize a vector with random floating point numbers */
/*
 * Fill A[0..N-1] with pseudo-random values in [0, 1) drawn from drand48().
 *
 * Bug fix: the loop previously ran under "#pragma omp parallel for", but
 * drand48() mutates hidden global state and is not thread-safe — concurrent
 * calls are a data race (undefined behavior) and make the generated vector
 * irreproducible even after seeding with srand48(). The loop is therefore
 * serial; initialization is not on the benchmark's timed path.
 */
void init(float *A, int N) {
    int i;
    for (i = 0; i < N; i++) {
        A[i] = (float) drand48();
    }
}
float sum(int N, float *numbers);
/*
 * Benchmark driver: fills a vector of VECTOR_LENGTH random floats and times
 * `sum` over RUNS repetitions. With "-m" only the bare per-run time is
 * printed; otherwise a full report is produced.
 *
 * Bug fix: the malloc() result was previously used without a NULL check.
 */
int main(int argc, char *argv[]) {
    int N = VECTOR_LENGTH;
    bool full = true;                       /* full report vs. bare timing */
    if (argc == 2) {
        if (strcmp(argv[1], "-m") == 0) full = false;
    }
    float *numbers = malloc(sizeof(float) * N);
    if (numbers == NULL) {
        fprintf(stderr, "init: cannot allocate %d floats\n", N);
        return 1;
    }
    srand48((1 << 12));
    init(numbers, N);
    int i;
    int num_runs = RUNS;
    /* volatile so the compiler cannot elide the repeated timed calls */
    volatile float result;
    double elapsed = read_timer();
    for (i = 0; i < num_runs; i++) result = sum(N, numbers);
    elapsed = (read_timer() - elapsed) / num_runs;
    /* NOTE(review): elapsed is converted to milliseconds here, but the report
     * header below says "Runtime (s)" and the MFLOPS formula divides by 1e6 —
     * the printed units look inconsistent; output kept as-is for comparability. */
    elapsed = elapsed * 1.0e3;
    (void) result;
    if (full) {
        /* you should add the call to each function and time the execution */
        printf("======================================================================================================\n");
        printf("Performance:\t\t\tRuntime (s)\t MFLOPS \t\tCalculated Result\n");
        printf("------------------------------------------------------------------------------------------------------\n");
        printf("sum (OMP SIMD):\t\t\t%4f\t%4f \t\t%g\n", elapsed, (2.0 * N) / (1.0e6 * elapsed), sum(N, numbers));
    } else {
        printf("%f\n", elapsed);
    }
    free(numbers);
    return 0;
}
/*
 * Return the sum of the first N elements of `numbers`.
 *
 * Bug fix: the loop previously used "#pragma omp for simd" with no reduction
 * clause. `omp for` is a worksharing construct — orphaned outside a parallel
 * region it does nothing, and inside one every thread would race on the
 * accumulator. What this benchmark intends is plain SIMD vectorization, which
 * is "#pragma omp simd" with a reduction over the accumulator.
 */
float sum(int N, float *numbers) {
    float total = 0;
    int i;
#pragma omp simd reduction(+:total)
    for (i = 0; i < N; i++)
        total += numbers[i];
    return total;
}
|
dataset.h | #ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class stores some meta (non-feature) data of the training set,
* e.g. labels, weights, initial scores, and query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, calculated automatically from weights and query_boundaries (if both exist);
*    the weight of the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If it exists, the model will boost from this score; otherwise it will start from 0.
*/
class Metadata {
public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query-level information, since it is needed for sampling data
  * \param data_filename Filename of data
  * \param initscore_file Filename of initial score
  */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
  * \brief Init as a subset of another Metadata object
  * \param metadata Metadata to take the subset from
  * \param used_indices Indices of the records to keep
  * \param num_used_indices Number of entries in used_indices
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initialize from binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work, will allocate space for label, weight (if it exists) and query (if it exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means it doesn't exist
  * \param query_idx Index of query id column, < 0 means it doesn't exist
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of locally used records
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if needed
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data,
    const std::vector<data_size_t>& used_data_indices);
  /*! \brief Set all labels from an array of length len */
  void SetLabel(const label_t* label, data_size_t len);
  /*! \brief Set all record weights from an array of length len */
  void SetWeights(const label_t* weights, data_size_t len);
  /*! \brief Set per-record query ids from an array of length len */
  void SetQuery(const data_size_t* query, data_size_t len);
  /*!
  * \brief Set initial scores
  * \param init_score Initial scores; this class manages its own copy of the data.
  * \param len Number of scores
  */
  void SetInitScore(const double* init_score, data_size_t len);
  /*!
  * \brief Save binary data to file
  * \param writer File writer to write to
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;
  /*!
  * \brief Get size in bytes of this object
  */
  size_t SizesInByte() const;
  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }
  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value)
  {
    label_[idx] = value;
  }
  /*!
  * \brief Set weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value)
  {
    weights_[idx] = value;
  }
  /*!
  * \brief Set query id for one record
  * \param idx Index of this record
  * \param value Query id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value)
  {
    queries_[idx] = static_cast<data_size_t>(value);
  }
  /*!
  * \brief Get weights; if they don't exist, returns nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get data boundaries on queries; if they don't exist, returns nullptr
  * we assume data are ordered by query,
  * the interval [query_boundaries[i], query_boundaries[i+1])
  * holds the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }
  /*!
  * \brief Get weights for queries; if they don't exist, returns nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get initial scores; if they don't exist, returns nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }
  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;
private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correctness of the weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correctness of the initial score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  bool weight_load_from_file_;
  bool query_load_from_file_;
  bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}
  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, stored as (column_idx, value) pairs
  * \param out_label Label will be stored here if it exists
  */
  virtual void ParseOneLine(const char* str,
    std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
  /*! \brief Total number of columns this parser expects per record */
  virtual int TotalColumns() const = 0;
  /*!
  * \brief Create an object of parser; will auto-choose the format depending on the file
  * \param filename Filename of the data file
  * \param header Whether the file has a header line
  * \param num_features Pass num_features of this data file if you know it, <=0 means unknown
  * \param label_idx Index of the label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of the dataset,
* used for training or validation.
*/
class Dataset {
public:
  friend DatasetLoader;
  /*! \brief Default constructor */
  LIGHTGBM_EXPORT Dataset();
  /*! \brief Construct with a fixed number of records */
  LIGHTGBM_EXPORT Dataset(data_size_t num_data);
  /*! \brief Build the internal feature groups from per-feature bin mappers and sampled data */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
    int** sample_non_zero_indices,
    const int* num_per_col,
    size_t total_sample_cnt,
    const Config& io_config);
  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();
  /*! \brief Check that this dataset's feature layout and bin mappers match another's */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }
  /*! \brief Push one dense row of raw feature values; no-op after FinishLoad() */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }
  /*! \brief Push one sparse row given as (column_idx, value) pairs; no-op after FinishLoad() */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }
  /*! \brief Push a single value addressed directly by group and sub-feature */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }
  /*! \brief Map an inner (used) feature index back to the original column index */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }
  /*! \brief Map an original column index to the inner feature index (-1 if unused) */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  /*! \brief Get the feature group containing an inner feature */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  /*! \brief Get the sub-feature index of an inner feature within its group.
   * NOTE(review): "Feture" is a typo, kept because it is part of the public API. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  /*! \brief Get the cumulative bin offset at which a group's bins start */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  /*! \brief Get the total number of bins over all groups */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }
  /*! \brief Get the original column indices of all used features */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }
  /*! \brief Resize the dataset to hold num_data records */
  void ReSize(data_size_t num_data);
  /*! \brief Copy the given record subset (and optionally meta data) from fullset */
  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
  /*! \brief Finish pushing data; the dataset becomes read-only afterwards */
  LIGHTGBM_EXPORT void FinishLoad();
  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
  /*! \brief Copy the feature-to-group mapping from another dataset */
  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
  /*! \brief Create this dataset as a validation set aligned to the given training dataset */
  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
  /*! \brief Build gradient/hessian histograms for the given leaf over the selected features */
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
    const data_size_t* data_indices, data_size_t num_data,
    int leaf_idx,
    std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
    const score_t* gradients, const score_t* hessians,
    score_t* ordered_gradients, score_t* ordered_hessians,
    bool is_constant_hessian,
    HistogramBinEntry* histogram_data) const;
  /*! \brief Fix a feature's histogram so its entries sum to the given totals */
  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
    HistogramBinEntry* data) const;
  /*! \brief Partition data_indices by the feature threshold(s); returns the size of the <= side */
  inline data_size_t Split(int feature,
    const uint32_t* threshold, int num_threshold, bool default_left,
    data_size_t* data_indices, data_size_t num_data,
    data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }
  /*! \brief Bin offset of feature i within its group (1 for the group's first sub-feature) */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }
  /*! \brief Number of bins of feature i */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }
  /*! \brief Monotone constraint of feature i (0 when no constraints are configured) */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }
  /*! \brief Penalty of feature i (1 when no penalties are configured).
   * NOTE(review): "Penalte" is a typo, kept because it is part of the public API. */
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }
  /*! \brief Whether any feature has a non-zero monotone constraint */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }
  /*! \brief Total number of bins in a feature group */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }
  /*! \brief Bin mapper of feature i */
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }
  /*! \brief Bin data of the group that contains feature i */
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }
  /*! \brief Bin data of a feature group */
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }
  /*! \brief Whether a feature group is stored sparsely */
  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }
  /*! \brief Iterator over the bins of feature i */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }
  /*! \brief Iterator over the bins of a whole feature group */
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }
  /*! \brief Convert a bin index of feature i back to a real-valued threshold */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }
  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }
  /*! \brief Create one OrderedBin per feature group (in parallel); exceptions are re-thrown on the calling thread */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }
  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }
  /*! \brief Get number of used features */
  inline int num_features() const { return num_features_; }
  /*! \brief Get number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}
  /*! \brief Get number of total features */
  inline int num_total_features() const { return num_total_features_; }
  /*! \brief Get the index of the label column */
  inline int label_idx() const { return label_idx_; }
  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }
  /*! \brief Set feature names; spaces are replaced by underscores */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name: feature_names_){
      if (feature_name.find(' ') != std::string::npos){
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName){
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }
  /*! \brief Get a textual bin description per original column ("none" for unused columns) */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }
  /*! \brief Reset dataset-related configuration from a parameter string */
  void ResetConfig(const char* parameters);
  /*! \brief Get number of data */
  inline data_size_t num_data() const { return num_data_; }
  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;
private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label-level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token used to recognize the binary file format */
  static const char* binary_file_token;
  int num_groups_;
  std::vector<int> real_feature_idx_;
  std::vector<int> feature2group_;
  std::vector<int> feature2subfeature_;
  std::vector<uint64_t> group_bin_boundaries_;
  std::vector<int> group_feature_start_;
  std::vector<int> group_feature_cnt_;
  std::vector<int8_t> monotone_types_;
  std::vector<double> feature_penalty_;
  bool is_finish_load_;
  int max_bin_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
};
} // namespace LightGBM
#endif // LIGHTGBM_DATASET_H_
|
elgamal.c | #include <ristretto_elgamal.h>
#include "word.h"
#include "field.h"
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <time.h>
/*
 * Field inversion built on the inverse-square-root primitive:
 * 1/x = x * (1/sqrt(x^2))^2. The result is staged in a temporary so that
 * `y` may alias `x`.
 */
static void gf_invert_here(gf_25519_t *y, const gf_25519_t *x, int assert_nonzero) {
    gf_25519_t sq, inv;
    gf_sqr(&sq, x);                /* x^2 */
    mask_t ok = gf_isr(&inv, &sq); /* +-1/sqrt(x^2) = +-1/x */
    (void) ok;
    (void) assert_nonzero;
    // if (assert_nonzero) assert(ok);
    gf_sqr(&sq, &inv);             /* 1/x^2 */
    gf_mul(&inv, &sq, x);          /* x * 1/x^2 = 1/x; not direct to y in case of alias */
    gf_copy(y, &inv);
}
/*
 * Batch field inversion (Montgomery's trick): inverts all n inputs with a
 * single field inversion plus O(n) multiplications.
 * Forward pass: out[i] holds the prefix product in[0]*...*in[i-1] for
 * i = 1..n-1, and out[0] receives the inverse of the product of ALL inputs.
 * Backward pass invariant: before step i, out[0] = 1/(in[0]*...*in[i]); so
 * out[i] = prefix(i) * out[0] = 1/in[i], then out[0] is multiplied by in[i]
 * to restore the invariant for step i-1.  After the loop out[0] = 1/in[0].
 * Requires n > 1; `out` must not alias `in`.
 * NOTE(review): relies on assert() — <assert.h> must come from an earlier
 * include in this file.
 */
static void gf_batch_invert_here(
    gf_25519_t *__restrict__ out,
    const gf_25519_t *in,
    unsigned int n
) {
    gf_25519_t t1;
    assert(n > 1);
    /* forward pass: prefix products */
    gf_copy(&out[1], &in[0]);
    int i;
    for (i = 1; i < (int) (n - 1); i++) {
        gf_mul(&out[i + 1], &out[i], &in[i]);
    }
    gf_mul(&out[0], &out[n - 1], &in[n - 1]); /* product of all inputs */
    gf_invert_here(&out[0], &out[0], 1);      /* the single field inversion */
    /* backward pass: peel one factor off per step */
    for (i = n - 1; i > 0; i--) {
        gf_mul(&t1, &out[i], &out[0]);  /* prefix(i) * running inverse = 1/in[i] */
        gf_copy(&out[i], &t1);
        gf_mul(&t1, &out[0], &in[i]);   /* drop in[i] from the running inverse */
        gf_copy(&out[0], &t1);
    }
}
/*
 * Generate a 59-share two-server ElGamal key pair. For each share i, the two
 * server secrets srv_1_sk[i] and srv_2_sk[i] are random scalars, the matching
 * public shares are sk * (8*base), and the combined public key is their sum.
 * Raw key material is written to the five given files.
 *
 * Fixes over the previous version:
 *  - each 32-byte seed is now read from /dev/urandom exactly once; the old
 *    code filled the seed arrays per element and then re-read BOTH whole
 *    arrays on every iteration of the key loop (59x the needed entropy, and
 *    the initial fill loops were dead code);
 *  - fread() of key material is checked, so a short read can never leave
 *    seeds uninitialized.
 */
void KeyGen(
    const char *filename_priv_1_key,
    const char *filename_priv_2_key,
    const char *filename_pub_1_key,
    const char *filename_pub_2_key,
    const char *filename_pub_key
) {
    FILE *fp_priv_1_key = fopen(filename_priv_1_key, "wb");
    FILE *fp_priv_2_key = fopen(filename_priv_2_key, "wb");
    FILE *fp_pub_1_key = fopen(filename_pub_1_key, "wb");
    FILE *fp_pub_2_key = fopen(filename_pub_2_key, "wb");
    FILE *fp_pub_key = fopen(filename_pub_key, "wb");
    if (fp_priv_1_key == NULL || fp_priv_2_key == NULL) {
        perror("Cannot open the file for storing the private keys.\n");
        exit(1);
    }
    if (fp_pub_1_key == NULL || fp_pub_2_key == NULL || fp_pub_key == NULL) {
        perror("Cannot open the file for storing the public keys.\n");
        exit(1);
    }
    /*
     * Step 1: Generate random values, which are going to be the private key.
     */
    ristretto255_point_t base;
    ristretto255_point_copy(&base, &ristretto255_point_base);
    unsigned char rand255_1[59][32];
    unsigned char rand255_2[59][32];
    FILE *rand_src = fopen("/dev/urandom", "rb");
    if (rand_src == NULL) {
        perror("cannot open the random source.\n");
        exit(1);
    }
    /* Read all 59 seeds for each share in one checked call per array. */
    if (fread(rand255_1, sizeof(rand255_1), 1, rand_src) != 1 ||
        fread(rand255_2, sizeof(rand255_2), 1, rand_src) != 1) {
        perror("cannot read from the random source.\n");
        exit(1);
    }
    ristretto255_scalar_t srv_1_sk[59];
    ristretto255_scalar_t srv_2_sk[59];
    ristretto255_point_t srv_1_pk[59];
    ristretto255_point_t srv_2_pk[59];
    ristretto255_point_t srv_pk[59];
    /* Work from 8*base (cofactor-cleared generator used throughout). */
    ristretto255_point_add(&base, &base, &base); // 2
    ristretto255_point_add(&base, &base, &base); // 4
    ristretto255_point_add(&base, &base, &base); // 8
    for (int i = 0; i < 59; i++) {
        ristretto255_scalar_decode_long(&srv_1_sk[i], rand255_1[i], 32);
        ristretto255_scalar_decode_long(&srv_2_sk[i], rand255_2[i], 32);
        ristretto255_point_scalarmul(&srv_1_pk[i], &base, &srv_1_sk[i]);
        ristretto255_point_scalarmul(&srv_2_pk[i], &base, &srv_2_sk[i]);
        /* combined public share = share 1 + share 2 */
        ristretto255_point_add(&srv_pk[i], &srv_1_pk[i], &srv_2_pk[i]);
    }
    for (int i = 0; i < 59; i++) {
        fwrite(&srv_1_sk[i], sizeof(ristretto255_scalar_t), 1, fp_priv_1_key);
        fwrite(&srv_2_sk[i], sizeof(ristretto255_scalar_t), 1, fp_priv_2_key);
        fwrite(&srv_1_pk[i], sizeof(ristretto255_point_t), 1, fp_pub_1_key);
        fwrite(&srv_2_pk[i], sizeof(ristretto255_point_t), 1, fp_pub_2_key);
        fwrite(&srv_pk[i], sizeof(ristretto255_point_t), 1, fp_pub_key);
    }
    fclose(fp_priv_1_key);
    fclose(fp_priv_2_key);
    fclose(fp_pub_1_key);
    fclose(fp_pub_2_key);
    fclose(fp_pub_key);
    fclose(rand_src);
}
/*
 * Generate one server's half of the key pair: 59 random secret scalars and
 * their public shares sk * (8*base), written to the two given files.
 *
 * Fixes over the previous version:
 *  - each 32-byte seed is now read from /dev/urandom exactly once; the old
 *    code filled the seed array per element and then re-read the WHOLE array
 *    on every iteration of the key loop (the initial fill was dead code);
 *  - fread() of key material is checked, so a short read can never leave
 *    seeds uninitialized.
 */
void KeyGen_stage1(
    const char *filename_priv_srv_key,
    const char *filename_pub_srv_key
) {
    FILE *fp_priv_srv_key = fopen(filename_priv_srv_key, "wb");
    FILE *fp_pub_srv_key = fopen(filename_pub_srv_key, "wb");
    if (fp_priv_srv_key == NULL) {
        perror("Cannot open the file for storing the private key.\n");
        exit(1);
    }
    if (fp_pub_srv_key == NULL) {
        perror("Cannot open the file for storing the public key.\n");
        exit(1);
    }
    /*
     * Step 1: Generate random values, which are going to be the private key.
     */
    ristretto255_point_t base;
    ristretto255_point_copy(&base, &ristretto255_point_base);
    unsigned char rand255[59][32];
    FILE *rand_src = fopen("/dev/urandom", "rb");
    if (rand_src == NULL) {
        perror("cannot open the random source.\n");
        exit(1);
    }
    /* Read all 59 seeds in one checked call. */
    if (fread(rand255, sizeof(rand255), 1, rand_src) != 1) {
        perror("cannot read from the random source.\n");
        exit(1);
    }
    ristretto255_scalar_t srv_sk[59];
    ristretto255_point_t srv_pk[59];
    /* Work from 8*base (cofactor-cleared generator used throughout). */
    ristretto255_point_add(&base, &base, &base); // 2
    ristretto255_point_add(&base, &base, &base); // 4
    ristretto255_point_add(&base, &base, &base); // 8
    for (int i = 0; i < 59; i++) {
        ristretto255_scalar_decode_long(&srv_sk[i], rand255[i], 32);
        ristretto255_point_scalarmul(&srv_pk[i], &base, &srv_sk[i]);
    }
    for (int i = 0; i < 59; i++) {
        fwrite(&srv_sk[i], sizeof(ristretto255_scalar_t), 1, fp_priv_srv_key);
        fwrite(&srv_pk[i], sizeof(ristretto255_point_t), 1, fp_pub_srv_key);
    }
    fclose(fp_priv_srv_key);
    fclose(fp_pub_srv_key);
    fclose(rand_src);
}
/*
 * Combine the two servers' public key shares: reads 59 points from each
 * input file, adds them pairwise, and writes the 59 combined public keys.
 *
 * Fix over the previous version: fread() results are now checked, so a
 * truncated input file can no longer silently produce garbage keys.
 */
void KeyGen_stage2(
    const char *filename_pub_1_key,
    const char *filename_pub_2_key,
    const char *filename_pub_key
) {
    FILE *fp_pub_1_key = fopen(filename_pub_1_key, "rb");
    FILE *fp_pub_2_key = fopen(filename_pub_2_key, "rb");
    FILE *fp_pub_key = fopen(filename_pub_key, "wb");
    if (fp_pub_1_key == NULL || fp_pub_2_key == NULL) {
        perror("Cannot open the file for reading the public keys.\n");
        exit(1);
    }
    if (fp_pub_key == NULL) {
        perror("Cannot open the file for storing the public keys.\n");
        exit(1);
    }
    ristretto255_point_t srv_1_pk[59];
    ristretto255_point_t srv_2_pk[59];
    ristretto255_point_t srv_pk[59];
    for (int i = 0; i < 59; i++) {
        if (fread(&srv_1_pk[i], sizeof(ristretto255_point_t), 1, fp_pub_1_key) != 1 ||
            fread(&srv_2_pk[i], sizeof(ristretto255_point_t), 1, fp_pub_2_key) != 1) {
            perror("Cannot read the public key shares.\n");
            exit(1);
        }
    }
    for (int i = 0; i < 59; i++) {
        ristretto255_point_add(&srv_pk[i], &srv_1_pk[i], &srv_2_pk[i]);
    }
    for (int i = 0; i < 59; i++) {
        fwrite(&srv_pk[i], sizeof(ristretto255_point_t), 1, fp_pub_key);
    }
    fclose(fp_pub_1_key);
    fclose(fp_pub_2_key);
    fclose(fp_pub_key);
}
void TablesGen(
    const char *filename_pub_1_key,
    const char *filename_pub_2_key,
    const char *filename_pub_key,
    const char *filename_pub_1_table_format,
    const char *filename_pub_2_table_format,
    const char *filename_pub_table_format
) {
    /*
     * Precompute fixed-base exponentiation tables for every key in the three
     * public-key files (share 1, share 2, joint key).  Table index 59 in each
     * set is generated from 'base' (8*G, matching the doubled base used at
     * key generation).  The *_format arguments are printf-style patterns
     * taking the table index.
     *
     * (The original header comment said "generate random values, which are
     * going to be the private key" -- that described KeyGen, not this
     * function.)
     */
    ristretto255_point_t base;
    ristretto255_point_copy(&base, &ristretto255_point_base);
    ristretto255_point_t pk_1[59];
    ristretto255_point_t pk_2[59];
    ristretto255_point_t pk[59];
    ristretto255_point_add(&base, &base, &base); // 2
    ristretto255_point_add(&base, &base, &base); // 4
    ristretto255_point_add(&base, &base, &base); // 8
    FILE *fp_pub_1_key = fopen(filename_pub_1_key, "rb");
    FILE *fp_pub_2_key = fopen(filename_pub_2_key, "rb");
    FILE *fp_pub_key = fopen(filename_pub_key, "rb");
    if (fp_pub_1_key == NULL || fp_pub_2_key == NULL || fp_pub_key == NULL) {
        perror("Cannot open the file for reading the public keys.\n");
        exit(1);
    }
    for (int i = 0; i < 59; i++) {
        fread(&pk_1[i], sizeof(ristretto255_point_t), 1, fp_pub_1_key);
        fread(&pk_2[i], sizeof(ristretto255_point_t), 1, fp_pub_2_key);
        fread(&pk[i], sizeof(ristretto255_point_t), 1, fp_pub_key);
    }
    /* BUG FIX: the three input streams were opened but never closed. */
    fclose(fp_pub_1_key);
    fclose(fp_pub_2_key);
    fclose(fp_pub_key);
    char filename[60][150];
    #pragma omp parallel for
    for (int i = 0; i < 59; i++) {
        sprintf(filename[i], filename_pub_1_table_format, i);
        TableGen(&pk_1[i], filename[i]);
    }
    sprintf(filename[59], filename_pub_1_table_format, 59);
    TableGen(&base, filename[59]);
    #pragma omp parallel for
    for (int i = 0; i < 59; i++) {
        sprintf(filename[i], filename_pub_2_table_format, i);
        TableGen(&pk_2[i], filename[i]);
    }
    sprintf(filename[59], filename_pub_2_table_format, 59);
    TableGen(&base, filename[59]);
    #pragma omp parallel for
    for (int i = 0; i < 59; i++) {
        sprintf(filename[i], filename_pub_table_format, i);
        TableGen(&pk[i], filename[i]);
    }
    sprintf(filename[59], filename_pub_table_format, 59);
    TableGen(&base, filename[59]);
}
void LoadPrivKey(ristretto255_scalar_t *psk, const char *filename_priv_key) {
    /*
     * Read the 59 private-key scalar shares from filename_priv_key into psk.
     * psk must have room for 59 ristretto255_scalar_t entries.
     */
    FILE *fp_priv_key = fopen(filename_priv_key, "rb");
    if (fp_priv_key == NULL) {
        /* BUG FIX: message said "storing" although the file is opened for
         * reading -- a copy-paste from the key-writing path. */
        perror("Cannot open the file for reading the private keys.\n");
        exit(1);
    }
    for (int i = 0; i < 59; i++) {
        fread(&psk[i], sizeof(ristretto255_scalar_t), 1, fp_priv_key);
    }
    fclose(fp_priv_key);
}
void LoadPubKey(ristretto255_point_t *ppk, const char *filename_pub_key) {
    /*
     * Read the 59 public-key points from filename_pub_key into ppk.
     * ppk must have room for 59 ristretto255_point_t entries.
     */
    FILE *fp_pub_key = fopen(filename_pub_key, "rb");
    if (fp_pub_key == NULL) {
        /* BUG FIX: message said "storing" although the file is opened for
         * reading -- a copy-paste from the key-writing path. */
        perror("Cannot open the file for reading the public keys.\n");
        exit(1);
    }
    for (int i = 0; i < 59; i++) {
        fread(&ppk[i], sizeof(ristretto255_point_t), 1, fp_pub_key);
    }
    fclose(fp_pub_key);
}
void Encrypt(ristretto255_point_t ct[60], const ristretto255_point_t pt[59], const fastecexp_state st_pk[60],
             FILE *rand_src) {
    /*
     * ElGamal-style encryption of 59 plaintext points under one shared
     * randomness value: slot 59 holds the randomness commitment computed
     * from table 59, and each other slot holds pt[i] plus the i-th
     * table output for the same randomness.
     */
    unsigned char r[32];
    fread(r, 32, 1, rand_src);
    TableCompute(&st_pk[59], &ct[59], r);
    for (int slot = 0; slot < 59; slot++) {
        TableCompute(&st_pk[slot], &ct[slot], r);
        ristretto255_point_add(&ct[slot], &ct[slot], &pt[slot]);
    }
}
/* Important: ct2 must differ from ct1 */
void
Rerand(ristretto255_point_t ct2[60], ristretto255_point_t ct1[60], const fastecexp_state st_pk[60], FILE *rand_src) {
    /*
     * Re-randomize ciphertext ct1 into ct2 by adding a fresh encryption of
     * zero.  ct2 must not alias ct1: every ct2 slot is overwritten before
     * the corresponding ct1 slot is read.
     */
    unsigned char fresh[32];
    fread(fresh, 32, 1, rand_src);
    TableCompute(&st_pk[59], &ct2[59], fresh);
    for (int slot = 0; slot < 59; slot++)
        TableCompute(&st_pk[slot], &ct2[slot], fresh);
    /* Fold the original ciphertext into the freshly built zero-encryption. */
    for (int slot = 0; slot < 60; slot++)
        ristretto255_point_add(&ct2[slot], &ct2[slot], &ct1[slot]);
}
void Rerand_to_cache(ristretto255_point_t ct[60], const fastecexp_state st_pk[60], FILE *rand_src) {
    /*
     * Precompute a re-randomization mask (the table outputs for one fresh
     * randomness value) into ct, to be applied later with
     * Rerand_use_cache().  All 60 slots are filled, each independently.
     */
    unsigned char fresh[32];
    fread(fresh, 32, 1, rand_src);
    for (int slot = 0; slot <= 59; slot++)
        TableCompute(&st_pk[slot], &ct[slot], fresh);
}
void Rerand_use_cache(ristretto255_point_t ct[60], ristretto255_point_t cache[60]) {
    /* Apply a mask prepared by Rerand_to_cache(): add it slot-wise onto ct
     * in place. */
    for (int slot = 0; slot < 60; slot++)
        ristretto255_point_add(&ct[slot], &ct[slot], &cache[slot]);
}
void Decrypt(ristretto255_point_t pt[59], ristretto255_point_t ct[60], const ristretto255_scalar_t sk[59]) {
    /*
     * Full decryption: pt[i] = ct[i] - sk[i] * ct[59], where ct[59] is the
     * randomness commitment.  pt[i] is used as scratch for the scalar
     * multiple before the subtraction.
     */
    for (int idx = 0; idx < 59; idx++) {
        ristretto255_point_scalarmul(&pt[idx], &ct[59], &sk[idx]);
        ristretto255_point_sub(&pt[idx], &ct[idx], &pt[idx]);
    }
}
/* now must rerandomize before decryption */
void PartDec1(ristretto255_point_t ct_short[1], ristretto255_point_t ct[60]) {
    /* Partial-decryption step 1: extract the randomness commitment (slot 59)
     * so it can be sent to the other key holder. */
    ristretto255_point_copy(ct_short, &ct[59]);
}
void PartDec2(ristretto255_point_t pt[59], ristretto255_point_t ct_short[1], const ristretto255_scalar_t sk[59]) {
    /* Partial-decryption step 2: multiply the shared commitment by each of
     * this party's secret scalars: pt[i] = sk[i] * ct_short[0]. */
    for (int idx = 0; idx < 59; idx++)
        ristretto255_point_scalarmul(&pt[idx], ct_short, &sk[idx]);
}
void PartDec3(ristretto255_point_t ct_dest[59], ristretto255_point_t ct_src[59]) {
    /* Partial-decryption step 3: subtract the mask in place,
     * ct_dest[i] = ct_src[i] - ct_dest[i]. */
    for (int idx = 0; idx < 59; idx++)
        ristretto255_point_sub(&ct_dest[idx], &ct_src[idx], &ct_dest[idx]);
}
size_t Serialize_Honest_Size(int num_of_points) {
    /* Two serialized field elements (x and y), SER_BYTES each, per point. */
    return (size_t)num_of_points * 2 * SER_BYTES;
}
void Serialize_Honest(unsigned char *out, ristretto255_point_t *in, int num_of_points) {
    /*
     * Serialize points for an honest (trusted) receiver: convert each point
     * to affine coordinates via one batched z-inversion, then write the
     * strongly-reduced x and y field elements, SER_BYTES each.
     * 'out' must hold Serialize_Honest_Size(num_of_points) bytes.
     */
    uint8_t *serialized_output = out;
    gf_25519_t *table = malloc(sizeof(gf_25519_t) * 2 * num_of_points);
    gf_25519_t *zs = malloc(sizeof(gf_25519_t) * num_of_points);
    gf_25519_t *zis = malloc(sizeof(gf_25519_t) * num_of_points);
    /* BUG FIX: 'table' was missing from the allocation check (NULL deref on
     * OOM) and was never freed. */
    if (table == NULL || zs == NULL || zis == NULL) {
        perror("Cannot create space to store the z and its inverse.");
        exit(1);
    }
    for (int i = 0; i < num_of_points; i++) {
        gf_copy(&table[i * 2], &in[i].x);
        gf_copy(&table[i * 2 + 1], &in[i].y);
        gf_copy(&zs[i], &in[i].z);
    }
    /* One batched inversion instead of num_of_points individual ones. */
    gf_batch_invert_here(zis, zs, num_of_points);
    int num_threads = omp_get_max_threads();
    /* Per-thread scratch element, indexed by omp_get_thread_num(). */
    gf_25519_t product[num_threads];
    #pragma omp parallel for
    for (int i = 0; i < num_of_points; i++) {
        int current_thread_num = omp_get_thread_num();
        gf_25519_t *pp = &product[current_thread_num];
        gf_mul(pp, &table[2 * i], &zis[i]);
        gf_strong_reduce(pp);
        gf_copy(&table[2 * i], pp);
        gf_mul(pp, &table[2 * i + 1], &zis[i]);
        gf_strong_reduce(pp);
        gf_copy(&table[2 * i + 1], pp);
    }
    free(zis);
    free(zs);
    #pragma omp parallel for
    for (int i = 0; i < 2 * num_of_points; i++) {
        gf_serialize(&serialized_output[i * SER_BYTES], &table[i], 1);
    }
    free(table); /* BUG FIX: was leaked on every call. */
}
size_t Serialize_Honest_Size_old(int num_of_points) {
    /* Legacy layout: two raw gf_25519_t field elements per point. */
    return (size_t)num_of_points * (2 * sizeof(gf_25519_t));
}
void Serialize_Honest_old(unsigned char *out, ristretto255_point_t *in, int num_of_points) {
    /*
     * Legacy honest serialization: like Serialize_Honest, but writes the raw
     * gf_25519_t representations directly into 'out' (cast to a field-element
     * array) instead of running gf_serialize.  'out' must hold
     * Serialize_Honest_Size_old(num_of_points) bytes.
     */
    gf_25519_t *table = (gf_25519_t *) out;
    gf_25519_t *zs = malloc(sizeof(gf_25519_t) * num_of_points);
    gf_25519_t *zis = malloc(sizeof(gf_25519_t) * num_of_points);
    if (zs == NULL || zis == NULL) {
        perror("\033[0;31m[ERROR]\033[0m Cannot create space to store the z and its inverse.");
        exit(1);
    }
    /* Stage x, y into the output buffer and collect z for batch inversion. */
    for (int i = 0; i < num_of_points; i++) {
        gf_copy(&table[i * 2], &in[i].x);
        gf_copy(&table[i * 2 + 1], &in[i].y);
        gf_copy(&zs[i], &in[i].z);
    }
    gf_batch_invert_here(zis, zs, num_of_points);
    int num_threads = omp_get_max_threads();
    /* Per-thread scratch element so parallel iterations do not share a
     * temporary. */
    gf_25519_t product[num_threads];
    #pragma omp parallel for
    for (int i = 0; i < num_of_points; i++) {
        int current_thread_num = omp_get_thread_num();
        gf_25519_t *pp = &product[current_thread_num];
        /* Normalize to affine: multiply x and y by 1/z, strongly reduce. */
        gf_mul(pp, &table[2 * i], &zis[i]);
        gf_strong_reduce(pp);
        gf_copy(&table[2 * i], pp);
        gf_mul(pp, &table[2 * i + 1], &zis[i]);
        gf_strong_reduce(pp);
        gf_copy(&table[2 * i + 1], pp);
    }
    free(zis);
    free(zs);
}
void Deserialize_Honest(ristretto255_point_t *out, unsigned char *in, int num_of_points) {
    /*
     * Inverse of Serialize_Honest: read (x, y) pairs of SER_BYTES each and
     * rebuild extended coordinates with z = 1 and t = x * y.  "Honest" input
     * is trusted; the gf_deserialize flags (1, 0) match the file's other
     * call sites -- TODO confirm their exact semantics (presumably
     * with_hibit / no strict check).
     */
    int num_threads = omp_get_max_threads();
    /* Per-thread scratch elements, indexed by omp_get_thread_num(). */
    gf_25519_t a[num_threads];
    gf_25519_t b[num_threads];
    #pragma omp parallel for
    for (int i = 0; i < num_of_points; i++) {
        int current_thread_num = omp_get_thread_num();
        gf_deserialize(&a[current_thread_num], &in[i * SER_BYTES * 2], 1, 0);
        gf_deserialize(&b[current_thread_num], &in[i * SER_BYTES * 2 + SER_BYTES], 1, 0);
        gf_copy(&out[i].x, &a[current_thread_num]);
        gf_copy(&out[i].y, &b[current_thread_num]);
        gf_mul(&out[i].t, &out[i].x, &out[i].y);
        gf_copy(&out[i].z, &ONE);
    }
}
void Deserialize_Honest_old(ristretto255_point_t *out, unsigned char *in, int num_of_points) {
    /*
     * Inverse of Serialize_Honest_old: the buffer holds raw gf_25519_t pairs
     * (x, y) per point, memcpy'd straight out; z is set to 1 and t = x * y.
     */
    int num_threads = omp_get_max_threads();
    /* Per-thread scratch elements so parallel iterations stay independent. */
    gf_25519_t ax[num_threads];
    gf_25519_t ay[num_threads];
    #pragma omp parallel for
    for (int i = 0; i < num_of_points; i++) {
        int t = omp_get_thread_num();
        const unsigned char *rec = &in[i * sizeof(gf_25519_t) * 2];
        memcpy(&ax[t], rec, sizeof(gf_25519_t));
        memcpy(&ay[t], rec + sizeof(gf_25519_t), sizeof(gf_25519_t));
        gf_copy(&out[i].x, &ax[t]);
        gf_copy(&out[i].y, &ay[t]);
        gf_mul(&out[i].t, &out[i].x, &out[i].y);
        gf_copy(&out[i].z, &ONE);
    }
}
size_t Serialize_Malicious_Size(int num_of_points) {
    /* Canonical ristretto255 point encoding is 32 bytes per point. */
    return (size_t)32 * num_of_points;
}
void Serialize_Malicious(unsigned char *out, ristretto255_point_t *in, int num_of_points) {
    /* Serialize with the canonical (self-validating) ristretto255 encoding,
     * 32 bytes per point; every point is independent, so encode in parallel. */
    #pragma omp parallel for
    for (int idx = 0; idx < num_of_points; idx++)
        ristretto255_point_encode(out + 32 * idx, &in[idx]);
}
ristretto_error_t Deserialize_Malicious(ristretto255_point_t *out, unsigned char *in, int num_of_points) {
    /*
     * Decode num_of_points canonical 32-byte ristretto255 encodings with
     * full validation (RISTRETTO_TRUE), in parallel.  Returns
     * RISTRETTO_SUCCESS only if every decode succeeded.
     *
     * NOTE(review): per-decode results are folded with bitwise AND, which
     * assumes the decaf-style convention where SUCCESS is an all-ones mask
     * and FAILURE is zero -- confirm against the library's definition of
     * ristretto_error_t.
     */
    int num_threads = omp_get_max_threads();
    /* One accumulator per thread to avoid a shared-flag data race. */
    ristretto_error_t flag[num_threads];
    ristretto_error_t flag_tmp[num_threads];
    for (int i = 0; i < num_threads; i++) {
        flag[i] = RISTRETTO_SUCCESS;
    }
    #pragma omp parallel for
    for (int i = 0; i < num_of_points; i++) {
        int current_thread_num = omp_get_thread_num();
        flag_tmp[current_thread_num] = ristretto255_point_decode(&out[i], &in[i * 32], RISTRETTO_TRUE);
        flag[current_thread_num] &= flag_tmp[current_thread_num];
    }
    /* Reduce the per-thread accumulators into the final verdict. */
    ristretto_error_t final_flag = RISTRETTO_SUCCESS;
    for (int i = 0; i < num_threads; i++) {
        final_flag &= flag[i];
    }
    return final_flag;
}
|
effects.c | #define _POSIX_C_SOURCE 200809
#include <omp.h>
#include <stdlib.h>
#include <stdbool.h>
#include <dlfcn.h>
#include <string.h>
#include <errno.h>
#include <sys/wait.h>
#include <unistd.h>
#include <spawn.h>
#include "effects.h"
#include "log.h"
#include <time.h>
// glib might or might not have already defined MIN,
// depending on whether we have pixbuf or not...
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
extern char **environ;
static int screen_size_to_pix(struct swaylock_effect_screen_pos size, int screensize) {
	/* Resolve a size spec to pixels: percentages are taken relative to the
	 * screen dimension; absolute values (including negatives, which callers
	 * treat specially) pass through unchanged. */
	if (!size.is_percent)
		return size.pos;
	return (size.pos / 100.0) * screensize;
}
static int screen_pos_to_pix(struct swaylock_effect_screen_pos pos, int screensize) {
	/* Resolve a position spec to pixels: percentages are relative to the
	 * screen dimension, and negative results count back from the far edge. */
	int px = pos.pos;
	if (pos.is_percent)
		px = (pos.pos / 100.0) * screensize;
	return px < 0 ? screensize + px : px;
}
static void screen_pos_pair_to_pix(
		struct swaylock_effect_screen_pos posx,
		struct swaylock_effect_screen_pos posy,
		int objwidth, int objheight,
		int screenwidth, int screenheight, int gravity,
		int *outx, int *outy) {
	/* Resolve an (x, y) anchor to pixel coordinates, then shift by the
	 * object's size so that the gravity corner/edge lands on the anchor. */
	int px = screen_pos_to_pix(posx, screenwidth);
	int py = screen_pos_to_pix(posy, screenheight);
	/* Horizontal shift: centered, right-anchored, or left-anchored. */
	switch (gravity) {
	case EFFECT_COMPOSE_GRAV_CENTER:
	case EFFECT_COMPOSE_GRAV_N:
	case EFFECT_COMPOSE_GRAV_S:
		px -= objwidth / 2;
		break;
	case EFFECT_COMPOSE_GRAV_NE:
	case EFFECT_COMPOSE_GRAV_SE:
	case EFFECT_COMPOSE_GRAV_E:
		px -= objwidth;
		break;
	default: /* NW, SW, W: left edge, no shift. */
		break;
	}
	/* Vertical shift: centered, bottom-anchored, or top-anchored. */
	switch (gravity) {
	case EFFECT_COMPOSE_GRAV_CENTER:
	case EFFECT_COMPOSE_GRAV_W:
	case EFFECT_COMPOSE_GRAV_E:
		py -= objheight / 2;
		break;
	case EFFECT_COMPOSE_GRAV_SW:
	case EFFECT_COMPOSE_GRAV_SE:
	case EFFECT_COMPOSE_GRAV_S:
		py -= objheight;
		break;
	default: /* NW, NE, N: top edge, no shift. */
		break;
	}
	*outx = px;
	*outy = py;
}
static uint32_t blend_pixels(float alpha, uint32_t srcpix, uint32_t destpix) {
	/* Cairo-style "over" for a premultiplied source pixel:
	 * out = src + dest * (1 - alpha), per channel, with the result's alpha
	 * forced to opaque. */
	const float keep = 1 - alpha;
	const uint8_t sr = (srcpix >> 16) & 0xff;
	const uint8_t sg = (srcpix >> 8) & 0xff;
	const uint8_t sb = srcpix & 0xff;
	const uint8_t dr = (destpix >> 16) & 0xff;
	const uint8_t dg = (destpix >> 8) & 0xff;
	const uint8_t db = destpix & 0xff;
	uint32_t out = (uint32_t)255 << 24;
	out |= (uint32_t)(sr + dr * keep) << 16;
	out |= (uint32_t)(sg + dg * keep) << 8;
	out |= (uint32_t)(sb + db * keep);
	return out;
}
/* Horizontal box-blur pass: each output pixel is the average of the source
 * pixels in a sliding window around it (up to minradius on each side,
 * clamped at the row edges).  One OpenMP iteration per row; rows are
 * independent.  minradius caps the window at the image width so the
 * initial accumulation below never reads past the row. */
static void blur_h(uint32_t *dest, uint32_t *src, int width, int height,
		int radius) {
	const int minradius = radius < width ? radius : width;
	#pragma omp parallel for
	for (int y = 0; y < height; ++y) {
		uint32_t *srow = src + y * width;
		uint32_t *drow = dest + y * width;
		// 'range' is float, because floating point division is usually faster
		// than integer division.
		int r_acc = 0;
		int g_acc = 0;
		int b_acc = 0;
		float range = minradius;
		// Accumulate the range (0..radius)
		for (int x = 0; x < minradius; ++x) {
			r_acc += (srow[x] & 0xff0000) >> 16;
			g_acc += (srow[x] & 0x00ff00) >> 8;
			b_acc += (srow[x] & 0x0000ff);
		}
		// Deal with the main body
		for (int x = 0; x < width; ++x) {
			// Pixel leaving the window on the left.  Note the guard uses
			// minradius while the index uses radius: when radius >= width,
			// minradius == width and this branch never runs, so
			// srow[x - radius] cannot underflow.
			if (x >= minradius) {
				r_acc -= (srow[x - radius] & 0xff0000) >> 16;
				g_acc -= (srow[x - radius] & 0x00ff00) >> 8;
				b_acc -= (srow[x - radius] & 0x0000ff);
				range -= 1;
			}
			// Pixel entering the window on the right; same guard pattern
			// keeps srow[x + radius] in range.
			if (x < width - minradius) {
				r_acc += (srow[x + radius] & 0xff0000) >> 16;
				g_acc += (srow[x + radius] & 0x00ff00) >> 8;
				b_acc += (srow[x + radius] & 0x0000ff);
				range += 1;
			}
			// Write the window average; alpha is dropped (set to 0).
			drow[x] = 0 |
				(int)(r_acc / range) << 16 |
				(int)(g_acc / range) << 8 |
				(int)(b_acc / range);
		}
	}
}
/* Vertical box-blur pass: mirror image of blur_h, walking columns instead of
 * rows (stride 'width' between successive samples).  One OpenMP iteration
 * per column; columns are independent. */
static void blur_v(uint32_t *dest, uint32_t *src, int width, int height,
		int radius) {
	const int minradius = radius < height ? radius : height;
	#pragma omp parallel for
	for (int x = 0; x < width; ++x) {
		uint32_t *scol = src + x;
		uint32_t *dcol = dest + x;
		// 'range' is float, because floating point division is usually faster
		// than integer division.
		int r_acc = 0;
		int g_acc = 0;
		int b_acc = 0;
		float range = minradius;
		// Accumulate the range (0..radius)
		for (int y = 0; y < minradius; ++y) {
			r_acc += (scol[y * width] & 0xff0000) >> 16;
			g_acc += (scol[y * width] & 0x00ff00) >> 8;
			b_acc += (scol[y * width] & 0x0000ff);
		}
		// Deal with the main body
		for (int y = 0; y < height; ++y) {
			// Pixel leaving the window above; the minradius guard with a
			// radius index is safe for the same reason as in blur_h.
			if (y >= minradius) {
				r_acc -= (scol[(y - radius) * width] & 0xff0000) >> 16;
				g_acc -= (scol[(y - radius) * width] & 0x00ff00) >> 8;
				b_acc -= (scol[(y - radius) * width] & 0x0000ff);
				range -= 1;
			}
			// Pixel entering the window below.
			if (y < height - minradius) {
				r_acc += (scol[(y + radius) * width] & 0xff0000) >> 16;
				g_acc += (scol[(y + radius) * width] & 0x00ff00) >> 8;
				b_acc += (scol[(y + radius) * width] & 0x0000ff);
				range += 1;
			}
			// Write the window average; alpha is dropped (set to 0).
			dcol[y * width] = 0 |
				(int)(r_acc / range) << 16 |
				(int)(g_acc / range) << 8 |
				(int)(b_acc / range);
		}
	}
}
/* One full box-blur iteration: horizontal pass from src into scratch, then
 * vertical pass from scratch into dest.  src and dest may not alias scratch. */
static void blur_once(uint32_t *dest, uint32_t *src, uint32_t *scratch,
		int width, int height, int radius) {
	blur_h(scratch, src, width, height, radius);
	blur_v(dest, scratch, width, height, radius);
}
// This effect_blur function, and the associated blur_* functions,
// are my own adaptations of code in yvbbrjdr's i3lock-fancy-rapid:
// https://github.com/yvbbrjdr/i3lock-fancy-rapid
static void effect_blur(uint32_t *dest, uint32_t *src, int width, int height,
		int radius, int times) {
	/*
	 * Approximate a Gaussian blur of src into dest by running 'times'
	 * box-blur iterations, ping-ponging between the two buffers.
	 * src is clobbered when times > 1.
	 */
	uint32_t *origdest = dest;
	uint32_t *scratch = (uint32_t *)malloc(width * height * sizeof(*scratch));
	/* BUG FIX: malloc was unchecked; on OOM, fall back to copying the
	 * unblurred source instead of dereferencing NULL. */
	if (scratch == NULL) {
		memcpy(dest, src, width * height * sizeof(*dest));
		return;
	}
	blur_once(dest, src, scratch, width, height, radius);
	for (int i = 0; i < times - 1; ++i) {
		uint32_t *tmp = src;
		src = dest;
		dest = tmp;
		blur_once(dest, src, scratch, width, height, radius);
	}
	free(scratch);
	// We're flipping between using dest and src;
	// if the last buffer we used was src, copy that over to dest.
	if (dest != origdest)
		memcpy(origdest, dest, width * height * sizeof(*dest));
}
static void effect_pixelate(uint32_t *data, int width, int height, int factor) {
	/*
	 * Mosaic the image in place: for each factor x factor block, replace
	 * every pixel with the block's average color.  Blocks at the right and
	 * bottom edges are clipped to the image.
	 */
	#pragma omp parallel for
	for (int y = 0; y < height / factor + 1; ++y) {
		for (int x = 0; x < width / factor + 1; ++x) {
			int total_r = 0, total_g = 0, total_b = 0;
			int xstart = x * factor;
			int ystart = y * factor;
			int xlim = xstart + factor < width ? xstart + factor : width;
			int ylim = ystart + factor < height ? ystart + factor : height;
			/* BUG FIX: the average was always divided by factor*factor,
			 * which under-weights (darkens) clipped edge blocks; divide by
			 * the number of pixels actually summed. */
			int count = (xlim - xstart) * (ylim - ystart);
			if (count <= 0)
				continue;
			// Average
			for (int ry = ystart; ry < ylim; ++ry) {
				for (int rx = xstart; rx < xlim; ++rx) {
					int index = ry * width + rx;
					total_r += (data[index] & 0xff0000) >> 16;
					total_g += (data[index] & 0x00ff00) >> 8;
					total_b += (data[index] & 0x0000ff);
				}
			}
			int r = total_r / count;
			int g = total_g / count;
			int b = total_b / count;
			// Fill pixels (alpha is dropped)
			for (int ry = ystart; ry < ylim; ++ry) {
				for (int rx = xstart; rx < xlim; ++rx) {
					int index = ry * width + rx;
					data[index] = r << 16 | g << 8 | b;
				}
			}
		}
	}
}
static void effect_scale(uint32_t *dest, uint32_t *src, int swidth, int sheight,
		double scale) {
	/* Nearest-neighbour rescale of src (swidth x sheight) into dest
	 * (swidth*scale x sheight*scale, truncated). */
	const double inv = 1.0 / scale;
	const int dwidth = swidth * scale;
	const int dheight = sheight * scale;
	#pragma omp parallel for
	for (int row = 0; row < dheight; ++row) {
		const int srow = row * inv;
		if (srow >= sheight)
			continue;
		uint32_t *out = dest + row * dwidth;
		const uint32_t *in = src + srow * swidth;
		for (int col = 0; col < dwidth; ++col) {
			const int scol = col * inv;
			if (scol < swidth)
				out[col] = in[scol];
		}
	}
}
static void effect_greyscale(uint32_t *data, int width, int height) {
	/* Replace every pixel in place with its Rec.601 luma
	 * (0.2989 R + 0.5870 G + 0.1140 B); alpha is dropped. */
	#pragma omp parallel for
	for (int y = 0; y < height; ++y) {
		uint32_t *row = data + y * width;
		for (int x = 0; x < width; ++x) {
			const uint32_t px = row[x];
			const int r = (px >> 16) & 0xff;
			const int g = (px >> 8) & 0xff;
			const int b = px & 0xff;
			int luma = 0.2989 * r + 0.5870 * g + 0.1140 * b;
			if (luma < 0)
				luma = 0;
			if (luma > 255)
				luma = 255;
			luma &= 0xFF;
			row[x] = luma << 16 | luma << 8 | luma;
		}
	}
}
static void effect_vignette(uint32_t *data, int width, int height,
double base, double factor) {
base = fmin(1, fmax(0, base));
factor = fmin(1 - base, fmax(0, factor));
#pragma omp parallel for
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double xf = (x * 1.0) / width;
double yf = (y * 1.0) / height;
double vignette_factor = base + factor
* 16 * xf * yf * (1.0 - xf) * (1.0 - yf);
int index = y * width + x;
int r = (data[index] & 0xff0000) >> 16;
int g = (data[index] & 0x00ff00) >> 8;
int b = (data[index] & 0x0000ff);
r = (int)(r * vignette_factor) & 0xFF;
g = (int)(g * vignette_factor) & 0xFF;
b = (int)(b * vignette_factor) & 0xFF;
data[index] = r << 16 | g << 8 | b;
}
}
}
static void effect_compose(uint32_t *data, int width, int height,
		struct swaylock_effect_screen_pos posx,
		struct swaylock_effect_screen_pos posy,
		struct swaylock_effect_screen_pos posw,
		struct swaylock_effect_screen_pos posh,
		int gravity, char *imgpath) {
	/*
	 * Draw an image file on top of the screen buffer at a gravity-anchored
	 * position.  Negative width/height means "preserve aspect ratio".
	 * Requires gdk-pixbuf; otherwise the effect logs an error and is a no-op.
	 */
#if !HAVE_GDK_PIXBUF
	(void)&blend_pixels;
	(void)&screen_size_to_pix;
	(void)&screen_pos_pair_to_pix;
	swaylock_log(LOG_ERROR, "Compose effect: Compiled without gdk_pixbuf support.\n");
	return;
#else
	int imgw = screen_size_to_pix(posw, width);
	int imgh = screen_size_to_pix(posh, height);
	bool preserve_aspect = imgw < 0 || imgh < 0;
	GError *err = NULL;
	GdkPixbuf *pixbuf = gdk_pixbuf_new_from_file_at_scale(
			imgpath, imgw, imgh, preserve_aspect, &err);
	if (!pixbuf) {
		swaylock_log(LOG_ERROR, "Compose effect: Failed to load image file '%s' (%s).",
				imgpath, err->message);
		g_error_free(err);
		return;
	}
	cairo_surface_t *image = gdk_cairo_image_surface_create_from_pixbuf(pixbuf);
	g_object_unref(pixbuf);
	int bufw = cairo_image_surface_get_width(image);
	int bufh = cairo_image_surface_get_height(image);
	uint32_t *bufdata = (uint32_t *)cairo_image_surface_get_data(image);
	int bufstride = cairo_image_surface_get_stride(image) / 4;
	bool bufalpha = cairo_image_surface_get_format(image) == CAIRO_FORMAT_ARGB32;
	int imgx, imgy;
	screen_pos_pair_to_pix(
			posx, posy, bufw, bufh,
			width, height, gravity,
			&imgx, &imgy);
	#pragma omp parallel for
	for (int offy = 0; offy < bufh; ++offy) {
		/* BUG FIX: bounds checks used '>' instead of '>=', letting a write
		 * land at row == height / column == width, one past the buffer. */
		if (offy + imgy < 0 || offy + imgy >= height)
			continue;
		for (int offx = 0; offx < bufw; ++offx) {
			if (offx + imgx < 0 || offx + imgx >= width)
				continue;
			size_t idx = (size_t)(offy + imgy) * width + (offx + imgx);
			size_t bufidx = (size_t)offy * bufstride + (offx);
			if (!bufalpha) {
				data[idx] = bufdata[bufidx];
			} else {
				uint8_t alpha = (bufdata[bufidx] & 0xff000000) >> 24;
				if (alpha == 255) {
					/* Fully opaque: plain copy. */
					data[idx] = bufdata[bufidx];
				} else if (alpha != 0) {
					/* Partially transparent: alpha-blend over the screen. */
					data[idx] = blend_pixels(alpha / 255.0, bufdata[bufidx], data[idx]);
				}
			}
		}
	}
	cairo_surface_destroy(image);
#endif
}
static void effect_custom(uint32_t *data, int width, int height,
		char *path) {
	/*
	 * Load a user-supplied shared object and run its effect.  Two entry
	 * points are tried, in order:
	 *   void swaylock_effect(uint32_t *data, int width, int height);
	 *   uint32_t swaylock_pixel(uint32_t pix, int x, int y, int w, int h);
	 * The second is applied per pixel, rows in parallel.
	 */
	void *dl = dlopen(path, RTLD_LAZY);
	if (dl == NULL) {
		swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror());
		return;
	}
	void (*effect_func)(uint32_t *data, int width, int height) =
		dlsym(dl, "swaylock_effect");
	if (effect_func != NULL) {
		effect_func(data, width, height);
		dlclose(dl);
		return;
	}
	uint32_t (*pixel_func)(uint32_t pix, int x, int y, int width, int height) =
		dlsym(dl, "swaylock_pixel");
	if (pixel_func != NULL) {
		#pragma omp parallel for
		for (int y = 0; y < height; ++y) {
			for (int x = 0; x < width; ++x) {
				data[y * width + x] =
					pixel_func(data[y * width + x], x, y, width, height);
			}
		}
		dlclose(dl);
		return;
	}
	swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror());
	/* BUG FIX: the handle was leaked when neither symbol was found. */
	dlclose(dl);
}
/* Apply the effect chain in order to 'surface' and return the resulting
 * surface.  Blur and scale allocate a replacement surface and destroy the
 * input, so callers must use the returned pointer; the other effects modify
 * the surface's pixel data in place. */
cairo_surface_t *swaylock_effects_run(cairo_surface_t *surface,
		struct swaylock_effect *effects, int count) {
	for (int i = 0; i < count; ++i) {
		struct swaylock_effect *effect = &effects[i];
		switch (effect->tag) {
		case EFFECT_BLUR: {
			/* Blur renders into a fresh same-size surface. */
			cairo_surface_t *surf = cairo_image_surface_create(
					CAIRO_FORMAT_RGB24,
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface));
			if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
				swaylock_log(LOG_ERROR, "Failed to create surface for blur effect");
				cairo_surface_destroy(surf);
				break;
			}
			effect_blur(
					(uint32_t *)cairo_image_surface_get_data(surf),
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.blur.radius, effect->e.blur.times);
			cairo_surface_flush(surf);
			cairo_surface_destroy(surface);
			surface = surf;
			break;
		}
		case EFFECT_PIXELATE: {
			/* In-place effect. */
			effect_pixelate(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.pixelate.factor);
			cairo_surface_flush(surface);
			break;
		}
		case EFFECT_SCALE: {
			/* Scale renders into a fresh surface of the scaled size. */
			cairo_surface_t *surf = cairo_image_surface_create(
					CAIRO_FORMAT_RGB24,
					cairo_image_surface_get_width(surface) * effect->e.scale,
					cairo_image_surface_get_height(surface) * effect->e.scale);
			if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
				swaylock_log(LOG_ERROR, "Failed to create surface for scale effect");
				cairo_surface_destroy(surf);
				break;
			}
			effect_scale(
					(uint32_t *)cairo_image_surface_get_data(surf),
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.scale);
			cairo_surface_flush(surf);
			cairo_surface_destroy(surface);
			surface = surf;
			break;
		}
		case EFFECT_GREYSCALE: {
			/* In-place effect. */
			effect_greyscale(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface));
			cairo_surface_flush(surface);
			break;
		}
		case EFFECT_VIGNETTE: {
			/* In-place effect. */
			effect_vignette(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.vignette.base,
					effect->e.vignette.factor);
			cairo_surface_flush(surface);
			break;
		}
		case EFFECT_COMPOSE: {
			/* In-place effect. */
			effect_compose(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.compose.x, effect->e.compose.y,
					effect->e.compose.w, effect->e.compose.h,
					effect->e.compose.gravity, effect->e.compose.imgpath);
			cairo_surface_flush(surface);
			break;
		}
		case EFFECT_CUSTOM: {
			/* In-place effect via a user-provided shared object. */
			effect_custom(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.custom);
			cairo_surface_flush(surface);
			break;
		} }
	}
	return surface;
}
|
non-blocking-omp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include "mpi.h"
struct timeval startwtime, endwtime1, endwtime2;
double seq_time1, seq_time2;
double **minDist;
double **minLabels;
double* packer(double **received, int blocksize, int LINESIZE);
double** unpacker(double *toReceive,int blocksize, int LINESIZE);
void knnSearch(int rank, int l, double **local, double **received, int blocksize,int LINESIZE, int nbrs);
void pointCompare(long i, long j, int nbrs, double *pointA, double *pointB, int LINESIZE);
double Ndistance(double *pointA, double *pointB, int LINESIZE);
void bubbleSort(int i,int nbrs);
void swap(int i, int k);
/*
 * Distributed kNN search: the dataset is split into p equal blocks (one per
 * MPI rank); blocks circulate around a ring with non-blocking sends/receives
 * so each rank compares its local block against every block.  Results
 * (per-point top-nbrs distances and labels, kept in the globals
 * minDist/minLabels) are gathered on rank 0 and written to a text file.
 *
 * Usage: prog filename MAX nbrs threads  (see the printf block below).
 */
int main(int argc, char** argv){
    char filename[100];
    int MAX = 0;
    int LINESIZE = 0;
    int nbrs = 0;
    int i,j,l;
    int blocksize = 0;
    double **local, **received;
    double *toSend, *toReceive;
    int rank, p; //==number of procs
    MPI_File fp;
    MPI_File fpResults;
    MPI_Request send_req;
    MPI_Request recv_req;
    MPI_Status status;
    MPI_Offset offset;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    if(argc!=5){
        printf("ERROR: usage:\n%s filename MAX nbrs threads\n", argv[0]);
        printf("filename is the name of the .bin file to take data from\n");
        printf("MAX is the number of elements to take part in the search\n");
        printf("nbrs is the number of the nearest neighbours search for each point\n");
        printf("threads is the number of threads to be used for OpenMP\n");
        exit(1);
    }
    MAX = atoi(argv[2]);
    nbrs = atoi(argv[3]);
    // NOTE(review): assumes p divides MAX evenly; a remainder is silently dropped.
    blocksize = MAX/p;
    omp_set_num_threads(atoi(argv[4]));
    strcpy(filename, argv[1]);
    // Dataset dimensionality is inferred from the file name (SVD-reduced or raw MNIST-style 784).
    if(!strcmp(filename,"trainX_svd")){
        LINESIZE = 30;
    }
    else{
        LINESIZE = 784;
    }
    //creating blocks
    local = (double **) malloc(blocksize*sizeof(double*));
    received = (double **) malloc(blocksize*sizeof(double*));
    toSend = (double *) malloc(blocksize*LINESIZE*sizeof(double));
    toReceive = (double *) malloc(blocksize*LINESIZE*sizeof(double));
    for(i=0; i<blocksize; i++){
        local[i] = (double *) malloc(LINESIZE*sizeof(double));
        received[i] = (double *) malloc(LINESIZE*sizeof(double));
    }
    //initialising results array (globals used by knnSearch/pointCompare)
    minDist = (double **) malloc(MAX*sizeof(double*));
    minLabels = (double **) malloc(MAX*sizeof(double*));
    for(i=0; i<MAX; i++){
        minDist[i] = (double *) malloc(nbrs*sizeof(double));
        minLabels[i] = (double *) malloc(nbrs*sizeof(double));
        for(j=0; j<nbrs; j++){
            //presetting minDist to sth very big
            minDist[i][j] = 1000;
            minLabels[i][j] = -1;
        }
    }
    strcat(filename, ".bin");
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fp)){
        printf("Error reading file from Process %d (fp)\n", rank);
        exit(1);
    }
    else{
        //block reading: each rank reads its contiguous slice, one double at a time
        if(rank==0)
            printf("Initialising kNN search for problem size %d and k = %d\nusing archive %s\n", MAX, nbrs, filename);
        for(i=0; i<blocksize; i++){
            for(j=0; j<LINESIZE ; j++){
                offset = rank*blocksize*LINESIZE*sizeof(double)+i*LINESIZE*sizeof(double)+j*sizeof(double);
                MPI_File_read_at(fp, offset, &local[i][j], 1, MPI_DOUBLE, &status);
            }
        }
        //blockprint(local, blocksize, rank);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_File_close(&fp);
        // Self-comparison: local block vs itself before circulation starts.
        knnSearch(rank, rank, local, local, blocksize,LINESIZE, nbrs);
    }
    //2d array to 1d array:
    // NOTE(review): packer() mallocs a fresh buffer, so the toSend allocated
    // above (and each previous toSend in the loop below) is leaked.
    toSend = packer(local, blocksize, LINESIZE);
    if(rank==0) gettimeofday (&startwtime, NULL);
    //circulation of blocks: p-1 ring steps, send to rank+1, receive from rank-1 (mod p)
    for(l=0; l<(p-1); l++){
        if(rank==0){
            MPI_Isend(toSend, blocksize*LINESIZE, MPI_DOUBLE, rank+1, 10, MPI_COMM_WORLD, &send_req);
            MPI_Irecv(toReceive, blocksize*LINESIZE, MPI_DOUBLE, p-1, 10, MPI_COMM_WORLD, &recv_req);
        }
        else if(rank==(p-1)){
            MPI_Isend(toSend, blocksize*LINESIZE, MPI_DOUBLE, 0, 10, MPI_COMM_WORLD, &send_req);
            MPI_Irecv(toReceive, blocksize*LINESIZE, MPI_DOUBLE, rank-1, 10, MPI_COMM_WORLD, &recv_req);
        }
        else{
            MPI_Isend(toSend, blocksize*LINESIZE, MPI_DOUBLE, rank+1, 10, MPI_COMM_WORLD, &send_req);
            MPI_Irecv(toReceive, blocksize*LINESIZE, MPI_DOUBLE, rank-1, 10, MPI_COMM_WORLD, &recv_req);
        }
        //wait to receive
        MPI_Wait(&recv_req, &status);
        if(rank==0) gettimeofday (&endwtime1, NULL);
        //1d to 2d array
        // NOTE(review): unpacker() also allocates; the previous 'received'
        // rows are leaked on every iteration.
        received = unpacker(toReceive, blocksize, LINESIZE);
        // Work out which rank's block this originally was: it has travelled
        // l extra hops beyond its immediate sender.
        int tmp = status.MPI_SOURCE;
        for(int t=0; t<l ;t++){
            tmp--;
            if(tmp<0) tmp = p-1;
        }
        knnSearch(rank, tmp, local, received, blocksize, LINESIZE, nbrs);
        //wait for send to finish
        MPI_Wait(&send_req, &status);
        toSend = packer(received, blocksize, LINESIZE);
    }
    //preparing to send results to proc 0
    toSend = (double *) realloc(toSend, blocksize*nbrs*sizeof(double));
    toReceive = (double *) realloc(toReceive, blocksize*nbrs*sizeof(double));
    if(rank==0){
        // Gather each worker's block of distances (tag 15) and labels (tag 20).
        for(i=1; i<p; i++){
            MPI_Recv(toSend, blocksize*nbrs, MPI_DOUBLE, i, 15, MPI_COMM_WORLD, &status);
            received = unpacker(toSend, blocksize, nbrs);
            for(j=0; j<blocksize; j++){
                for(int k=0; k<nbrs; k++){
                    minDist[status.MPI_SOURCE*blocksize+j][k] = received[j][k];
                }
            }
            MPI_Recv(toReceive, blocksize*nbrs, MPI_DOUBLE, i, 20, MPI_COMM_WORLD, &status);
            received = unpacker(toReceive, blocksize, nbrs);
            for(j=0; j<blocksize; j++){
                for(int k=0; k<nbrs; k++){
                    minLabels[status.MPI_SOURCE*blocksize+j][k] = received[j][k];
                }
            }
        }
    }
    else{
        //toSend buffer used for minDist
        //toReceive buffer used for minLabels
        for(i=0; i<blocksize; i++){
            for(j=0; j<nbrs; j++){
                toSend[i*nbrs+j] = minDist[rank*blocksize+i][j];
                toReceive[i*nbrs+j] = minLabels[rank*blocksize+i][j];
            }
        }
        // NOTE(review): these Isends are never MPI_Wait'ed before toSend/
        // toReceive are freed below (the barrier comes after the frees) --
        // the buffers could still be in flight; confirm/add waits.
        MPI_Isend(toSend, blocksize*nbrs, MPI_DOUBLE, 0, 15, MPI_COMM_WORLD, &send_req);
        MPI_Isend(toReceive, blocksize*nbrs, MPI_DOUBLE, 0, 20, MPI_COMM_WORLD, &recv_req);
    }
    strcpy(filename, argv[1]);
    if(!strcmp(filename,"trainX_svd")){
        strcpy(filename, "results-mpi-non-blocking-svd.txt");
    }
    else{
        strcpy(filename, "results-mpi-non-blocking.txt");
    }
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp)){
        printf("Error opening file from Process %d (fp)\n", rank);
        exit(1);
    }
    if(rank==0){
        gettimeofday (&endwtime2, NULL);
    }
    /*
    strcpy(filename, argv[1]);
    if(!strcmp(filename,"trainX_svd")){
    strcpy(filename, "results-mpi-blocking-labels-svd.txt");
    }
    else{
    strcpy(filename, "results-mpi-non-blocking-labels.txt");
    }
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp)){
    printf("Error opening file from Process %d (fp)\n", rank);
    exit(1);
    }
    strcpy(filename, argv[1]);
    if(!strcmp(filename,"trainX_svd")){
    strcpy(filename, "results-mpi-non-blocking-dist-svd.txt");
    }
    else{
    strcpy(filename, "results-mpi-blocking-dist.txt");
    }
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fpResults)){
    printf("Error opening file from Process %d (fp)\n", rank);
    exit(1);
    }
    //printing results in two separate files
    for(i=0; i<blocksize; i++){
    char buf[100];
    for(j=0; j<nbrs; j++){
    if(minLabels[rank*blocksize+i][j]==-1) printf("ERROR\n");
    //offset = rank*blocksize*LINESIZE*sizeof(char)+i*LINESIZE*sizeof(char)+j*sizeof(char);
    offset = rank*blocksize*LINESIZE*sizeof(char)+i*LINESIZE*sizeof(char)+j*sizeof(char);
    sprintf(buf, "%f ", minLabels[rank*blocksize+i][j]);
    MPI_File_write_at(fp, offset, buf, strlen(buf), MPI_CHAR, &status);
    sprintf(buf, "%f ", minDist[rank*blocksize+i][j]);
    MPI_File_write_at(fpResults, offset, buf, strlen(buf), MPI_CHAR, &status);
    //printf("#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
    }
    }
    */
    if(rank==0){
        //printing results in a single file in easily readable form from proc 0 ONLY
        for(i=0; i<p*blocksize; i++){
            char buf[100];
            sprintf( buf, "Top %d closest to point %d:\n",nbrs, i);
            MPI_File_write(fp, buf, strlen(buf), MPI_CHAR, &status);
            //printf("Top %d closest to point %d:\n",nbrs, i);
            for(j=0; j<nbrs; j++){
                if(minLabels[i][j]==-1) printf("ERROR\n");
                sprintf(buf, "#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
                MPI_File_write(fp, buf, strlen(buf), MPI_CHAR, &status);
                //printf("#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
            }
        }
    }
    if(rank==0){
        seq_time1 = (double)((endwtime1.tv_usec - startwtime.tv_usec)/1.0e6
        + endwtime1.tv_sec - startwtime.tv_sec);
        printf("COMMS Wall clock time = %f\n", seq_time1);
        seq_time2 = (double)((endwtime2.tv_usec - startwtime.tv_usec)/1.0e6
        + endwtime2.tv_sec - startwtime.tv_sec);
        printf("FINAL Wall clock time = %f\n", seq_time2);
        printf("\nJob Done.\n");
    }
    MPI_File_close(&fp);
    // NOTE(review): fpResults is only opened inside the commented-out block
    // above, so this closes an uninitialized MPI_File handle -- confirm.
    MPI_File_close(&fpResults);
    // NOTE(review): only the pointer arrays are freed; the per-row
    // allocations (and minDist/minLabels) are left to process exit.
    free(local);
    free(received);
    free(toSend);
    free(toReceive);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return (0);
}
double* packer(double **received, int blocksize, int LINESIZE){
    /* Flatten a blocksize x LINESIZE matrix into a newly allocated row-major
     * buffer suitable for MPI transfer.  The caller owns (and must free)
     * the returned buffer. */
    double *flat = (double *) malloc(blocksize*LINESIZE*sizeof(double));
    for(int row = 0; row < blocksize; row++){
        for(int col = 0; col < LINESIZE; col++){
            flat[row*LINESIZE + col] = received[row][col];
        }
    }
    return flat;
}
double** unpacker(double *toReceive, int blocksize, int LINESIZE){
    /*
     * Expand a row-major blocksize*LINESIZE buffer back into an array of
     * freshly allocated row pointers.  The caller owns the result (the
     * pointer array and every row).
     *
     * BUG FIX: the row-pointer array was allocated with
     * malloc(blocksize*sizeof(double)) instead of sizeof(double *); that
     * only worked by accident on platforms where the two sizes coincide.
     */
    int i,j;
    double **temp;
    temp = (double **) malloc(blocksize*sizeof(double *));
    for(i=0; i<blocksize; i++){
        temp[i] = (double *) malloc(LINESIZE*sizeof(double));
        for(j=0; j<LINESIZE; j++){
            temp[i][j]= toReceive[i*LINESIZE+j];
        }
    }
    return temp;
}
void knnSearch(int rank, int l, double **local, double **received, int blocksize, int LINESIZE, int nbrs){
    /* Compare every point of the local block against every point of the
       received block, updating the per-point nearest-neighbour tables via
       pointCompare().  Each outer iteration touches only row
       rank*blocksize+i of the global tables, so the outer loop is safe to
       parallelise.
       Fixes vs. the previous version:
         - pointA/pointB were single malloc'd buffers shared by every OpenMP
           thread (a data race) and were never freed (a leak).  The copies
           were unnecessary: the rows are only read, so local[i] and
           received[j] are passed directly.
         - the inner index j was declared outside the parallel region and
           therefore shared between threads; it is now per-iteration. */
    int i;
    #pragma omp parallel for
    for(i=0; i<blocksize; i++){
        int j;
        for(j=0; j<blocksize; j++){
            /* Global indices: i-th local point vs j-th point of block l. */
            pointCompare(rank*blocksize+i, l*blocksize+j, nbrs, local[i], received[j], LINESIZE);
        }
    }
}
void pointCompare(long i, long j, int nbrs, double *pointA, double *pointB, int LINESIZE){
    /* Update the nbrs-long nearest-neighbour list of point i with
       candidate point j, if j is closer than a currently stored entry. */
    double d = Ndistance(pointA, pointB, LINESIZE);
    /* Keep the candidate list ordered before probing it. */
    bubbleSort(i, nbrs);
    /* A zero distance means the point is being compared with itself. */
    if(d <= 0) return;
    int slot;
    for(slot = 0; slot < nbrs; slot++){
        if(d < minDist[i][slot]){
            /* Shift everything after the insertion point one place back,
               dropping the current worst entry. */
            int m;
            for(m = nbrs - 1; m > slot; m--){
                minDist[i][m] = minDist[i][m-1];
                minLabels[i][m] = minLabels[i][m-1];
            }
            minDist[i][slot] = d;
            minLabels[i][slot] = j;
            return;
        }
    }
}
double Ndistance(double *pointA, double *pointB, int LINESIZE){
double dist=0;
for(int k=0; k<LINESIZE; k++){
dist += pow(pointA[k]-pointB[k],2);
}
return sqrt(dist);
}
void bubbleSort(int i,int nbrs){
    /* Bubble-sort the nbrs candidate distances of point i in ascending
       order; swap() keeps the label table in lock-step. */
    int pass, idx;
    for(pass = 0; pass < nbrs; pass++){
        for(idx = nbrs - 1; idx > pass; idx--){
            /* Out of order: move the smaller distance one slot forward. */
            if(minDist[i][idx] < minDist[i][idx-1]){
                swap(i, idx);
            }
        }
    }
}
void swap(int i, int k){
    /* Exchange entries k-1 and k of row i in both the distance and label
       tables, so the two stay aligned. */
    double d = minDist[i][k];
    minDist[i][k] = minDist[i][k-1];
    minDist[i][k-1] = d;
    double lab = minLabels[i][k];
    minLabels[i][k] = minLabels[i][k-1];
    minLabels[i][k-1] = lab;
}
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// Representation of an OpenMP canonical loop.
///
/// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form
/// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form
/// OpenMP 4.0, section 2.6 Canonical Loop Form
/// OpenMP 4.5, section 2.6 Canonical Loop Form
/// OpenMP 5.0, section 2.9.1 Canonical Loop Form
/// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form
///
/// An OpenMP canonical loop is a for-statement or range-based for-statement
/// with additional requirements that ensure that the number of iterations is
/// known before entering the loop and allow skipping to an arbitrary iteration.
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is
/// known to fulfill OpenMP's canonical loop requirements because of being
/// associated to an OMPLoopBasedDirective. That is, the general structure is:
///
/// OMPLoopBasedDirective
/// [`- CapturedStmt ]
/// [ `- CapturedDecl]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- Stmt
///
/// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some
/// directives such as OMPParallelForDirective, but others do not need them
/// (such as OMPTileDirective). The OMPCanonicalLoop and
/// ForStmt/CXXForRangeStmt pair is repeated for each loop associated with the
/// directive. A OMPCanonicalLoop must not appear in the AST unless associated
/// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the
/// OMPCanonicalLoop may also be wrapped in a CompoundStmt:
///
/// [...]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- CompoundStmt
/// |- Leading in-between code (if any)
/// |- OMPCanonicalLoop
/// | `- ForStmt/CXXForRangeStmt
/// | `- ...
/// `- Trailing in-between code (if any)
///
/// The leading/trailing in-between code must not itself be a OMPCanonicalLoop
/// to avoid confusion which loop belongs to the nesting.
///
/// There are three different kinds of iteration variables for different
/// purposes:
/// * Loop user variable: The user-accessible variable with different value for
/// each iteration.
/// * Loop iteration variable: The variable used to identify a loop iteration;
/// for range-based for-statement, this is the hidden iterator '__begin'. For
/// other loops, it is identical to the loop user variable. Must be a
/// random-access iterator, pointer or integer type.
/// * Logical iteration counter: Normalized loop counter starting at 0 and
/// incrementing by one at each iteration. Allows abstracting over the type
/// of the loop iteration variable and is always an unsigned integer type
/// appropriate to represent the range of the loop iteration variable. Its
/// value corresponds to the logical iteration number in the OpenMP
/// specification.
///
/// This AST node provides two captured statements:
/// * The distance function which computes the number of iterations.
/// * The loop user variable function that computes the loop user variable when
/// given a logical iteration number.
///
/// These captured statements provide the link between C/C++ semantics and the
/// logical iteration counters used by the OpenMPIRBuilder which is
/// language-agnostic and therefore does not know e.g. how to advance a
/// random-access iterator. The OpenMPIRBuilder will use this information to
/// apply simd, workshare-loop, distribute, taskloop and loop directives to the
/// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an
/// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an
/// OMPLoopDirective and skipped when searching for the associated syntactical
/// loop.
///
/// Example:
/// <code>
/// std::vector<std::string> Container{1,2,3};
/// for (std::string Str : Container)
/// Body(Str);
/// </code>
/// which is syntactic sugar for approximately:
/// <code>
/// auto &&__range = Container;
/// auto __begin = std::begin(__range);
/// auto __end = std::end(__range);
/// for (; __begin != __end; ++__begin) {
///   std::string Str = *__begin;
/// Body(Str);
/// }
/// </code>
/// In this example, the loop user variable is `Str`, the loop iteration
/// variable is `__begin` of type `std::vector<std::string>::iterator` and the
/// logical iteration number type is `size_t` (unsigned version of
/// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`).
/// Therefore, the distance function will be
/// <code>
/// [&](size_t &Result) { Result = __end - __begin; }
/// </code>
/// and the loop variable function is
/// <code>
/// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) {
/// Result = __begin + Logical;
/// }
/// </code>
/// The variable `__begin`, aka the loop iteration variable, is captured by
/// value because it is modified in the loop body, but both functions require
/// the initial value. The OpenMP specification explicitly leaves unspecified
/// when the loop expressions are evaluated such that a capture by reference is
/// sufficient.
class OMPCanonicalLoop : public Stmt {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  /// Children of this AST node.
  enum {
    LOOP_STMT,
    DISTANCE_FUNC,
    LOOPVAR_FUNC,
    LOOPVAR_REF,
    LastSubStmt = LOOPVAR_REF
  };
private:
  /// This AST node's children.
  Stmt *SubStmts[LastSubStmt + 1] = {};
  /// Private: instances are built through create()/createEmpty(), which fill
  /// in the children (or leave them for the AST reader).
  OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {}
public:
  /// Create a new OMPCanonicalLoop.
  static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt,
                                  CapturedStmt *DistanceFunc,
                                  CapturedStmt *LoopVarFunc,
                                  DeclRefExpr *LoopVarRef) {
    OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop();
    S->setLoopStmt(LoopStmt);
    S->setDistanceFunc(DistanceFunc);
    S->setLoopVarFunc(LoopVarFunc);
    S->setLoopVarRef(LoopVarRef);
    return S;
  }
  /// Create an empty OMPCanonicalLoop for deserialization.
  static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) {
    return new (Ctx) OMPCanonicalLoop();
  }
  /// Support for isa<>/cast<>/dyn_cast<>.
  static bool classof(const Stmt *S) {
    return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass;
  }
  /// Source range is that of the wrapped loop statement.
  SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); }
  SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); }
  /// Return this AST node's children.
  /// @{
  child_range children() {
    return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  const_child_range children() const {
    return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  /// @}
  /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt).
  /// @{
  Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; }
  const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; }
  void setLoopStmt(Stmt *S) {
    assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) &&
           "Canonical loop must be a for loop (range-based or otherwise)");
    SubStmts[LOOP_STMT] = S;
  }
  /// @}
  /// The function that computes the number of loop iterations. Can be evaluated
  /// before entering the loop but after the syntactical loop's init
  /// statement(s).
  ///
  /// Function signature: void(LogicalTy &Result)
  /// Any values necessary to compute the distance are captures of the closure.
  /// @{
  CapturedStmt *getDistanceFunc() {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  const CapturedStmt *getDistanceFunc() const {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  void setDistanceFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[DISTANCE_FUNC] = S;
  }
  /// @}
  /// The function that computes the loop user variable from a logical iteration
  /// counter. Can be evaluated as first statement in the loop.
  ///
  /// Function signature: void(LoopVarTy &Result, LogicalTy Number)
  /// Any other values required to compute the loop user variable (such as start
  /// value, step size) are captured by the closure. In particular, the initial
  /// value of loop iteration variable is captured by value to be unaffected by
  /// previous iterations.
  /// @{
  CapturedStmt *getLoopVarFunc() {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  const CapturedStmt *getLoopVarFunc() const {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  void setLoopVarFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[LOOPVAR_FUNC] = S;
  }
  /// @}
  /// Reference to the loop user variable as accessed in the loop body.
  /// @{
  DeclRefExpr *getLoopVarRef() {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  const DeclRefExpr *getLoopVarRef() const {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  void setLoopVarRef(DeclRefExpr *E) {
    assert(E && "Expected non-null loop variable");
    SubStmts[LOOPVAR_REF] = E;
  }
  /// @}
};
/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  /// Kind of the directive.
  OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
  /// Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// Ending location of the directive.
  SourceLocation EndLoc;
  /// Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }
protected:
  /// Data, associated with the directive.
  OMPChildren *Data = nullptr;
  /// Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)) {}
  /// Allocate and construct a directive of type \a T together with its
  /// OMPChildren storage (clauses, associated statement and \a NumChildren
  /// extra child expressions/statements), which is placed in memory
  /// immediately after the directive object itself.
  template <typename T, typename... Params>
  static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
                            Stmt *AssociatedStmt, unsigned NumChildren,
                            Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    // OMPChildren lives directly behind the T object in the same allocation.
    auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses,
                                     AssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }
  /// As createDirective(), but with empty clause/child slots to be filled in
  /// later (used for deserialization).
  template <typename T, typename... Params>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt, unsigned NumChildren,
                                 Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data =
        OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
                                 HasAssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }
  /// Overload for directive types that are default-constructible.
  template <typename T>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt = false,
                                 unsigned NumChildren = 0) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data =
        OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
                                 HasAssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T;
    Inst->Data = Data;
    return Inst;
  }
public:
  /// Iterates over expressions/statements used in the construct.
  class used_clauses_child_iterator
      : public llvm::iterator_adaptor_base<
            used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
            std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
    ArrayRef<OMPClause *>::iterator End;
    OMPClause::child_iterator ChildI, ChildEnd;
    /// If the current clause's children are exhausted, advance to the next
    /// clause that has used children (or to End).
    void MoveToNext() {
      if (ChildI != ChildEnd)
        return;
      while (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
          if (ChildI != ChildEnd)
            return;
        }
      }
    }
  public:
    explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
        : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      if (this->I != End) {
        ChildI = (*this->I)->used_children().begin();
        ChildEnd = (*this->I)->used_children().end();
        MoveToNext();
      }
    }
    Stmt *operator*() const { return *ChildI; }
    Stmt *operator->() const { return **this; }
    used_clauses_child_iterator &operator++() {
      ++ChildI;
      if (ChildI != ChildEnd)
        return *this;
      if (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
        }
      }
      MoveToNext();
      return *this;
    }
  };
  /// Range over all used child statements/expressions of \p Clauses.
  static llvm::iterator_range<used_clauses_child_iterator>
  used_clauses_children(ArrayRef<OMPClause *> Clauses) {
    return {used_clauses_child_iterator(Clauses),
            used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
  }
  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;
    /// Skip clauses that are not of type SpecificClause.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }
  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }
    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }
    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };
  /// Range over all clauses of type SpecificClause in \p Clauses.
  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }
  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }
  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  static const SpecificClause *getSingleClause(ArrayRef<OMPClause *> Clauses) {
    auto ClausesOfKind = getClausesOfKind<SpecificClause>(Clauses);
    if (ClausesOfKind.begin() != ClausesOfKind.end()) {
      assert(std::next(ClausesOfKind.begin()) == ClausesOfKind.end() &&
             "There are at least 2 clauses of the specified kind");
      return *ClausesOfKind.begin();
    }
    return nullptr;
  }
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    return getSingleClause<SpecificClause>(clauses());
  }
  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }
  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }
  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// Get number of clauses.
  unsigned getNumClauses() const {
    if (!Data)
      return 0;
    return Data->getNumClauses();
  }
  /// Returns specified clause.
  ///
  /// \param I Number of clause.
  ///
  OMPClause *getClause(unsigned I) const { return clauses()[I]; }
  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }
  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getAssociatedStmt();
  }
  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getCapturedStmt(RegionKind, CaptureRegions);
  }
  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getInnermostCapturedStmt(CaptureRegions);
  }
  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
  /// Support for isa<>/cast<>/dyn_cast<>: matches any directive in the
  /// OMPExecutableDirective StmtClass range.
  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }
  child_range children() {
    if (!Data)
      return child_range(child_iterator(), child_iterator());
    return Data->getAssociatedStmtAsRange();
  }
  const_child_range children() const {
    return const_cast<OMPExecutableDirective *>(this)->children();
  }
  ArrayRef<OMPClause *> clauses() const {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }
  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;
  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive,
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const {
    return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
  }
  Stmt *getStructuredBlock();
  const Stmt *getRawStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
  }
  Stmt *getRawStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getRawStmt();
  }
};
/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPParallelDirective()
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation()) {}
  /// Sets special task reduction descriptor.
  /// The descriptor is stored in child slot 0 of the directive's Data.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// Returns special task reduction reference expression.
  /// May be null if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Support for isa<>/cast<>/dyn_cast<>.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// The base class for all loop-based directives, including loop transformation
/// directives.
class OMPLoopBasedDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
protected:
/// Number of collapsed loops as specified by 'collapse' clause.
unsigned NumAssociatedLoops = 0;
/// Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param NumAssociatedLoops Number of loops associated with the construct.
///
OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumAssociatedLoops)
: OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
NumAssociatedLoops(NumAssociatedLoops) {}
public:
/// The expressions built to support OpenMP loops in combined/composite
/// pragmas (e.g. pragma omp distribute parallel for)
struct DistCombinedHelperExprs {
/// DistributeLowerBound - used when composing 'omp distribute' with
/// 'omp for' in a same construct.
Expr *LB;
/// DistributeUpperBound - used when composing 'omp distribute' with
/// 'omp for' in a same construct.
Expr *UB;
/// DistributeEnsureUpperBound - used when composing 'omp distribute'
/// with 'omp for' in a same construct, EUB depends on DistUB
Expr *EUB;
/// Distribute loop iteration variable init used when composing 'omp
/// distribute'
/// with 'omp for' in a same construct
Expr *Init;
/// Distribute Loop condition used when composing 'omp distribute'
/// with 'omp for' in a same construct
Expr *Cond;
/// Update of LowerBound for statically scheduled omp loops for
/// outer loop in combined constructs (e.g. 'distribute parallel for')
Expr *NLB;
/// Update of UpperBound for statically scheduled omp loops for
/// outer loop in combined constructs (e.g. 'distribute parallel for')
Expr *NUB;
/// Distribute Loop condition used when composing 'omp distribute'
/// with 'omp for' in a same construct when schedule is chunked.
Expr *DistCond;
/// 'omp parallel for' loop condition used when composed with
/// 'omp distribute' in the same construct and when schedule is
/// chunked and the chunk size is 1.
Expr *ParForInDistCond;
};
/// The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
/// Loop iteration variable.
Expr *IterationVarRef;
/// Loop last iteration number.
Expr *LastIteration;
/// Loop number of iterations.
Expr *NumIterations;
/// Calculation of last iteration.
Expr *CalcLastIteration;
/// Loop pre-condition.
Expr *PreCond;
/// Loop condition.
Expr *Cond;
/// Loop iteration variable init.
Expr *Init;
/// Loop increment.
Expr *Inc;
/// IsLastIteration - local flag variable passed to runtime.
Expr *IL;
/// LowerBound - local variable passed to runtime.
Expr *LB;
/// UpperBound - local variable passed to runtime.
Expr *UB;
/// Stride - local variable passed to runtime.
Expr *ST;
/// EnsureUpperBound -- expression UB = min(UB, NumIterations).
Expr *EUB;
/// Update of LowerBound for statically scheduled 'omp for' loops.
Expr *NLB;
/// Update of UpperBound for statically scheduled 'omp for' loops.
Expr *NUB;
/// PreviousLowerBound - local variable passed to runtime in the
/// enclosing schedule or null if that does not apply.
Expr *PrevLB;
/// PreviousUpperBound - local variable passed to runtime in the
/// enclosing schedule or null if that does not apply.
Expr *PrevUB;
/// DistInc - increment expression for distribute loop when found
/// combined with a further loop level (e.g. in 'distribute parallel for')
/// expression IV = IV + ST
Expr *DistInc;
/// PrevEUB - expression similar to EUB but to be used when loop
/// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
/// when ensuring that the UB is either the calculated UB by the runtime or
/// the end of the assigned distribute chunk)
/// expression UB = min (UB, PrevUB)
Expr *PrevEUB;
/// Counters Loop counters.
SmallVector<Expr *, 4> Counters;
/// PrivateCounters Loop counters.
SmallVector<Expr *, 4> PrivateCounters;
/// Expressions for loop counters inits for CodeGen.
SmallVector<Expr *, 4> Inits;
/// Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
/// Final loop counter values for GodeGen.
SmallVector<Expr *, 4> Finals;
/// List of counters required for the generation of the non-rectangular
/// loops.
SmallVector<Expr *, 4> DependentCounters;
/// List of initializers required for the generation of the non-rectangular
/// loops.
SmallVector<Expr *, 4> DependentInits;
/// List of final conditions required for the generation of the
/// non-rectangular loops.
SmallVector<Expr *, 4> FinalsConditions;
/// Init statement for all captured expressions.
Stmt *PreInits;
/// Expressions used when combining OpenMP loop pragmas
DistCombinedHelperExprs DistCombinedFields;
/// Check if all the expressions are built (does not check the
/// worksharing ones).
bool builtAll() {
return IterationVarRef != nullptr && LastIteration != nullptr &&
NumIterations != nullptr && PreCond != nullptr &&
Cond != nullptr && Init != nullptr && Inc != nullptr;
}
/// Initialize all the fields to null.
/// \param Size Number of elements in the
/// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
/// arrays.
void clear(unsigned Size) {
IterationVarRef = nullptr;
LastIteration = nullptr;
CalcLastIteration = nullptr;
PreCond = nullptr;
Cond = nullptr;
Init = nullptr;
Inc = nullptr;
IL = nullptr;
LB = nullptr;
UB = nullptr;
ST = nullptr;
EUB = nullptr;
NLB = nullptr;
NUB = nullptr;
NumIterations = nullptr;
PrevLB = nullptr;
PrevUB = nullptr;
DistInc = nullptr;
PrevEUB = nullptr;
Counters.resize(Size);
PrivateCounters.resize(Size);
Inits.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
DependentCounters.resize(Size);
DependentInits.resize(Size);
FinalsConditions.resize(Size);
for (unsigned I = 0; I < Size; ++I) {
Counters[I] = nullptr;
PrivateCounters[I] = nullptr;
Inits[I] = nullptr;
Updates[I] = nullptr;
Finals[I] = nullptr;
DependentCounters[I] = nullptr;
DependentInits[I] = nullptr;
FinalsConditions[I] = nullptr;
}
PreInits = nullptr;
DistCombinedFields.LB = nullptr;
DistCombinedFields.UB = nullptr;
DistCombinedFields.EUB = nullptr;
DistCombinedFields.Init = nullptr;
DistCombinedFields.Cond = nullptr;
DistCombinedFields.NLB = nullptr;
DistCombinedFields.NUB = nullptr;
DistCombinedFields.DistCond = nullptr;
DistCombinedFields.ParForInDistCond = nullptr;
}
};
/// Get number of collapsed loops.
unsigned getLoopsNumber() const { return NumAssociatedLoops; }
/// Try to find the next loop sub-statement in the specified statement \p
/// CurStmt.
/// \param TryImperfectlyNestedLoops true, if we need to try to look for the
/// imperfectly nested loop.
static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
bool TryImperfectlyNestedLoops);
static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
bool TryImperfectlyNestedLoops) {
return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
TryImperfectlyNestedLoops);
}
/// Calls the specified callback function for all the loops in \p CurStmt,
/// from the outermost to the innermost.
static bool
doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
unsigned NumLoops,
llvm::function_ref<bool(unsigned, Stmt *)> Callback,
llvm::function_ref<void(OMPLoopTransformationDirective *)>
OnTransformationCallback);
static bool
doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
unsigned NumLoops,
llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
llvm::function_ref<void(const OMPLoopTransformationDirective *)>
OnTransformationCallback) {
auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
return Callback(Cnt, CurStmt);
};
auto &&NewTransformCb =
[OnTransformationCallback](OMPLoopTransformationDirective *A) {
OnTransformationCallback(A);
};
return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
NumLoops, NewCallback, NewTransformCb);
}
/// Calls the specified callback function for all the loops in \p CurStmt,
/// from the outermost to the innermost.
static bool
doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
              unsigned NumLoops,
              llvm::function_ref<bool(unsigned, Stmt *)> Callback) {
  // This overload is not interested in loop transformations, so a no-op
  // handler is installed for them.
  auto &&IgnoreTransform = [](OMPLoopTransformationDirective *) {};
  return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
                       IgnoreTransform);
}
static bool
doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
unsigned NumLoops,
llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
auto &&NewCallback = [Callback](unsigned Cnt, const Stmt *CurStmt) {
return Callback(Cnt, CurStmt);
};
return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
NumLoops, NewCallback);
}
/// Calls the specified callback function for all the loop bodies in \p
/// CurStmt, from the outermost loop to the innermost.
static void doForAllLoopsBodies(
Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
static void doForAllLoopsBodies(
const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) {
auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
Callback(Cnt, Loop, Body);
};
doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
NumLoops, NewCallback);
}
/// A statement belongs to this hierarchy iff it is an OpenMP executable
/// directive whose kind is classified as a loop directive.
static bool classof(const Stmt *T) {
  const auto *D = dyn_cast<OMPExecutableDirective>(T);
  return D && isOpenMPLoopDirective(D->getDirectiveKind());
}
};
/// The base class for all loop transformation directives.
class OMPLoopTransformationDirective : public OMPLoopBasedDirective {
  friend class ASTStmtReader;

  /// Number of loops generated by this loop transformation.
  unsigned NumGeneratedLoops = 0;

protected:
  explicit OMPLoopTransformationDirective(StmtClass SC,
                                          OpenMPDirectiveKind Kind,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc,
                                          unsigned NumAssociatedLoops)
      : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops) {}

  /// Set the number of loops generated by this loop transformation.
  void setNumGeneratedLoops(unsigned Num) { NumGeneratedLoops = Num; }

public:
  /// Return the number of associated (consumed) loops.
  unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }

  /// Return the number of loops generated by this loop transformation.
  /// Declared const: it is a pure accessor and must be callable on const
  /// directives (the original non-const declaration was a const-correctness
  /// defect).
  unsigned getNumGeneratedLoops() const { return NumGeneratedLoops; }

  /// Get the de-sugared statements after the loop transformation.
  ///
  /// Might be nullptr if either the directive generates no loops and is handled
  /// directly in CodeGen, or resolving a template-dependence context is
  /// required.
  Stmt *getTransformedStmt() const;

  /// Return preinits statement.
  Stmt *getPreInits() const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTileDirectiveClass ||
           T->getStmtClass() == OMPUnrollDirectiveClass;
  }
};
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPLoopBasedDirective {
  friend class ASTStmtReader;

  /// Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 8 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 13 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, eight arrays of length NumAssociatedLoops are
  /// allocated: loop counters, their private copies, inits, updates, finals,
  /// dependent counters, dependent inits, and finals conditions (matching the
  /// `8 * CollapsedNum` term in numLoopChildren below).
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    IterationVariableOffset = 0,
    LastIterationOffset = 1,
    CalcLastIterationOffset = 2,
    PreConditionOffset = 3,
    CondOffset = 4,
    InitOffset = 5,
    IncOffset = 6,
    PreInitsOffset = 7,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays).
    DefaultEnd = 8,
    // The following 8 exprs are used by worksharing and distribute loops only.
    IsLastIterVariableOffset = 8,
    LowerBoundVariableOffset = 9,
    UpperBoundVariableOffset = 10,
    StrideVariableOffset = 11,
    EnsureUpperBoundOffset = 12,
    NextLowerBoundOffset = 13,
    NextUpperBoundOffset = 14,
    NumIterationsOffset = 15,
    // Offset to the end for worksharing loop directives.
    WorksharingEnd = 16,
    PrevLowerBoundVariableOffset = 16,
    PrevUpperBoundVariableOffset = 17,
    DistIncOffset = 18,
    PrevEnsureUpperBoundOffset = 19,
    CombinedLowerBoundVariableOffset = 20,
    CombinedUpperBoundVariableOffset = 21,
    CombinedEnsureUpperBoundOffset = 22,
    CombinedInitOffset = 23,
    CombinedConditionOffset = 24,
    CombinedNextLowerBoundOffset = 25,
    CombinedNextUpperBoundOffset = 26,
    CombinedDistConditionOffset = 27,
    CombinedParForInDistConditionOffset = 28,
    // Offset to the end (and start of the following
    // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays) for combined distribute loop directives.
    CombinedDistributeEnd = 29,
  };

  /// Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind())]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the inits storage.
  MutableArrayRef<Expr *> getInits() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             2 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             3 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             4 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the dependent counters storage.
  MutableArrayRef<Expr *> getDependentCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             5 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the dependent inits storage.
  MutableArrayRef<Expr *> getDependentInits() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             6 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the finals conditions storage.
  MutableArrayRef<Expr *> getFinalsConditions() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             7 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

protected:
  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
  ///
  OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                   SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum)
      : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}

  /// Offset to the start of children expression arrays.
  static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
    if (isOpenMPLoopBoundSharingDirective(Kind))
      return CombinedDistributeEnd;
    if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
        isOpenMPGenericLoopDirective(Kind) || isOpenMPDistributeDirective(Kind))
      return WorksharingEnd;
    return DefaultEnd;
  }

  /// Children number.
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) +
           8 * CollapsedNum; // Counters, PrivateCounters, Inits,
                             // Updates, Finals, DependentCounters,
                             // DependentInits, FinalsConditions.
  }

  // The following setters store the CodeGen helper expressions at their fixed
  // child offsets; the worksharing/combined variants assert that the directive
  // kind actually reserves storage for the expression being set.
  void setIterationVariable(Expr *IV) {
    Data->getChildren()[IterationVariableOffset] = IV;
  }
  void setLastIteration(Expr *LI) {
    Data->getChildren()[LastIterationOffset] = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    Data->getChildren()[CalcLastIterationOffset] = CLI;
  }
  void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
  void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
  void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
  void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[IsLastIterVariableOffset] = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[LowerBoundVariableOffset] = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[UpperBoundVariableOffset] = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[StrideVariableOffset] = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[EnsureUpperBoundOffset] = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NextLowerBoundOffset] = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NextUpperBoundOffset] = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NumIterationsOffset] = NI;
  }
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[DistIncOffset] = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedInitOffset] = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedConditionOffset] = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
  }
  void setCombinedDistCond(Expr *CombDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
  }
  void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    Data->getChildren()[CombinedParForInDistConditionOffset] =
        CombParForInDistCond;
  }
  // Bulk setters for the per-loop expression arrays (one entry per collapsed
  // loop); definitions are out of line.
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);
  void setDependentCounters(ArrayRef<Expr *> A);
  void setDependentInits(ArrayRef<Expr *> A);
  void setFinalsConditions(ArrayRef<Expr *> A);

public:
  // Accessors mirroring the setters above; the same directive-kind
  // preconditions apply (enforced by asserts).
  Expr *getIterationVariable() const {
    return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
  }
  Expr *getLastIteration() const {
    return cast<Expr>(Data->getChildren()[LastIterationOffset]);
  }
  Expr *getCalcLastIteration() const {
    return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]);
  }
  Expr *getPreCond() const {
    return cast<Expr>(Data->getChildren()[PreConditionOffset]);
  }
  Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); }
  Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); }
  Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); }
  const Stmt *getPreInits() const {
    return Data->getChildren()[PreInitsOffset];
  }
  Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]);
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]);
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]);
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[StrideVariableOffset]);
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]);
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]);
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]);
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[NumIterationsOffset]);
  }
  Expr *getPrevLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]);
  }
  Expr *getPrevUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]);
  }
  Expr *getDistInc() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[DistIncOffset]);
  }
  Expr *getPrevEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]);
  }
  Expr *getCombinedLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]);
  }
  Expr *getCombinedUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]);
  }
  Expr *getCombinedEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]);
  }
  Expr *getCombinedInit() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedInitOffset]);
  }
  Expr *getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
  }
  Expr *getCombinedDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
  }
  Expr *getCombinedParForInDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
  }
  /// Get the body of the innermost associated loop (out-of-line definition).
  Stmt *getBody();
  const Stmt *getBody() const {
    return const_cast<OMPLoopDirective *>(this)->getBody();
  }

  // Read-only views over the per-loop expression arrays.
  ArrayRef<Expr *> counters() { return getCounters(); }
  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }
  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }
  ArrayRef<Expr *> inits() { return getInits(); }
  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }
  ArrayRef<Expr *> updates() { return getUpdates(); }
  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }
  ArrayRef<Expr *> finals() { return getFinals(); }
  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }
  ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
  ArrayRef<Expr *> dependent_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
  }
  ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
  ArrayRef<Expr *> dependent_inits() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentInits();
  }
  ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
  ArrayRef<Expr *> finals_conditions() const {
    return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() ==
               OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass;
}
};
/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(getLoopsNumber(),
llvm::omp::OMPD_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs,
Expr *TaskRedRef, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getLoopsNumber(), llvm::omp::OMPD_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
/// This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  // ASTStmtReader deserializes the private state; OMPExecutableDirective
  // needs access to the private constructors when materializing directives.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum)
      : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive (used as a placeholder during AST
  /// deserialization; fields are filled in afterwards).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);

  /// LLVM-style RTTI: matches only the 'omp for simd' statement class.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionsDirective()
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Child slot 0 is reserved for the task reduction reference expression
  /// (see getTaskReductionRefExpr below).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  /// May be null if no task reduction expression was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Set cancel state.
  /// Note: public here (unlike sibling directives where it is private),
  /// matching the existing interface.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSingleDirective()
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Name of the directive (the optional name in '#pragma omp critical
  /// (name)'; empty for an unnamed critical region).
  DeclarationNameInfo DirName;

  /// Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, StartLoc, EndLoc),
        DirName(Name) {}

  /// Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, SourceLocation(),
                               SourceLocation()) {}

  /// Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current region has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// The expression lives in the child slot immediately after the
  /// loop-related children (index computed by numLoopChildren).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(getLoopsNumber(),
                                        llvm::omp::OMPD_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  /// Returns special task reduction reference expression.
  /// May be null if no task reduction expression was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master' directive.
///
/// \code
/// #pragma omp parallel master private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel master' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPParallelMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelMasterDirective()
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored in child slot 0 (see getTaskReductionRefExpr).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  ///
  static OMPParallelMasterDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  /// May be null if no task reduction expression was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelMasterDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterDirectiveClass;
  }
};
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelSectionsDirective()
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored in child slot 0 (see getTaskReductionRefExpr).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  /// May be null if no task reduction expression was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelSectionsDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if this directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskDirective()
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive. Standalone: takes no clauses and no associated
  /// statement.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive. Standalone: takes no clauses and no associated
  /// statement.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive. Unlike 'barrier'/'taskyield', 'taskwait' accepts
  /// clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPTaskwaitDirective *Create(const ASTContext &C,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc,
                                      ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskgroupDirective()
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, SourceLocation(),
                               SourceLocation()) {}

  /// Sets the task_reduction return variable.
  /// Stored in child slot 0 (see getReductionRef).
  void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable.
  /// May be null if no task_reduction clause was present.
  const Expr *getReductionRef() const {
    return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef();
  }
  Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPFlushDirective()
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// This represents '#pragma omp depobj' directive.
///
/// \code
/// #pragma omp depobj(a) depend(in:x,y)
/// \endcode
/// In this example directive '#pragma omp depobj' initializes a depobj object
/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
class OMPDepobjDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPDepobjDirective()
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPDepobjDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDepobjDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDepobjDirectiveClass;
  }
};
/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, StartLoc, EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param IsStandalone true, if the standalone directive is created.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          bool IsStandalone, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Bit-flags describing the form of the associated atomic expression.
  struct FlagTy {
    /// Used for 'atomic update' or 'atomic capture' constructs. They may
    /// have atomic expressions of forms:
    /// \code
    /// x = x binop expr;
    /// x = expr binop x;
    /// \endcode
    /// This field is 1 for the first form of the expression and 0 for the
    /// second. Required for correct codegen of non-associative operations (like
    /// << or >>).
    uint8_t IsXLHSInRHSPart : 1;
    /// Used for 'atomic update' or 'atomic capture' constructs. They may
    /// have atomic expressions of forms:
    /// \code
    /// v = x; <update x>;
    /// <update x>; v = x;
    /// \endcode
    /// This field is 1 for the first (postfix) form of the expression and 0
    /// otherwise.
    uint8_t IsPostfixUpdate : 1;
  } Flags;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
                               StartLoc, EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPAtomicDirective()
      : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
                               SourceLocation(), SourceLocation()) {}
  /// Indices of the associated sub-expressions within the children array
  /// managed by \a Data. Each setter/getter below reads or writes exactly one
  /// of these slots.
  enum DataPositionTy : size_t {
    POS_X = 0,
    POS_V,
    POS_E,
    POS_UpdateExpr,
    POS_D,
    POS_Cond,
  };
  /// Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { Data->getChildren()[DataPositionTy::POS_X] = X; }
  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) {
    Data->getChildren()[DataPositionTy::POS_UpdateExpr] = UE;
  }
  /// Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; }
  /// Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; }
  /// Set 'd' part of the associated expression/statement.
  void setD(Expr *D) { Data->getChildren()[DataPositionTy::POS_D] = D; }
  /// Set conditional expression in `atomic compare`.
  void setCond(Expr *C) { Data->getChildren()[DataPositionTy::POS_Cond] = C; }
public:
  /// Bundle of all sub-expressions of an atomic construct, used to populate
  /// the directive in \a Create.
  struct Expressions {
    /// 'x' part of the associated expression/statement.
    Expr *X = nullptr;
    /// 'v' part of the associated expression/statement.
    Expr *V = nullptr;
    /// 'expr' part of the associated expression/statement.
    Expr *E = nullptr;
    /// UE Helper expression of the form:
    /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
    /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
    Expr *UE = nullptr;
    /// 'd' part of the associated expression/statement.
    Expr *D = nullptr;
    /// Conditional expression in `atomic compare` construct.
    Expr *Cond = nullptr;
    /// True if UE has the first form and false if the second.
    bool IsXLHSInRHSPart;
    /// True if original value of 'x' must be stored in 'v', not an updated one.
    bool IsPostfixUpdate;
  };
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Associated expressions or statements.
  static OMPAtomicDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    ArrayRef<OMPClause *> Clauses,
                                    Stmt *AssociatedStmt, Expressions Exprs);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// Get 'x' part of the associated expression/statement.
  Expr *getX() {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]);
  }
  const Expr *getX() const {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]);
  }
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(
        Data->getChildren()[DataPositionTy::POS_UpdateExpr]);
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(
        Data->getChildren()[DataPositionTy::POS_UpdateExpr]);
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return Flags.IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return Flags.IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement.
  Expr *getV() {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
  }
  const Expr *getV() const {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
  }
  /// Get 'expr' part of the associated expression/statement.
  Expr *getExpr() {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]);
  }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]);
  }
  /// Get 'd' part of the associated expression/statement.
  Expr *getD() {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]);
  }
  // NOTE(review): unlike getX()/getV()/getExpr(), this const overload returns
  // a non-const Expr*; consider returning 'const Expr *' for const-correctness
  // (changing it now would affect existing const callers).
  Expr *getD() const {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]);
  }
  /// Get the 'cond' part of the source atomic expression.
  Expr *getCondExpr() {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]);
  }
  // NOTE(review): same const-correctness asymmetry as getD() const above.
  Expr *getCondExpr() const {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]);
  }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               StartLoc, EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPTargetDirective()
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, StartLoc, EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPTargetDataDirective()
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data, StartLoc,
                               EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPTargetEnterDataDirective()
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data, StartLoc,
                               EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPTargetExitDataDirective()
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel, StartLoc,
                               EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPTargetParallelDirective()
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel,
                               SourceLocation(), SourceLocation()) {}
  /// Sets special task reduction descriptor.
  /// The expression is stored in child slot 0; getTaskReductionRefExpr()
  /// reads the same slot.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  // Deserialization and the base class need access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current region has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive (used when deserializing).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
  /// Sets special task reduction descriptor.
  /// The expression lives in the child slot immediately after the
  /// numLoopChildren() loop-related children; the getter reads the same slot.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E;
  }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);
  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               StartLoc, EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPTeamsDirective()
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Kind of the region this cancellation point applies to (e.g. 'for',
  /// 'sections'); OMPD_unknown until set.
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point, StartLoc,
                               EndLoc) {}
  /// Build an empty directive (used when deserializing).
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point,
                               SourceLocation(), SourceLocation()) {}
  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CancelRegion Kind of the region the cancellation point applies to.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);
  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  // Deserialization and the base class need access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Kind of the region this cancel applies to (e.g. 'for', 'sections');
  /// OMPD_unknown until set.
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               StartLoc, EndLoc) {}
  /// Build an empty directive (used when deserializing).
  ///
  explicit OMPCancelDirective()
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               SourceLocation(), SourceLocation()) {}
  /// Set cancel region for the current cancel directive.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param CancelRegion Kind of the region the cancel applies to.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// Get cancellation region for the current cancel directive.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  // Deserialization and the base class need access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum) {}
  /// Build an empty directive (used when deserializing).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  // Deserialization and the base class need access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive (used when deserializing).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);
  // Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                 unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                                     unsigned NumClauses,
                                                     unsigned CollapsedNum,
                                                     EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetUpdateDirective()
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \p NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  // Stored as the child slot immediately after the loop-associated children.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
  }

  /// Returns special task reduction reference expression (const overload).
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// The expression occupies the first child slot after the standard
  /// loop-directive children.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc, EndLoc) {
  }

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTargetTeamsDirective()
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// The expression occupies the first child slot after the standard
  /// loop-directive children.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc,
            EndLoc, CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents the '#pragma omp tile' loop transformation directive.
class OMPTileDirective final : public OMPLoopTransformationDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Offsets of the additional child statements kept in \c Data.
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumLoops Number of associated loops (number of items in the
  ///                 'sizes' clause).
  explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                            unsigned NumLoops)
      : OMPLoopTransformationDirective(OMPTileDirectiveClass,
                                       llvm::omp::OMPD_tile, StartLoc, EndLoc,
                                       NumLoops) {
    // Tiling each of the NumLoops associated loops yields exactly two loops:
    // an outer "floor" loop iterating over the tiles and an inner loop
    // iterating within each tile; hence 2 * NumLoops generated loops
    // (previously this was incorrectly set to 3 * NumLoops).
    setNumGeneratedLoops(2 * NumLoops);
  }

  /// Set the pre-init statements.
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }

  /// Set the de-sugared loop nest.
  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }

public:
  /// Create a new AST node representation for '#pragma omp tile'.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses The directive's clauses.
  /// \param NumLoops Number of associated loops (number of items in the
  ///                 'sizes' clause).
  /// \param AssociatedStmt The outermost associated loop.
  /// \param TransformedStmt The loop nest after tiling, or nullptr in
  ///                        dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  unsigned NumLoops, Stmt *AssociatedStmt,
                                  Stmt *TransformedStmt, Stmt *PreInits);

  /// Build an empty '#pragma omp tile' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  /// \param NumLoops Number of associated loops to allocate.
  static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned NumLoops);

  /// Gets/sets the associated loops after tiling.
  ///
  /// This is in de-sugared format stored as a CompoundStmt.
  ///
  /// \code
  ///   for (...)
  ///     ...
  /// \endcode
  ///
  /// Note that if the generated loops become associated loops of another
  /// directive, they may need to be hoisted before them.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }

  /// Return preinits statement.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTileDirectiveClass;
  }
};
/// This represents the '#pragma omp unroll' loop transformation directive.
///
/// \code
/// #pragma omp unroll
/// for (int i = 0; i < 64; ++i)
/// \endcode
class OMPUnrollDirective final : public OMPLoopTransformationDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Offsets of the additional child statements kept in \c Data.
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };

  /// Build directive with the given start and end location.
  /// An unroll directive is always associated with exactly one loop.
  explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPLoopTransformationDirective(OMPUnrollDirectiveClass,
                                       llvm::omp::OMPD_unroll, StartLoc, EndLoc,
                                       1) {}

  /// Set the pre-init statements.
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }

  /// Set the de-sugared statement.
  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }

public:
  /// Create a new AST node representation for '#pragma omp unroll'.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses The directive's clauses.
  /// \param AssociatedStmt The outermost associated loop.
  /// \param NumGeneratedLoops Number of loops generated by the unrolling.
  /// \param TransformedStmt The loop nest after unrolling, or nullptr in
  ///                        dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPUnrollDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         unsigned NumGeneratedLoops, Stmt *TransformedStmt, Stmt *PreInits);

  /// Build an empty '#pragma omp unroll' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  static OMPUnrollDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses);

  /// Get the de-sugared associated loops after unrolling.
  ///
  /// This is only used if the unrolled loop becomes an associated loop of
  /// another directive, otherwise the loop is emitted directly using loop
  /// transformation metadata. When the unrolled loop cannot be used by another
  /// directive (e.g. because of the full clause), the transformed stmt can also
  /// be nullptr.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }

  /// Return the pre-init statements.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPUnrollDirectiveClass;
  }
};
/// This represents '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive' with
/// list item 'a'.
class OMPScanDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPScanDirective()
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (a single 'inclusive' or 'exclusive'
  /// clause is allowed, per the \code example above).
  ///
  static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPScanDirectiveClass;
  }
};
/// This represents '#pragma omp interop' directive.
///
/// \code
/// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait
/// \endcode
/// In this example directive '#pragma omp interop' has
/// clauses 'init', 'device', 'depend' and 'nowait'.
///
class OMPInteropDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPInteropDirective()
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses The directive's clauses.
  ///
  static OMPInteropDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPInteropDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPInteropDirectiveClass;
  }
};
/// This represents '#pragma omp dispatch' directive.
///
/// \code
/// #pragma omp dispatch device(dnum)
/// \endcode
/// This example shows a directive '#pragma omp dispatch' with a
/// device clause with variable 'dnum'.
///
class OMPDispatchDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// The location of the target-call.
  SourceLocation TargetCallLoc;

  /// Set the location of the target-call.
  void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; }

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPDispatchDirective()
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TargetCallLoc Location of the target-call.
  ///
  static OMPDispatchDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         SourceLocation TargetCallLoc);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDispatchDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return location of target-call.
  SourceLocation getTargetCallLoc() const { return TargetCallLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDispatchDirectiveClass;
  }
};
/// This represents '#pragma omp masked' directive.
/// \code
/// #pragma omp masked filter(tid)
/// \endcode
/// This example shows a directive '#pragma omp masked' with a filter clause
/// with variable 'tid'.
///
class OMPMaskedDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPMaskedDirective()
      : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of directive's clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMaskedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMaskedDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMaskedDirectiveClass;
  }
};
/// This represents '#pragma omp metadirective' directive.
///
/// \code
/// #pragma omp metadirective when(user={condition(N>10)}: parallel for)
/// \endcode
/// In this example directive '#pragma omp metadirective' has clauses 'when'
/// with a dynamic user condition to check if a variable 'N > 10'
///
class OMPMetaDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// The statement selected by the metadirective resolution; initialized to
  /// null so a directive built empty (e.g. during deserialization) never
  /// exposes an indeterminate pointer through getIfStmt().
  Stmt *IfStmt = nullptr;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMetaDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMetaDirectiveClass,
                               llvm::omp::OMPD_metadirective, StartLoc,
                               EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPMetaDirective()
      : OMPExecutableDirective(OMPMetaDirectiveClass,
                               llvm::omp::OMPD_metadirective, SourceLocation(),
                               SourceLocation()) {}

  /// Set the selected statement.
  void setIfStmt(Stmt *S) { IfStmt = S; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param IfStmt Statement selected by the metadirective resolution.
  ///
  static OMPMetaDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, Stmt *IfStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMetaDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  /// Return the selected statement (null if not yet set).
  Stmt *getIfStmt() const { return IfStmt; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMetaDirectiveClass;
  }
};
/// This represents '#pragma omp loop' directive.
///
/// \code
/// #pragma omp loop private(a,b) binding(parallel) order(concurrent)
/// \endcode
/// In this example directive '#pragma omp loop' has
/// clauses 'private' with the variables 'a' and 'b', 'binding' with
/// modifier 'parallel' and 'order(concurrent)'.
///
class OMPGenericLoopDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum)
      : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPGenericLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPGenericLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with a place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPGenericLoopDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPGenericLoopDirectiveClass;
  }
};
/// This represents '#pragma omp teams loop' directive.
///
/// \code
/// #pragma omp teams loop private(a,b) order(concurrent)
/// \endcode
/// In this example directive '#pragma omp teams loop' has
/// clauses 'private' with the variables 'a' and 'b', and 'order(concurrent)'.
///
class OMPTeamsGenericLoopDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsGenericLoopDirectiveClass,
                         llvm::omp::OMPD_teams_loop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsGenericLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsGenericLoopDirectiveClass,
                         llvm::omp::OMPD_teams_loop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsGenericLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsGenericLoopDirective *CreateEmpty(const ASTContext &C,
                                                   unsigned NumClauses,
                                                   unsigned CollapsedNum,
                                                   EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass;
  }
};
/// This represents '#pragma omp target teams loop' directive.
///
/// \code
/// #pragma omp target teams loop private(a,b) order(concurrent)
/// \endcode
/// In this example directive '#pragma omp target teams loop' has
/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
///
class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsGenericLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsGenericLoopDirectiveClass,
                         llvm::omp::OMPD_target_teams_loop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive with invalid source locations; only the
  /// number of collapsed loops is set (used when deserializing the AST).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsGenericLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsGenericLoopDirectiveClass,
                         llvm::omp::OMPD_target_teams_loop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsGenericLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  /// The unnamed \c EmptyShell tag parameter selects this overload.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsGenericLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Support for LLVM-style RTTI (\c isa / \c dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass;
  }
};
/// This represents '#pragma omp parallel loop' directive.
///
/// \code
/// #pragma omp parallel loop private(a,b) order(concurrent)
/// \endcode
/// In this example directive '#pragma omp parallel loop' has
/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
///
class OMPParallelGenericLoopDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelGenericLoopDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_loop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive with invalid source locations; only the
  /// number of collapsed loops is set (used when deserializing the AST).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelGenericLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_loop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelGenericLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  /// The unnamed \c EmptyShell tag parameter selects this overload.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPParallelGenericLoopDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  /// Support for LLVM-style RTTI (\c isa / \c dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelGenericLoopDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel loop' directive.
///
/// \code
/// #pragma omp target parallel loop private(a,b) order(concurrent)
/// \endcode
/// In this example directive '#pragma omp target parallel loop' has
/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
///
class OMPTargetParallelGenericLoopDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelGenericLoopDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass,
                         llvm::omp::OMPD_target_parallel_loop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive with invalid source locations; only the
  /// number of collapsed loops is set (used when deserializing the AST).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelGenericLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass,
                         llvm::omp::OMPD_target_parallel_loop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelGenericLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  /// The unnamed \c EmptyShell tag parameter selects this overload.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetParallelGenericLoopDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Support for LLVM-style RTTI (\c isa / \c dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass;
  }
};
} // end namespace clang
#endif
|
kpoint.c | /* kpoint.c */
/* Copyright (C) 2008 Atsushi Togo */
#include <stdio.h>
#include <stdlib.h>
#include "mathfunc.h"
#include "symmetry.h"
#include "kpoint.h"
#include "debug.h"
/* #define GRID_ORDER_XYZ */
/* The addressing order of mesh grid is defined as running left */
/* element first. But when GRID_ORDER_XYZ is defined, it is changed to right */
/* element first. */
/* ---- Forward declarations of file-local helpers ----------------------- */

/* Construction of point groups acting on reciprocal space. */
static PointSymmetry get_point_group_reciprocal(const MatINT * rotations,
                                                const int is_time_reversal);
static PointSymmetry
get_point_group_reciprocal_with_q(SPGCONST PointSymmetry * pointgroup,
                                  const double symprec,
                                  const int num_q,
                                  SPGCONST double qpoints[][3]);

/* Reduction of k-points / mesh points to irreducible sets. */
static int get_ir_kpoints(int map[],
                          SPGCONST double kpoints[][3],
                          const int num_kpoint,
                          SPGCONST PointSymmetry * point_symmetry,
                          const double symprec);
static int get_ir_reciprocal_mesh(int grid_point[][3],
                                  int map[],
                                  const int mesh[3],
                                  const int is_shift[3],
                                  SPGCONST PointSymmetry * point_symmetry);

/* Searches for momentum-conserving (q, q', q'') triplets. */
static Triplets * get_ir_triplets(const int mesh[3],
                                  const int is_time_reversal,
                                  const MatINT * rotations);
static int get_ir_triplets_at_q(int weights[],
                                int grid_points[][3],
                                int third_q[],
                                const int grid_point,
                                const int mesh[3],
                                PointSymmetry * pointgroup);
static int extract_ir_triplets_with_q(int triplets_with_q[][3],
                                      int weight_with_q[],
                                      const int fixed_grid_number,
                                      SPGCONST int triplets[][3],
                                      const int num_triplets,
                                      const int mesh[3],
                                      SPGCONST PointSymmetry * point_symmetry);

/* Grid-address arithmetic and small allocation utilities. */
static void get_grid_mapping_table(int **map_sym,
                                   SPGCONST PointSymmetry * point_symmetry,
                                   const int mesh[3],
                                   const int is_shift[3]);
static void address_to_grid(int grid_double[3],
                            const int address,
                            const int mesh[3],
                            const int is_shift[3]);
static void get_grid_points(int grid_point[3],
                            const int grid[3],
                            const int mesh[3]);
static void get_vector_modulo(int v[3],
                              const int m[3]);
static int grid_to_address(const int grid[3],
                           const int mesh[3],
                           const int is_shift[3]);
static void free_array2D_int(int **array,
                             const int num_row);
static int ** allocate_array2d_int(const int num_row,
                                   const int num_column);
static Triplets * allocate_triplets(const int num_triplets, const int mesh[3]);
int kpt_get_irreducible_kpoints(int map[],
                                SPGCONST double kpoints[][3],
                                const int num_kpoint,
                                const Symmetry * symmetry,
                                const int is_time_reversal,
                                const double symprec)
{
  /* Reduce 'kpoints' to the irreducible set: copy the rotation parts of */
  /* the symmetry operations, derive the reciprocal-space point group    */
  /* (optionally augmented by time reversal), and reduce with it.        */
  int idx;
  MatINT *rot_copy;
  PointSymmetry ptsym;

  rot_copy = mat_alloc_MatINT(symmetry->size);
  for (idx = 0; idx < symmetry->size; idx++) {
    mat_copy_matrix_i3(rot_copy->mat[idx], symmetry->rot[idx]);
  }
  ptsym = get_point_group_reciprocal(rot_copy, is_time_reversal);
  mat_free_MatINT(rot_copy);

  return get_ir_kpoints(map, kpoints, num_kpoint, &ptsym, symprec);
}
/* grid_point (e.g. 4x4x4 mesh) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' corresponds to the index of grid_point. */
int kpt_get_irreducible_reciprocal_mesh(int grid_points[][3],
                                        int map[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const int is_time_reversal,
                                        const Symmetry * symmetry)
{
  /* Reduce a uniform reciprocal mesh to its irreducible grid points.   */
  /* The rotations of 'symmetry' are copied into a MatINT, converted to */
  /* the reciprocal-space point group, and the mesh is reduced with it. */
  int idx;
  MatINT *rot_copy;
  PointSymmetry ptsym;

  rot_copy = mat_alloc_MatINT(symmetry->size);
  for (idx = 0; idx < symmetry->size; idx++) {
    mat_copy_matrix_i3(rot_copy->mat[idx], symmetry->rot[idx]);
  }
  ptsym = get_point_group_reciprocal(rot_copy, is_time_reversal);
  mat_free_MatINT(rot_copy);

  return get_ir_reciprocal_mesh(grid_points, map, mesh, is_shift, &ptsym);
}
void kpt_free_triplets(Triplets * t)
{
  /* Release all heap memory owned by a Triplets object, then the object */
  /* itself. The caller must not use the pointer afterwards.             */
  free(t->triplets);
  t->triplets = NULL;
  free(t->weights);
  t->weights = NULL;
  free(t->mesh_points);
  t->mesh_points = NULL;
  free(t);
  /* Note: the original assigned NULL to the local parameter 't' after   */
  /* free(); that was a dead store with no effect on the caller's        */
  /* pointer, so it has been removed.                                    */
}
int kpt_get_stabilized_reciprocal_mesh(int grid_points[][3],
                                       int map[],
                                       const int mesh[3],
                                       const int is_shift[3],
                                       const int is_time_reversal,
                                       const MatINT * rotations,
                                       const int num_q,
                                       SPGCONST double qpoints[][3])
{
  /* Like the plain irreducible-mesh reduction, but the point group is */
  /* first restricted to the stabilizer of the given q-points.         */
  double tol;
  PointSymmetry full_group, stabilizer;

  full_group = get_point_group_reciprocal(rotations, is_time_reversal);
  /* Tolerance shrinks as the mesh gets denser. */
  tol = 0.1 / (mesh[0] + mesh[1] + mesh[2]);
  stabilizer = get_point_group_reciprocal_with_q(&full_group, tol,
                                                 num_q, qpoints);

  return get_ir_reciprocal_mesh(grid_points, map, mesh, is_shift,
                                &stabilizer);
}
Triplets * kpt_get_triplets_reciprocal_mesh(const int mesh[3],
                                            const int is_time_reversal,
                                            const MatINT * rotations)
{
  /* Thin public wrapper around the file-local triplet search; the */
  /* returned object is released with kpt_free_triplets().         */
  return get_ir_triplets(mesh, is_time_reversal, rotations);
}
int kpt_get_ir_triplets_at_q(int weights[],
                             int grid_points[][3],
                             int third_q[],
                             const int grid_point,
                             const int mesh[3],
                             const int is_time_reversal,
                             const MatINT * rotations)
{
  /* Public wrapper: derive the reciprocal-space point group, then      */
  /* delegate to the static worker that enumerates irreducible triplets */
  /* whose first member is fixed at 'grid_point'.                       */
  PointSymmetry ptgroup;

  ptgroup = get_point_group_reciprocal(rotations, is_time_reversal);

  return get_ir_triplets_at_q(weights, grid_points, third_q,
                              grid_point, mesh, &ptgroup);
}
int kpt_extract_triplets_reciprocal_mesh_at_q(int triplets_with_q[][3],
                                              int weight_with_q[],
                                              const int fixed_grid_number,
                                              const int num_triplets,
                                              SPGCONST int triplets[][3],
                                              const int mesh[3],
                                              const int is_time_reversal,
                                              const MatINT * rotations)
{
  /* Public wrapper: build the reciprocal-space point group and filter */
  /* the given triplet list down to those related by symmetry to       */
  /* 'fixed_grid_number'.                                              */
  PointSymmetry ptgroup;

  ptgroup = get_point_group_reciprocal(rotations, is_time_reversal);

  return extract_ir_triplets_with_q(triplets_with_q, weight_with_q,
                                    fixed_grid_number, triplets,
                                    num_triplets, mesh, &ptgroup);
}
/* In get_point_group_reciprocal_with_q below, the qpoints argument is  */
/* used to find stabilizers (operations); num_q is the number of the    */
/* qpoints. get_point_group_reciprocal itself takes no q-points.        */
static PointSymmetry get_point_group_reciprocal(const MatINT * rotations,
                                                const int is_time_reversal)
{
  /* Build the point group acting on reciprocal space: transpose every */
  /* rotation, optionally append its product with the inversion (time  */
  /* reversal), and keep exactly one copy of each distinct matrix.     */
  int i, j, found, num_pt = 0;
  MatINT *rot_reciprocal;
  PointSymmetry point_symmetry;
  SPGCONST int inversion[3][3] = {
    {-1, 0, 0 },
    { 0,-1, 0 },
    { 0, 0,-1 }
  };

  rot_reciprocal = mat_alloc_MatINT(is_time_reversal ?
                                    rotations->size * 2 : rotations->size);

  for (i = 0; i < rotations->size; i++) {
    /* The reciprocal-space action of a rotation is its transpose. */
    mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);
    if (is_time_reversal) {
      mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size + i],
                             inversion,
                             rot_reciprocal->mat[i]);
    }
  }

  /* Deduplicate: copy a matrix only if it is not already present. */
  for (i = 0; i < rot_reciprocal->size; i++) {
    found = 0;
    for (j = 0; j < num_pt; j++) {
      if (mat_check_identity_matrix_i3(point_symmetry.rot[j],
                                       rot_reciprocal->mat[i])) {
        found = 1;
        break;
      }
    }
    if (! found) {
      mat_copy_matrix_i3(point_symmetry.rot[num_pt],
                         rot_reciprocal->mat[i]);
      num_pt++;
    }
  }

  point_symmetry.size = num_pt;
  mat_free_MatINT(rot_reciprocal);

  return point_symmetry;
}
static PointSymmetry
get_point_group_reciprocal_with_q(SPGCONST PointSymmetry * pointgroup,
                                  const double symprec,
                                  const int num_q,
                                  SPGCONST double qpoints[][3])
{
  /* Extract the subgroup of 'pointgroup' that leaves the set of        */
  /* q-points invariant: an operation is kept only when every rotated   */
  /* q-point coincides, modulo a reciprocal lattice vector, with some   */
  /* member of 'qpoints' within 'symprec'.                              */
  /* NOTE(review): with num_q == 0, is_all_ok keeps its value from the  */
  /* previous outer iteration (initially 0) — callers here always pass  */
  /* num_q >= 1; confirm before reusing with an empty q-point set.      */
  int i, j, k, l, is_all_ok=0, num_ptq = 0;
  double q_rot[3], diff[3];
  PointSymmetry pointgroup_q;

  for (i = 0; i < pointgroup->size; i++) {
    for (j = 0; j < num_q; j++) {
      is_all_ok = 0;
      mat_multiply_matrix_vector_id3(q_rot,
                                     pointgroup->rot[i],
                                     qpoints[j]);

      for (k = 0; k < num_q; k++) {
        for (l = 0; l < 3; l++) {
          /* Compare fractional coordinates modulo 1: reciprocal */
          /* lattice translations are equivalent.                */
          diff[l] = q_rot[l] - qpoints[k][l];
          diff[l] -= mat_Nint(diff[l]);
        }

        if (mat_Dabs(diff[0]) < symprec &&
            mat_Dabs(diff[1]) < symprec &&
            mat_Dabs(diff[2]) < symprec) {
          is_all_ok = 1;
          break;
        }
      }

      if (! is_all_ok) {
        /* Rotation i moves qpoints[j] outside the set; reject it. */
        break;
      }
    }

    if (is_all_ok) {
      mat_copy_matrix_i3(pointgroup_q.rot[num_ptq], pointgroup->rot[i]);
      num_ptq++;
    }
  }
  pointgroup_q.size = num_ptq;

  return pointgroup_q;
}
static int get_ir_kpoints(int map[],
                          SPGCONST double kpoints[][3],
                          const int num_kpoint,
                          SPGCONST PointSymmetry * point_symmetry,
                          const double symprec)
{
  /* Reduce 'kpoints' to the irreducible set under 'point_symmetry'.      */
  /* On return map[i] holds the index of the irreducible k-point that     */
  /* represents kpoints[i]; the number of irreducible points is returned. */
  int i, j, k, l, num_ir_kpoint = 0, is_found;
  int *ir_map;
  double kpt_rot[3], diff[3];

  ir_map = (int*)malloc(num_kpoint*sizeof(int));
  for (i = 0; i < num_kpoint; i++) {
    map[i] = i;
    is_found = 1;
    for (j = 0; j < point_symmetry->size; j++) {
      /* Rotated image of kpoints[i]; computed once per operation and */
      /* reused below (the original recomputed it, with identical     */
      /* arguments, inside the inner loop over irreducible points).   */
      mat_multiply_matrix_vector_id3(kpt_rot, point_symmetry->rot[j], kpoints[i]);
      for (k = 0; k < 3; k++) {
        /* Compare modulo a reciprocal lattice vector. */
        diff[k] = kpt_rot[k] - kpoints[i][k];
        diff[k] = diff[k] - mat_Nint(diff[k]);
      }
      if (mat_Dabs(diff[0]) < symprec &&
          mat_Dabs(diff[1]) < symprec &&
          mat_Dabs(diff[2]) < symprec) {
        /* Operation j maps the point onto itself, so it cannot relate */
        /* kpoints[i] to a different irreducible point; try the next.  */
        continue;
      }

      for (k = 0; k < num_ir_kpoint; k++) {
        for (l = 0; l < 3; l++) {
          diff[l] = kpt_rot[l] - kpoints[ir_map[k]][l];
          diff[l] = diff[l] - mat_Nint(diff[l]);
        }
        if (mat_Dabs(diff[0]) < symprec &&
            mat_Dabs(diff[1]) < symprec &&
            mat_Dabs(diff[2]) < symprec) {
          /* kpoints[i] is equivalent to an earlier irreducible point. */
          is_found = 0;
          map[i] = ir_map[k];
          break;
        }
      }
      if (! is_found)
        break;
    }
    if (is_found) {
      ir_map[num_ir_kpoint] = i;
      num_ir_kpoint++;
    }
  }
  free(ir_map);
  ir_map = NULL;

  return num_ir_kpoint;
}
static int get_ir_reciprocal_mesh(int grid[][3],
                                  int map[],
                                  const int mesh[3],
                                  const int is_shift[3],
                                  SPGCONST PointSymmetry * point_symmetry)
{
  /* Reduce a uniform, optionally half-step shifted mesh by the given */
  /* point group; returns the number of irreducible points.           */
  /* In the following loop, mesh is doubled. */
  /* Even and odd mesh numbers correspond to */
  /* is_shift[i] = 0 and 1, respectively. */
  /* is_shift = [0,0,0] gives Gamma center mesh. */
  /* grid: reducible grid points */
  /* map: the mapping from each point to ir-point. */
  int i, j, k, l, address, address_rot, num_ir = 0;
  int grid_double[3], grid_rot[3], mesh_double[3];

  for (i = 0; i < 3; i++) {
    mesh_double[i] = mesh[i] * 2;
  }

  /* "-1" means the element is not touched yet. */
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    map[i] = -1;
  }

#ifndef GRID_ORDER_XYZ
  /* z runs slowest; doubled coordinates whose parity does not match */
  /* is_shift do not lie on the mesh and are skipped.                */
  for (i = 0; i < mesh_double[2]; i++) {
    if ((is_shift[2] && i % 2 == 0) ||
        (is_shift[2] == 0 && i % 2 != 0))
      continue;
    for (j = 0; j < mesh_double[1]; j++) {
      if ((is_shift[1] && j % 2 == 0) ||
          (is_shift[1] == 0 && j % 2 != 0))
        continue;
      for (k = 0; k < mesh_double[0]; k++) {
        if ((is_shift[0] && k % 2 == 0) ||
            (is_shift[0] == 0 && k % 2 != 0))
          continue;
        grid_double[0] = k;
        grid_double[1] = j;
        grid_double[2] = i;
#else
  /* x runs slowest. */
  for (i = 0; i < mesh_double[0]; i++) {
    if ((is_shift[0] && i % 2 == 0) ||
        (is_shift[0] == 0 && i % 2 != 0))
      continue;
    for (j = 0; j < mesh_double[1]; j++) {
      if ((is_shift[1] && j % 2 == 0) ||
          (is_shift[1] == 0 && j % 2 != 0))
        continue;
      for (k = 0; k < mesh_double[2]; k++) {
        if ((is_shift[2] && k % 2 == 0) ||
            (is_shift[2] == 0 && k % 2 != 0))
          continue;
        grid_double[0] = i;
        grid_double[1] = j;
        grid_double[2] = k;
#endif

        address = grid_to_address(grid_double, mesh, is_shift);
        get_grid_points(grid[address], grid_double, mesh);
        /* Map to the first already-visited symmetry image, if any. */
        for (l = 0; l < point_symmetry->size; l++) {
          mat_multiply_matrix_vector_i3(grid_rot, point_symmetry->rot[l], grid_double);
          get_vector_modulo(grid_rot, mesh_double);
          address_rot = grid_to_address(grid_rot, mesh, is_shift);
          if (address_rot > -1) { /* Invalid if even --> odd or odd --> even */
            if (map[address_rot] > -1) {
              map[address] = map[address_rot];
              break;
            }
          }
        }

        /* Set itself to the map when equivalent point */
        /* with smaller numbering could not be found. */
        if (map[address] == -1) {
          map[address] = address;
          num_ir++;
        }
      }
    }
  }

  return num_ir;
}
/* Unique q-point triplets that conserve the momentum,  */
/* q+q'+q''=G, are obtained.                            */
/*                                                      */
/* The first q-point is selected among the ir-q-points. */
/* The second q-point is selected among the ir-q-points */
/* constrained by the first q-point (stabilizer)        */
/* The third q-point is searched through the all grid   */
/* points and is checked if it satisfies q+q'+q''=G,    */
/* here q, q', and q'' can be exchanged one another.    */
/* The returned Triplets is heap-allocated; release it  */
/* with kpt_free_triplets().                            */
static Triplets * get_ir_triplets(const int mesh[3],
                                  const int is_time_reversal,
                                  const MatINT * rotations)
{
  int i, j, k, l, num_ir, num_grid, weight, weight_q, count, q_2;
  int num_triplets, num_unique_q;
  int mesh_double[3], address[3], is_shift[3];
  int grid_double[3][3];
  int (*grid)[3], (*grid_local)[3];
  int *map, *map_q, *unique_q;
  int **map_sym = NULL;
  int **weight_counts;
  double tolerance;
  double stabilizer_q[1][3];
  PointSymmetry point_symmetry, point_symmetry_q;
  Triplets * tps;
  /* All index permutations of (q, q', q'') tried when checking whether */
  /* an equivalent triplet has already been registered.                 */
  const int index_exchange[6][3] = {{ 0, 1, 2 },
                                    { 2, 0, 1 },
                                    { 1, 2, 0 },
                                    { 2, 1, 0 },
                                    { 0, 2, 1 },
                                    { 1, 0, 2 }};

  /* Tolerance for stabilizer detection; scales with mesh density. */
  tolerance = 0.1 / (mesh[0] + mesh[1] + mesh[2]);
  num_grid = mesh[0] * mesh[1] * mesh[2];
  map = (int*) malloc(num_grid * sizeof(int));
  unique_q = (int*) malloc(num_grid * sizeof(int));
  grid = (int (*)[3]) malloc(sizeof(int[3]) * num_grid);
  point_symmetry = get_point_group_reciprocal(rotations,
                                              is_time_reversal);

  /* Only consider the gamma-point */
  for (i = 0; i < 3; i++) {
    is_shift[i] = 0;
  }
  num_ir = get_ir_reciprocal_mesh(grid,
                                  map,
                                  mesh,
                                  is_shift,
                                  &point_symmetry);

  /* weight_counts[u][j]: accumulated multiplicity of the triplet whose */
  /* first member has unique-ir index u and second member is address j. */
  weight_counts = allocate_array2d_int(num_ir, num_grid);
  for (i = 0; i < num_ir; i++) {
    for (j = 0; j < num_grid; j++) {
      weight_counts[i][j] = 0;
    }
  }

  for (i = 0; i < 3; i++) {
    mesh_double[i] = mesh[i] * 2;
  }

  /* Prepare triplet mapping table to enhance speed of query */
  /* 'unique_q' numbering is prepared for saving memory space */
  num_unique_q = 0;
  for (i = 0; i < num_grid; i++) {
    if (i == map[i]) {
      unique_q[i] = num_unique_q;
      num_unique_q++;
    }
    else {
      unique_q[i] = unique_q[map[i]];
    }
  }

  /* Prepare grid point mapping table */
  map_sym = allocate_array2d_int(point_symmetry.size, num_grid);
  get_grid_mapping_table(map_sym,
                         &point_symmetry,
                         mesh,
                         is_shift);

  /* Search triplets without considering combination */
  /* #pragma omp parallel for private(j, k, l, grid_double, point_symmetry_q, stabilizer_q, weight_q, grid_local, address, map_q, weight ) */
  for (i = 0; i < num_grid; i++) {
    if (! (i == map[i])) {
      continue;
    }
    /* Orbit size (multiplicity) of the first q-point. */
    weight = 0;
    for (j = 0; j < num_grid; j++) {
      if (i == map[j]) {
        weight++;
      }
    }

    /* Search irreducible q-points (map_q) with a stabilizer */
    address_to_grid(grid_double[0], i, mesh, is_shift); /* q */
    for (j = 0; j < 3; j++) {
      stabilizer_q[0][j] = (double)grid_double[0][j] / mesh_double[j];
    }
    point_symmetry_q = get_point_group_reciprocal_with_q(&point_symmetry,
                                                         tolerance,
                                                         1,
                                                         stabilizer_q);

    grid_local = (int (*)[3]) malloc(sizeof(int[3]) * num_grid);
    map_q = (int*) malloc(num_grid * sizeof(int));
    get_ir_reciprocal_mesh(grid_local,
                           map_q,
                           mesh,
                           is_shift,
                           &point_symmetry_q);
    /* Only map_q is needed; the locally reduced grid is discarded. */
    free(grid_local);
    grid_local = NULL;

    for (j = 0; j < num_grid; j++) {
      if (! (j == map_q[j])) {
        continue;
      }
      /* Multiplicity of the second q-point under the stabilizer. */
      weight_q = 0;
      for (k = 0; k < num_grid; k++) {
        if (j == map_q[k]) {
          weight_q++;
        }
      }

      address_to_grid(grid_double[1], j, mesh, is_shift); /* q' */
      for (k = 0; k < 3; k++) { /* q'' */
        grid_double[2][k] = - grid_double[0][k] - grid_double[1][k];
      }
      get_vector_modulo(grid_double[2], mesh_double);
      q_2 = grid_to_address(grid_double[2], mesh, is_shift);

      /* Look for irreducible triplets exchanging three q-points */
      /* and equivalent by symmetry rotations */
      for (k = 0; k < point_symmetry.size; k++) {
        /* Index exchange */
        for (l = 0; l < 6; l++) {
          /* Rotated grid point addresses with index exchange */
          address[index_exchange[l][0]] = map_sym[k][i];
          address[index_exchange[l][1]] = map_sym[k][j];
          address[index_exchange[l][2]] = map_sym[k][q_2];
          /* address[0] has to be one of ir-q-points. */
          if (address[0] == map[address[0]]) {
            /* Is the set of address[0] and address[1] already found? */
            if (weight_counts[unique_q[address[0]]][address[1]]) {
              weight_counts[unique_q[address[0]]][address[1]] +=
                weight * weight_q;
              goto escape;
            }
          }
        }
      }
      /* Not found, then this is an irreducible triplet. */
      weight_counts[unique_q[i]][j] = weight * weight_q;
    escape:
      ;
    }
    free(map_q);
    map_q = NULL;
  }

  /* Count registered triplets to size the result container. */
  num_triplets = 0;
  for (i = 0; i < num_grid; i++) {
    if (! (i == map[i])) {
      continue;
    }
    for (j = 0; j < num_grid; j++) {
      if (weight_counts[unique_q[i]][j]) {
        num_triplets++;
      }
    }
  }

  tps = allocate_triplets(num_triplets, mesh);

  for (i = 0; i < num_grid; i++) {
    for (j = 0; j < 3; j++) {
      tps->mesh_points[i][j] = grid[i][j];
    }
  }

  /* Fill the triplet list; q'' is recomputed from momentum */
  /* conservation q'' = -(q + q') modulo the doubled mesh.  */
  count = 0;
  for (i = 0; i < num_grid; i++) {
    if (! (i == map[i])) {
      continue;
    }
    for (j = 0; j < num_grid; j++) {
      if (weight_counts[unique_q[i]][j] ) {
        tps->triplets[count][0] = i;
        tps->triplets[count][1] = j;
        address_to_grid(grid_double[0], i, mesh, is_shift); /* q */
        address_to_grid(grid_double[1], j, mesh, is_shift); /* q' */
        for (l = 0; l < 3; l++) { /* q'' */
          grid_double[2][l] = - grid_double[0][l] - grid_double[1][l];
        }
        get_vector_modulo(grid_double[2], mesh_double);
        tps->triplets[count][2] = grid_to_address(grid_double[2], mesh, is_shift);
        tps->weights[count] = weight_counts[unique_q[i]][j];
        count++;
      }
    }
  }

  free_array2D_int(map_sym, point_symmetry.size);
  free_array2D_int(weight_counts, num_ir);
  free(map);
  map = NULL;
  free(unique_q);
  unique_q = NULL;
  free(grid);
  grid = NULL;

  return tps;
}
static int get_ir_triplets_at_q(int weights[],
                                int grid_points[][3],
                                int third_q[],
                                const int grid_point,
                                const int mesh[3],
                                PointSymmetry * pointgroup)
{
  /* Enumerate triplets (q, q', q'') with q fixed at 'grid_point'.     */
  /* q' runs over the mesh reduced by the stabilizer of q; q'' follows */
  /* from momentum conservation q+q'+q''=G. Outputs: weights[] holds   */
  /* the multiplicity for each representative q', third_q[] the grid   */
  /* address of q'' for each ir-q' (-1 elsewhere). Returns the number  */
  /* of irreducible triplets.                                          */
  int i, j, k, num_grid, weight_q, q_2, num_ir;
  int mesh_double[3], address[3], is_shift[3];
  int grid_double[3][3];
  int *map_q;
  double tolerance;
  double stabilizer_q[1][3];
  PointSymmetry pointgroup_q;

  tolerance = 0.1 / (mesh[0] + mesh[1] + mesh[2]);
  num_grid = mesh[0] * mesh[1] * mesh[2];

  for (i = 0; i < 3; i++) {
    /* Only consider the gamma-point */
    is_shift[i] = 0;
    mesh_double[i] = mesh[i] * 2;
  }

  /* Search irreducible q-points (map_q) with a stabilizer */
  address_to_grid(grid_double[0], grid_point, mesh, is_shift); /* q */
  for (i = 0; i < 3; i++) {
    /* Fractional coordinate of the fixed q-point. */
    stabilizer_q[0][i] = (double)grid_double[0][i] / mesh_double[i];
  }

  pointgroup_q = get_point_group_reciprocal_with_q(pointgroup,
                                                   tolerance,
                                                   1,
                                                   stabilizer_q);
  map_q = (int*) malloc(sizeof(int) * num_grid);
  get_ir_reciprocal_mesh(grid_points,
                         map_q,
                         mesh,
                         is_shift,
                         &pointgroup_q);

  for (i = 0; i < num_grid; i++) {
    weights[i] = 0;
    third_q[i] = -1;
  }

  num_ir = 0;
  for (i = 0; i < num_grid; i++) {
    if (i != map_q[i]) { /* pass only ir-q'-point */
      continue;
    }

    /* Multiplicity of this irreducible q'. */
    weight_q = 0;
    for (j = 0; j < num_grid; j++) {
      if (i == map_q[j]) {
        weight_q++;
      }
    }

    address_to_grid(grid_double[1], i, mesh, is_shift); /* q' */
    for (j = 0; j < 3; j++) { /* q'' */
      grid_double[2][j] = - grid_double[0][j] - grid_double[1][j];
    }
    get_vector_modulo(grid_double[2], mesh_double);
    q_2 = grid_to_address(grid_double[2], mesh, is_shift);
    third_q[i] = q_2;

    /* If the ir-image of q'' already carries a weight, fold this     */
    /* triplet into it; otherwise register a new one at i.            */
    /* NOTE(review): the merge adds into weights[map_q[q_2]] while a  */
    /* fresh entry is stored at weights[i] — presumably exploiting    */
    /* that (q', q'') can be exchanged; confirm against upstream      */
    /* spglib before modifying.                                       */
    if (weights[map_q[q_2]]) {
      weights[map_q[q_2]] += weight_q;
    } else {
      weights[i] = weight_q;
      num_ir++;
    }
  }

  free(map_q);
  map_q = NULL;

  return num_ir;
}
static int extract_ir_triplets_with_q(int triplets_with_q[][3],
                                      int weight_with_q[],
                                      const int fixed_grid_number,
                                      SPGCONST int triplets[][3],
                                      const int num_triplets,
                                      const int mesh[3],
                                      SPGCONST PointSymmetry *point_symmetry)
{
  /* From a list of irreducible triplets, extract those that can be      */
  /* rotated (and/or index-exchanged) so that one member coincides with  */
  /* 'fixed_grid_number', storing them with the fixed point first.       */
  /* Weights are then recomputed by orbit counting over all grid points. */
  /* Returns the number of extracted triplets; 0 signals an internal     */
  /* inconsistency (see the warning near the bottom).                    */
  int i, j, k, sym_num, rest_index, num_triplets_with_q;
  int address0, address1, address1_orig, found;
  int is_shift[3];
  int num_grid;
  int **map_sym;

  num_grid = mesh[0] * mesh[1] * mesh[2];
  map_sym = allocate_array2d_int(point_symmetry->size, num_grid);

  /* Only consider the gamma-point */
  for (i = 0; i < 3; i++) {
    is_shift[i] = 0;
  }

  /* Prepare mapping tables */
  get_grid_mapping_table(map_sym,
                         point_symmetry,
                         mesh,
                         is_shift);

  num_triplets_with_q = 0;
  for (i = 0; i < num_triplets; i++) {
    /* Find a symmetry operation j mapping fixed_grid_number onto one */
    /* member of triplets[i], together with the grid point k whose    */
    /* image matches a second member.                                 */
    sym_num = -1;
    for (j = 0; j < point_symmetry->size; j++) {
      address0 = map_sym[j][fixed_grid_number];
      if (triplets[i][0] == address0 ||
          triplets[i][1] == address0 ||
          triplets[i][2] == address0) {
        for (k = 0; k < num_grid; k++) {
          address1 = map_sym[j][k];
          /* Matching indices 0 and 1 */
          if ((triplets[i][0] == address0 && triplets[i][1] == address1) ||
              (triplets[i][1] == address0 && triplets[i][0] == address1)) {
            sym_num = j;
            rest_index = 2;
            address1_orig = k;
            break;
          }
          /* Matching indices 1 and 2 */
          if ((triplets[i][1] == address0 && triplets[i][2] == address1) ||
              (triplets[i][2] == address0 && triplets[i][1] == address1)) {
            sym_num = j;
            rest_index = 0;
            address1_orig = k;
            break;
          }
          /* Matching indices 2 and 0 */
          if ((triplets[i][2] == address0 && triplets[i][0] == address1) ||
              (triplets[i][0] == address0 && triplets[i][2] == address1)) {
            sym_num = j;
            rest_index = 1;
            address1_orig = k;
            break;
          }
        }
        if (sym_num > -1) {
          break;
        }
      }
    }
    /* Found? */
    /* rest_index and address1_orig are assigned whenever sym_num is, */
    /* so both are well-defined inside this branch.                   */
    if (sym_num > -1) {
      for (j = 0; j < num_grid; j++) {
        if (map_sym[sym_num][j] == triplets[i][rest_index]) {
          triplets_with_q[num_triplets_with_q][0] = fixed_grid_number;
          /* Store the remaining two addresses in ascending order. */
          if (j > address1_orig) {
            triplets_with_q[num_triplets_with_q][1] = address1_orig;
            triplets_with_q[num_triplets_with_q][2] = j;
          } else {
            triplets_with_q[num_triplets_with_q][2] = address1_orig;
            triplets_with_q[num_triplets_with_q][1] = j;
          }
          num_triplets_with_q++;
          break;
        }
      }
    }
  }

  /* Recompute weights: every grid point must fall into the orbit of */
  /* exactly one extracted triplet.                                  */
  for (i = 0; i < num_triplets_with_q; i++) {
    weight_with_q[i] = 0;
  }
  for (i = 0; i < num_grid; i++) {
    found = 0;
    for (j = 0; j < num_triplets_with_q; j++) {
      for (k = 0; k < point_symmetry->size; k++) {
        if (map_sym[k][fixed_grid_number] == triplets_with_q[j][0]) {
          if (map_sym[k][i] == triplets_with_q[j][1] ||
              map_sym[k][i] == triplets_with_q[j][2]) {
            weight_with_q[j]++;
            found = 1;
            break;
          }
        }
        if (map_sym[k][fixed_grid_number] == triplets_with_q[j][1]) {
          if (map_sym[k][i] == triplets_with_q[j][2] ||
              map_sym[k][i] == triplets_with_q[j][0]) {
            weight_with_q[j]++;
            found = 1;
            break;
          }
        }
        if (map_sym[k][fixed_grid_number] == triplets_with_q[j][2]) {
          if (map_sym[k][i] == triplets_with_q[j][0] ||
              map_sym[k][i] == triplets_with_q[j][1]) {
            weight_with_q[j]++;
            found = 1;
            break;
          }
        }
      }
      if (found) {
        break;
      }
    }
    if (! found) {
      /* A grid point not covered by any extracted triplet indicates */
      /* a bug or inconsistent input; report and return empty.       */
      warning_print("spglib: Unexpected behavior in extract_ir_triplets_with_q ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      num_triplets_with_q = 0;
      break;
    }
  }

  free_array2D_int(map_sym, point_symmetry->size);

  return num_triplets_with_q;
}
static void get_grid_mapping_table(int **map_sym,
                                   SPGCONST PointSymmetry *point_symmetry,
                                   const int mesh[3],
                                   const int is_shift[3])
{
  /* Fill map_sym so that map_sym[s][a] is the grid address of point a */
  /* after applying rotation s of the point group.                     */
  int s, a, c, num_grid;
  int grid_rot[3], grid_double[3], mesh_double[3];

  num_grid = mesh[0] * mesh[1] * mesh[2];
  for (c = 0; c < 3; c++) {
    mesh_double[c] = mesh[c] * 2;
  }

  for (s = 0; s < point_symmetry->size; s++) {
    for (a = 0; a < num_grid; a++) {
      address_to_grid(grid_double, a, mesh, is_shift);
      mat_multiply_matrix_vector_i3(grid_rot,
                                    point_symmetry->rot[s],
                                    grid_double);
      /* Fold back into the doubled mesh cell before re-addressing. */
      get_vector_modulo(grid_rot, mesh_double);
      map_sym[s][a] = grid_to_address(grid_rot, mesh, is_shift);
    }
  }
}
static int grid_to_address(const int grid_double[3],
                           const int mesh[3],
                           const int is_shift[3])
{
  /* Convert doubled grid coordinates to a linear mesh address.       */
  /* Each component must match the parity implied by is_shift: even   */
  /* on an unshifted axis, odd on a shifted one. A parity mismatch    */
  /* means the point does not lie on the mesh and -1 is returned.     */
  int i, is_odd;
  int grid[3];

  for (i = 0; i < 3; i++) {
    is_odd = (grid_double[i] % 2 != 0);
    if (is_odd != (is_shift[i] != 0)) {
      return -1; /* parity mismatch on this axis */
    }
    grid[i] = is_odd ? (grid_double[i] - 1) / 2 : grid_double[i] / 2;
  }

#ifndef GRID_ORDER_XYZ
  return grid[2] * mesh[0] * mesh[1] + grid[1] * mesh[0] + grid[0];
#else
  return grid[0] * mesh[1] * mesh[2] + grid[1] * mesh[2] + grid[2];
#endif
}
static void address_to_grid(int grid_double[3],
                            const int address,
                            const int mesh[3],
                            const int is_shift[3])
{
  /* Inverse of grid_to_address: unpack a linear mesh address into     */
  /* doubled grid coordinates, re-applying the half-step shift bit on  */
  /* each axis.                                                        */
  int i;
  int grid[3];

#ifndef GRID_ORDER_XYZ
  grid[0] = address % mesh[0];
  grid[1] = (address / mesh[0]) % mesh[1];
  grid[2] = address / (mesh[0] * mesh[1]);
#else
  grid[2] = address % mesh[2];
  grid[1] = (address / mesh[2]) % mesh[1];
  grid[0] = address / (mesh[1] * mesh[2]);
#endif

  for (i = 0; i < 3; i++) {
    grid_double[i] = grid[i] * 2 + is_shift[i];
  }
}
static void get_grid_points(int grid[3],
                            const int grid_double[3],
                            const int mesh[3])
{
  /* Halve doubled grid coordinates (dropping the shift bit) and wrap */
  /* each component into the interval symmetric around zero.          */
  int i;

  for (i = 0; i < 3; i++) {
    grid[i] = (grid_double[i] % 2 == 0) ?
      grid_double[i] / 2 : (grid_double[i] - 1) / 2;
#ifndef GRID_BOUNDARY_AS_NEGATIVE
    /* Points strictly above the midpoint map to negative indices. */
    if (grid[i] > mesh[i] / 2) {
      grid[i] -= mesh[i];
    }
#else
    /* The boundary point itself is also taken as negative. */
    if (grid[i] >= mesh[i] / 2) {
      grid[i] -= mesh[i];
    }
#endif
  }
}
static void get_vector_modulo(int v[3],
                              const int m[3])
{
  /* Componentwise non-negative modulo: each v[i] is mapped into */
  /* the range [0, m[i]).                                        */
  int i;

  for (i = 0; i < 3; i++) {
    v[i] = ((v[i] % m[i]) + m[i]) % m[i];
  }
}
static void free_array2D_int(int **array,
                             const int num_row)
{
  /* Release a 2D int array created by allocate_array2d_int: each row */
  /* block first, then the row-pointer block itself.                  */
  int row;

  for (row = 0; row < num_row; row++) {
    free(array[row]);
    array[row] = NULL;
  }
  free(array);
}
/* Allocate a num_row x num_column 2-D int array as one row-pointer array
 * plus one buffer per row (matching free_array2D_int()'s layout).
 * Returns NULL on allocation failure; the old code left malloc results
 * unchecked, so a failed row allocation leaked everything and produced a
 * NULL row that crashed on first use. On failure all partial allocations
 * are rolled back. Contents are uninitialized. */
static int ** allocate_array2d_int(const int num_row,
                                   const int num_column)
{
  int i;
  int **array;

  array = (int**) malloc(num_row * sizeof(int*));
  if (array == NULL) {
    return NULL;
  }
  for (i = 0; i < num_row; i++) {
    array[i] = (int*) malloc(num_column * sizeof(int));
    if (array[i] == NULL) {
      /* roll back the rows allocated so far */
      while (--i >= 0) {
        free(array[i]);
      }
      free(array);
      return NULL;
    }
  }
  return array;
}
/* Allocate a Triplets container holding num_triplets triplets plus one
 * mesh-point entry per grid point of the given mesh; copies the mesh
 * dimensions into the container. Triplet/weight/mesh-point contents are
 * left uninitialized for the caller to fill.
 * NOTE(review): none of the malloc() results (including tps itself) are
 * checked, so an out-of-memory condition dereferences NULL here or later
 * in the caller — confirm whether a NULL-checked failure path is wanted. */
static Triplets * allocate_triplets(const int num_triplets, const int mesh[3])
{
int i, num_grid;
Triplets * tps;
/* total number of grid points on the mesh */
num_grid = mesh[0] * mesh[1] * mesh[2];
tps = (Triplets*) malloc(sizeof(Triplets));
tps->size = num_triplets;
tps->triplets = (int (*)[3]) malloc(sizeof(int[3]) * num_triplets);
tps->weights = (int*) malloc(sizeof(int) * num_triplets);
tps->mesh_points = (int (*)[3]) malloc(sizeof(int[3]) * num_grid);
for (i = 0; i < 3; i++) {
tps->mesh[i] = mesh[i];
}
return tps;
}
|
GB_unop__identity_fc32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_int32)
// op(A') function: GB (_unop_tran__identity_fc32_int32)
// C type: GxB_FC32_t
// A type: int32_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary operator elementwise:
//   Cx [p] = (GxB_FC32_t) Ax [p], casting int32 to single-precision
//   complex with zero imaginary part (via GxB_CMPLXF).
// The full/sparse case visits all anz entries; the bitmap case skips
// entries whose Ab [p] is zero. Work is split across nthreads with OpenMP.
// When GB_DISABLE is defined (operator compiled out), returns GrB_NO_VALUE
// so the caller falls back to the generic kernel.
GrB_Info GB (_unop_apply__identity_fc32_int32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full or sparse: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A, casting int32 entries to
// single-precision complex. The actual transpose loop is the shared
// template in GB_unop_transpose.c, specialized here by the GB_* macros
// defined above. Returns GrB_NO_VALUE when this operator is compiled out
// (GB_DISABLE), directing the caller to the generic kernel.
GrB_Info GB (_unop_tran__identity_fc32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
wserver.c | /*
MIT License
Copyright (c) 2017 Emanuele Giona
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "wserver.h"
/* Bitwise XOR of two ints — the symmetric primitive used for both
 * encryption and decryption (applying it twice with the same key is the
 * identity). */
int XOR(int a, int b) {
    return a ^ b;
}
/*
 * XOR-encrypt/decrypt `srcfile` (dim bytes) into `dstfile` using a
 * keystream of rand() values seeded with `seed`; every 4 consecutive
 * bytes share one key value. Both files are exclusively locked, memory
 * mapped, and processed either in a single view (<= 256 KB) or in 5 MB
 * chunks with one OpenMP thread per 256 KB. Returns HTTP-style codes:
 * 200 on success, 400 if a file cannot be opened, 500 on any other error
 * (the message is left in the global lastError buffer).
 */
int fileXOR(char srcfile[], char dstfile[], long long dim, int seed) {
// open the source and destination files
HANDLE src = CreateFile(srcfile,GENERIC_READ,FILE_SHARE_READ,NULL,OPEN_EXISTING, FILE_FLAG_RANDOM_ACCESS,NULL);
if (src==INVALID_HANDLE_VALUE) {
sprintf(lastError, "Errore apertura file %s.\n", srcfile);
return 400;
}
HANDLE dst = CreateFile(dstfile, GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_FLAG_RANDOM_ACCESS, NULL);
if (dst==INVALID_HANDLE_VALUE) {
sprintf(lastError, "Errore apertura file %s.\n", dstfile);
CloseHandle(src);
return 400;
}
// acquire exclusive, non-blocking locks on both files
OVERLAPPED srcoverlap;
memset(&srcoverlap,0,sizeof(srcoverlap));
if (!LockFileEx(src, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, 0, dim, &srcoverlap)) {
sprintf(lastError, "Errore lock su file %s.\n", srcfile);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
OVERLAPPED dstoverlap;
memset(&dstoverlap, 0, sizeof(dstoverlap));
if (!LockFileEx(dst, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, 0, dim, &dstoverlap)) {
sprintf(lastError, "Errore lock su file %s.\n", dstfile);
UnlockFileEx(src,0,0,dim,&srcoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
LARGE_INTEGER fileSize, fileMapSize, mapViewSize, fileMapStart;
DWORD granularity;
SYSTEM_INFO sysInfo;
long offset;
GetSystemInfo(&sysInfo);
// map views must start at a multiple of the allocation granularity
granularity = sysInfo.dwAllocationGranularity;
// seek to the end of the output file to pre-size it
LARGE_INTEGER LIrounded;
// NOTE(review): HighPart is fixed to 0, so files of 4 GB or more are not
// handled; also dim - 2 underflows for dim < 2 — confirm intended limits.
LIrounded.HighPart = 0;
LIrounded.LowPart = dim - 2;
if(!SetFilePointerEx(dst,LIrounded,NULL,FILE_BEGIN)){
sprintf(lastError, "Errore stretch file %s.\n", dstfile);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// write a placeholder so the size change is persisted
// (buff is "\0" so sizeof(buff) == 2 bytes, extending the file to dim)
char buff[] = "\0";
if (!WriteFile(dst, buff, sizeof(buff), NULL, NULL)) {
sprintf(lastError, "Errore scrittura su file %s.\n", dstfile);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// seed rand() for the keystream; the same seed reproduces the same keys,
// which is what makes decryption work
srand(seed);
// create file mappings for both files
HANDLE handle_srcmap = CreateFileMapping(src, NULL, PAGE_READONLY, 0, 0, NULL);
if (handle_srcmap == NULL) {
sprintf(lastError, "Errore file mapping su file %s: %d\n", srcfile, GetLastError());
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
HANDLE handle_dstmap = CreateFileMapping(dst, NULL, PAGE_READWRITE, 0, 0, NULL);
if (handle_dstmap == NULL) {
sprintf(lastError, "Errore file mapping su file %s: %d\n", dstfile, GetLastError());
CloseHandle(handle_srcmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// threads are not needed below 256 KB: single full-file mapping
if (dim <= 256 * 1024) {
MEMORYSTATUSEX memstatus;
memstatus.dwLength = sizeof(memstatus);
GlobalMemoryStatusEx(&memstatus);
// NOTE(review): ullAvailVirtual is 64-bit; storing it in a long can
// truncate where long is 32-bit (Win32/Win64) — confirm.
long freeMem = memstatus.ullAvailVirtual;
if (freeMem <= 3 * dim) {
sprintf(lastError, "RAM insufficiente per aprire il file %s.\n", srcfile);
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// map both files whole
char *srcmap = (char *)MapViewOfFile(handle_srcmap,FILE_MAP_READ,0,0,0);
if ((LPVOID)srcmap == NULL) {
sprintf(lastError, "Errore mapview su file %s: %d.\n", srcfile, GetLastError());
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
char *dstmap = (char *)MapViewOfFile(handle_dstmap, FILE_MAP_ALL_ACCESS, 0, 0, 0);
if ((LPVOID)dstmap == NULL) {
sprintf(lastError, "Errore mapview su file %s: %d.\n", dstfile, GetLastError());
UnmapViewOfFile((LPVOID)srcmap);
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// XOR key array: 4 consecutive bytes share the same key value
// NOTE(review): only ceil(dim/4) keys are consumed below; the extra *4
// factor over-allocates (and over-draws from rand()) by 4x — confirm,
// since changing it would change the keystream for existing files.
long keyDim = (long)ceil((double)dim / 4) * 4;
int *key;
key = malloc(keyDim * sizeof(int));
if (key == NULL) {
sprintf(lastError, "Errore malloc.\n");
UnmapViewOfFile((LPVOID)srcmap);
UnmapViewOfFile((LPVOID)dstmap);
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
for (long i = 0; i<keyDim; i++) {
key[i] = rand() % 65536; // cap the generated value for portability across compilers
}
// XOR and write into the mapping, 4 bytes per key value
// NOTE(review): when dim is not a multiple of 4, the last iteration
// touches up to 3 bytes past dim (still inside the page-granular
// mapping) — confirm this is acceptable.
long i, j;
for (i = 0, j = 0; i<dim && j<keyDim; i += 4, j++) {
dstmap[i] = (char)(XOR((int)srcmap[i], key[j]));
dstmap[i + 1] = (char)(XOR((int)srcmap[i + 1], key[j]));
dstmap[i + 2] = (char)(XOR((int)srcmap[i + 2], key[j]));
dstmap[i + 3] = (char)(XOR((int)srcmap[i + 3], key[j]));
}
free(key);
UnmapViewOfFile((LPVOID)srcmap);
UnmapViewOfFile((LPVOID)dstmap);
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
}
// threads are needed: OpenMP, suggested 1 thread per 256 KB block,
// processing the file in 5 MB chunks
else {
long fiveMB = 5 * pow(2, 20);
int chunks = (int)ceil((double)dim / fiveMB);
for (int c = 0; c<chunks; c++) {
MEMORYSTATUSEX memstatus;
memstatus.dwLength = sizeof(memstatus);
GlobalMemoryStatusEx(&memstatus);
long freeMem = memstatus.ullAvailVirtual;
if (freeMem <= 2 * fiveMB) {
sprintf(lastError, "RAM insufficiente per aprire il file %s.\n", srcfile);
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// chunk byte range [start, realEnd) clipped to the file size
long start = (c)*fiveMB;
long end = (c + 1)*fiveMB;
long realEnd = end;
if (dim<realEnd)
realEnd = dim;
long chunkDim = realEnd - start;
// round the view base down to the allocation granularity
fileMapStart.QuadPart = (start/granularity)*granularity;
offset = start - fileMapStart.QuadPart;
if (dim - fileMapStart.LowPart < chunkDim)
chunkDim = dim - fileMapStart.LowPart;
mapViewSize.QuadPart = (start%granularity) + chunkDim;
// map chunk c of both files
char *srcmap = (char *)MapViewOfFile(handle_srcmap, FILE_MAP_READ, fileMapStart.HighPart, fileMapStart.LowPart, mapViewSize.QuadPart);
if ((LPVOID)srcmap == NULL) {
sprintf(lastError, "Errore mapview su file %s, chunk #%i: %d\n", srcfile, c, GetLastError());
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// advance to the real chunk start, past the granularity-aligned base
srcmap += offset;
char *dstmap = (char *)MapViewOfFile(handle_dstmap, FILE_MAP_ALL_ACCESS, fileMapStart.HighPart, fileMapStart.LowPart, mapViewSize.QuadPart);
if ((LPVOID)dstmap == NULL) {
sprintf(lastError, "Errore mapview su file %s: %d\n", dstfile, GetLastError());
UnmapViewOfFile((LPVOID)srcmap);
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// advance to the real chunk start, past the granularity-aligned base
dstmap += offset;
// one OpenMP thread per 256 KB of chunk
int mpThreads = (int)ceil((double)chunkDim / (256 * 1024));
// per-thread XOR key arrays, 4 consecutive bytes per key value
long keyDimT = (long)ceil((double)chunkDim / (mpThreads*4));
int *key;
key = malloc(mpThreads*keyDimT*sizeof(int));
if (key == NULL) {
sprintf(lastError, "Errore malloc.\n");
UnmapViewOfFile((LPVOID)srcmap);
UnmapViewOfFile((LPVOID)dstmap);
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
return 500;
}
// NOTE(review): the row stride here (and in the read below) is
// mpThreads where keyDimT looks intended, so per-thread key ranges
// overlap; writer and reader agree with each other, keeping
// encrypt/decrypt symmetric — confirm before changing, as a fix
// would break round-trips of already-encrypted files.
for (long j = 0; j<mpThreads; j++) {
for (long i = 0; i<keyDimT; i++) {
key[j*mpThreads + i] = rand() % 65536; // cap the generated value for portability across compilers
}
}
#pragma omp parallel num_threads(mpThreads)
{
int threadID = omp_get_thread_num();
// each thread XORs only its own 256 KB slice of the chunk
int min = (threadID) * 256 * 1024;
int max = (threadID + 1) * 256 * 1024;
for (long i = min; i<max && i<chunkDim; i += 4) {
int val = key[(threadID*mpThreads) + ((i - min) / 4)];
dstmap[i] = (char)(XOR((int)srcmap[i], val));
dstmap[i + 1] = (char)(XOR((int)srcmap[i + 1], val));
dstmap[i + 2] = (char)(XOR((int)srcmap[i + 2], val));
dstmap[i + 3] = (char)(XOR((int)srcmap[i + 3], val));
}
}
free(key);
// NOTE(review): srcmap/dstmap were advanced by `offset` above, so these
// calls do not pass the view base address UnmapViewOfFile requires and
// presumably fail silently, leaking the views — verify.
UnmapViewOfFile((LPVOID)srcmap);
UnmapViewOfFile((LPVOID)dstmap);
}
CloseHandle(handle_srcmap);
CloseHandle(handle_dstmap);
UnlockFileEx(src, 0, 0, dim, &srcoverlap);
UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
CloseHandle(src);
CloseHandle(dst);
}
return 200;
}
/* Send `message` to the client as one fixed-size BUFSIZE frame, padding
 * the remainder with zero bytes. On send failure the socket is closed
 * and 1 is returned; 0 on success. */
int sendMessage(SOCKET sock, char message[]) {
    char frame[BUFSIZE];

    /* zero-fill first so the bytes after the text are deterministic */
    memset(frame, 0, BUFSIZE);
    strncpy(frame, message, BUFSIZE);

    if (send(sock, frame, BUFSIZE, 0) == SOCKET_ERROR) {
        printf("Errore send: %d\n", WSAGetLastError());
        closesocket(sock);
        //WSACleanup();
        return 1;
    }
    return 0;
}
/*
 * Encrypt file `src` into "<src>_enc" via fileXOR() with the given seed,
 * then delete the original on success. Returns fileXOR's HTTP-style code
 * (200/400/500), 400 if src does not exist, or 500 on size/delete errors
 * (message left in the global lastError). The `sock` parameter is unused
 * here; it mirrors decrypt()'s signature.
 * NOTE(review): dst is PATHLEN bytes; no check that strlen(src) + 5 fits,
 * so an overlong path overflows dst — confirm PATHLEN vs. request limits.
 */
int encrypt(char src[], int seed, SOCKET sock) {
/* dst is zero-initialized, so strncpy without a terminator is safe here */
char dst[PATHLEN] = "";
strncpy(dst, src, strlen(src));
strncat(dst, "_enc", 5);
LARGE_INTEGER dim;
HANDLE srcfile;
/* open only to measure the file size; fileXOR reopens it itself */
srcfile = CreateFile(src, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (srcfile == INVALID_HANDLE_VALUE) {
sprintf(lastError, "File %s non esistente.\n", src);
return 400;
}
if (!GetFileSizeEx(srcfile,&dim)) {
sprintf(lastError, "Errore nel calcolo dimensione del file %s.\n", src);
CloseHandle(srcfile);
return 500;
}
CloseHandle(srcfile);
int ret = fileXOR(src, dst, (long long)dim.QuadPart, seed);
/* remove the plaintext only if encryption fully succeeded */
if (ret == 200 && !DeleteFile(src)) {
sprintf(lastError, "Errore nella cancellazione del file %s: %d\n", src, GetLastError());
return 500;
}
return ret;
}
/*
 * Decrypt file `src` (which must end in "_enc") into the same path with
 * the suffix stripped, using fileXOR() with the given seed; deletes the
 * encrypted file on success. Returns 400 if src is missing or not an
 * "*_enc" file, 500 on size/delete errors, otherwise fileXOR's code.
 * The `sock` parameter is unused here.
 */
int decrypt(char src[], int seed, SOCKET sock) {
char *enc = NULL;
/* find the LAST occurrence of "_enc" in the name */
char *temp = strstr(src, "_enc");
while (temp) {
enc = temp++;
temp = strstr(temp, "_enc");
}
/* the suffix must terminate the string (strlen(enc) == 4) */
if (enc == NULL || strlen(enc) != 4) {
sprintf(lastError, "Il file %s non e' un file cifrato.\n", src);
return 400;
}
/* dst is zero-initialized; copy everything except the 4-char suffix */
char dst[PATHLEN] = "";
strncpy(dst, src, strlen(src) - 4);
LARGE_INTEGER dim;
HANDLE srcfile;
/* open only to measure the file size; fileXOR reopens it itself */
srcfile = CreateFile(src, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (srcfile == INVALID_HANDLE_VALUE) {
sprintf(lastError, "File %s non esistente.\n", src);
return 400;
}
if (!GetFileSizeEx(srcfile, &dim)) {
sprintf(lastError, "Errore nel calcolo dimensione del file %s.\n", src);
CloseHandle(srcfile);
return 500;
}
CloseHandle(srcfile);
int ret = fileXOR(src, dst, (long long)dim.QuadPart, seed);
/* remove the ciphertext only if decryption fully succeeded */
if (ret == 200 && !DeleteFile(src)) {
sprintf(lastError, "Errore nella cancellazione del file %s.\n", src);
return 500;
}
return ret;
}
/*
 * List the regular files directly inside `folder` (non-recursive),
 * sending one "<size> <path>\r\n" entry per file over `sock` via
 * sendMessage(). Subdirectories and the "." / ".." entries are skipped.
 * Returns 200 on success, 400 if the directory cannot be opened.
 * NOTE(review): sendMessage() failures are ignored, so a dead client
 * still yields 200 — confirm that is acceptable.
 */
int listFolder(char folder[], SOCKET sock) {
WIN32_FIND_DATA find_data;
char dir[MAX_PATH];
LARGE_INTEGER dim;
HANDLE handle_find = INVALID_HANDLE_VALUE;
/* enumerate with the "folder\*.*" wildcard */
snprintf(dir,MAX_PATH,"%s\\*.*",folder);
handle_find = FindFirstFile(dir,&find_data);
if(handle_find == INVALID_HANDLE_VALUE){
sprintf(lastError, "Errore apertura directory %s.\n", folder);
return 400;
}
do {
char path[PATHLEN];
char entry[PATHLEN+50];
ZeroMemory(path,sizeof(path));
ZeroMemory(entry, sizeof(entry));
if (strcmp(find_data.cFileName,".")==0 || strcmp(find_data.cFileName,"..")==0 || find_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
continue;
}
else {
/* reassemble the 64-bit size from the two 32-bit halves */
dim.LowPart = find_data.nFileSizeLow;
dim.HighPart = find_data.nFileSizeHigh;
snprintf(path,PATHLEN,"%s/%s",folder,find_data.cFileName);
snprintf(entry,PATHLEN+50,"%llu %s", dim.QuadPart, path);
sendMessage(sock, entry);
sendMessage(sock, "\r\n");
}
} while (FindNextFile(handle_find,&find_data)!=0);
FindClose(handle_find);
return 200;
}
/*
 * Recursively list all regular files under `folder`, sending one
 * "<size> <path>\r\n" entry per file over `sock` via sendMessage().
 * Subdirectories are descended into; "." and ".." are skipped.
 * Returns 200 on success, 400 if any directory cannot be opened (the
 * message is left in the global lastError).
 *
 * Fixes vs. the previous version:
 *  - the FindFirstFile handle was leaked when a recursive call failed
 *    (early `return ret` without FindClose);
 *  - path/entry are now built with snprintf, matching listFolder() and
 *    preventing overflow on deeply nested paths.
 */
int listRecursive(char folder[], SOCKET sock) {
WIN32_FIND_DATA find_data;
char dir[MAX_PATH];
LARGE_INTEGER dim;
HANDLE handle_find = INVALID_HANDLE_VALUE;
/* enumerate with the "folder\*.*" wildcard */
snprintf(dir, MAX_PATH, "%s\\*.*", folder);
handle_find = FindFirstFile(dir, &find_data);
if (handle_find == INVALID_HANDLE_VALUE) {
sprintf(lastError, "Errore apertura directory %s.\n", folder);
return 400;
}
do {
char path[PATHLEN];
char entry[PATHLEN+50];
ZeroMemory(path, sizeof(path));
ZeroMemory(entry, sizeof(entry));
if (strcmp(find_data.cFileName, ".") == 0 || strcmp(find_data.cFileName, "..") == 0) {
continue;
}
else if(find_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY){
/* descend into the subdirectory */
snprintf(path, PATHLEN, "%s/%s", folder, find_data.cFileName);
int ret = listRecursive(path,sock);
if (ret != 200) {
FindClose(handle_find);  /* don't leak the search handle on failure */
return ret;
}
}
else {
/* reassemble the 64-bit size from the two 32-bit halves */
dim.LowPart = find_data.nFileSizeLow;
dim.HighPart = find_data.nFileSizeHigh;
snprintf(path, PATHLEN, "%s/%s", folder, find_data.cFileName);
snprintf(entry, PATHLEN+50, "%llu %s", dim.QuadPart, path);
sendMessage(sock, entry);
sendMessage(sock, "\r\n");
}
} while (FindNextFile(handle_find, &find_data) != 0);
FindClose(handle_find);
return 200;
}
/*
 * Parse and execute one client request. Supported commands: LSTF (flat
 * listing), LSTR (recursive listing), "ENCR <seed> <path>" and
 * "DECR <seed> <path>". Sends STATE_PENDING before listings and the final
 * STATE_OK / STATE_ERROR / STATE_UNAVAIL (plus lastError text) according
 * to the handler's 200/400/500 result. Returns that result, or 1 if the
 * served folder is no longer accessible, or 0 for unknown commands.
 *
 * Fix vs. the previous version: the command token buffer was char s[4]
 * while sscanf's unbounded %s writes the 4 command letters PLUS the NUL
 * terminator — a guaranteed one-byte stack overflow. The buffer is now
 * larger and the scan is width-limited (%7s).
 */
int parseRequest(char folder[], char message[], SOCKET sock) {
WIN32_FIND_DATA dirdata;
char temp[MAX_PATH];
/* sanity check: the served folder must still be enumerable */
snprintf(temp,MAX_PATH,"%s\\*.*",folder);
HANDLE dir = FindFirstFile(temp,&dirdata);
if (dir == INVALID_HANDLE_VALUE) {
return 1;
}
FindClose(dir);
int ret = 0;
if (strstr(message, "LSTF") != NULL) {
sendMessage(sock, STATE_PENDING);
ret = listFolder(folder, sock);
sendMessage(sock, "\r\n.\r\n");  /* end-of-listing marker */
}
else if (strstr(message, "LSTR") != NULL) {
sendMessage(sock, STATE_PENDING);
ret = listRecursive(folder, sock);
sendMessage(sock, "\r\n.\r\n");  /* end-of-listing marker */
}
else if (strstr(message, "ENCR") != NULL) {
char s[8] = "";  /* command token + NUL; was s[4], overflowed by %s */
unsigned int seed = -1;  /* sentinel: wraps to UINT_MAX */
char path[PATHLEN] = "errore";  /* sentinel: unchanged means no path parsed */
/* NOTE(review): %[^\n] into path is still unbounded; message comes from
 * a BUFSIZE recv buffer — confirm BUFSIZE <= PATHLEN or bound the scan. */
sscanf(message, "%7s %u %[^\n]%*s", s, &seed, path);
if (seed != -1 && strcmp(path, "errore") != 0) {
ret = encrypt(path, seed, sock);
}
}
else if (strstr(message, "DECR") != NULL) {
char s[8] = "";  /* command token + NUL; was s[4], overflowed by %s */
unsigned int seed = -1;  /* sentinel: wraps to UINT_MAX */
char path[PATHLEN] = "errore";  /* sentinel: unchanged means no path parsed */
sscanf(message, "%7s %u %[^\n]%*s", s, &seed, path);
if (seed != -1 && strcmp(path, "errore") != 0) {
ret = decrypt(path, seed, sock);
}
}
/* map the handler's result to a protocol state line */
if (ret == 200) {
sendMessage(sock, STATE_OK);
}
else if (ret == 400) {
sendMessage(sock, lastError);
sendMessage(sock, STATE_ERROR);
}
else if (ret == 500) {
sendMessage(sock, lastError);
sendMessage(sock, STATE_UNAVAIL);
}
return ret;
}
/*
 * Append a new request to the shared FIFO queue (first/last/numReqs
 * globals), assign it the next request ID, log it, and wake the worker
 * threads. Returns 0 on success, 1 on allocation failure.
 *
 * Fixes vs. the previous version:
 *  - req->message pointed at a local stack buffer that died when this
 *    function returned, so workers read freed stack memory; the message
 *    is now copied to the heap. (The copy is currently not freed by the
 *    consumer in task(), which only frees req — a bounded leak per
 *    request, but no longer a dangling pointer.)
 *  - nextReqID is now incremented under the lock, together with its read.
 */
int addRequest(SRWLOCK *mutex, CONDITION_VARIABLE *cond, char *folder, char *address, char *message, SOCKET sock) {
struct request *req = (struct request *)malloc(sizeof(struct request));
if (!req) {
char toLog[BUFSIZE] = "";
sprintf(toLog, "Errore malloc richiesta.\n");
writeLog(LOGFILE, toLog);
return 1;
}
/* heap copy of the message; the old code stored a dangling stack pointer */
char *msgCopy = (char *)malloc(PATHLEN + 100);
if (!msgCopy) {
free(req);
char toLog[BUFSIZE] = "";
sprintf(toLog, "Errore malloc richiesta.\n");
writeLog(LOGFILE, toLog);
return 1;
}
ZeroMemory(msgCopy, PATHLEN + 100);
snprintf(msgCopy, PATHLEN + 100, "%s", message);
AcquireSRWLockExclusive(mutex);
req->ID = nextReqID;
req->folder = folder;
/* NOTE(review): address still aliases the caller's buffer, which the
 * accept loop reuses per connection — consider copying it as well. */
req->address = address;
req->message = msgCopy;
req->sock = sock;
req->next = NULL;
char toLog[BUFSIZE] = "";
sprintf(toLog, "[Richiesta #%i] [%s] [%s]\n", nextReqID, address, message);
writeLog(LOGFILE, toLog);
//printf("[Richiesta #%i] [%s] [%s]\n",nextReqID,address,message);
if (numReqs == 0)
first = req;
else
last->next = req;
last = req;
numReqs++;
nextReqID++;  /* under the lock, paired with the read above */
WakeAllConditionVariable(cond);
ReleaseSRWLockExclusive(mutex);
return 0;
}
/* Pop the oldest request from the shared FIFO queue (first/last/numReqs
 * globals) under the given lock. Returns NULL when the queue is empty;
 * the caller owns (and must free) the returned node. */
struct request* removeRequest(SRWLOCK *mutex) {
    struct request *head = NULL;

    AcquireSRWLockExclusive(mutex);
    if (numReqs > 0) {
        head = first;
        first = head->next;
        if (first == NULL)
            last = NULL;  /* queue drained: keep the tail consistent */
        numReqs--;
    }
    ReleaseSRWLockExclusive(mutex);
    return head;
}
/*
 * Worker-thread entry point: repeatedly pop a request from the shared
 * queue, run parseRequest() on it, log the outcome, then free the node
 * and close the client socket. Sleeps on reqCond while the queue is
 * empty; runs until the global `run` flag is cleared.
 *
 * Fix vs. the previous version: the queue count was read with the lock
 * released before deciding to sleep, so a request queued (and WakeAll
 * issued) in that window was missed and the thread slept anyway — a
 * classic lost wakeup. The empty check and the wait now happen together
 * under the lock, in a loop that re-checks on every wake.
 */
DWORD WINAPI task(void *arg) {
int *threadID = (int *)arg;
struct request *req;
while (run) {
AcquireSRWLockExclusive(&reqMutex);
while (numReqs == 0 && run) {
/* atomically releases the lock while sleeping, re-acquires on wake */
SleepConditionVariableSRW(&reqCond, &reqMutex, INFINITE, 0);
}
ReleaseSRWLockExclusive(&reqMutex);
if (!run)
break;
req = removeRequest(&reqMutex);
if (req != NULL) {
char *folder = req->folder;
char *message = req->message;
SOCKET sock = req->sock;
int reqID = req->ID;
//printf("[Richiesta #%i] [Thread #%i - assegnata]\n",reqID,*threadID);
int ret = parseRequest(folder, message, sock);
char toLog[BUFSIZE] = "";
sprintf(toLog, "[Richiesta #%i] [Thread #%i: %i]\n", reqID, *threadID, ret);
writeLog(LOGFILE, toLog);
//printf("[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret);
free(req);
closesocket(sock);
}
}
/* NOTE(review): on shutdown (run = false) nothing signals reqCond, so
 * threads parked in the wait above never exit — consider a final
 * WakeAllConditionVariable in the shutdown path. */
return 0;
}
/*
 * Run the TCP server: validate the served folder, initialize WinSock,
 * bind and listen on `port`, start `threadNum` worker threads, then
 * accept connections forever, queueing each received request for the
 * pool via addRequest(). Returns 1 on any fatal setup error, 0 when the
 * accept loop ends.
 *
 * Fixes vs. the previous version:
 *  - `result` was freed after a successful bind and then freed AGAIN on
 *    the listen/accept/getnameinfo/addRequest error paths (double free /
 *    use-after-free); the stale frees are removed and the pointer nulled.
 *  - getnameinfo was passed sizeof(NULL) as the service-buffer length
 *    with a NULL service buffer; the length is now 0 as the API expects.
 *  - the thread-pool mallocs are checked; the unused sendRes local was
 *    removed; recv failures no longer queue an empty request.
 */
int executeServer(char folder[], unsigned short port, int threadNum) {
WIN32_FIND_DATA dirdata;
char temp[PATHLEN];
/* sanity check: the folder must be an enumerable directory */
snprintf(temp,PATHLEN,"%s\\*.*",folder);
HANDLE dir = FindFirstFile(temp,&dirdata);
if (dir==INVALID_HANDLE_VALUE) {
char toLog[BUFSIZE] = "";
sprintf(toLog, "La cartella %s non e' una directory valida o non esiste.\n", folder);
writeLog(LOGFILE, toLog);
//printf("La cartella %s non e' una directory valida o non esiste.\n",folder);
return 1;
}
FindClose(dir);
WSADATA wsaData;
int res;
SOCKET serverSock;
struct addrinfo *result = NULL;
struct addrinfo serveraddr;
char message[BUFSIZE];
int msglen;
char strPort[6];
snprintf(strPort,6,"%hu",port);
/* WinSock initialization */
res = WSAStartup(MAKEWORD(2,2), &wsaData);
if (res!=0) {
printf("Errore WSAStartup: %i\n",res);
return 1;
}
ZeroMemory(&serveraddr,sizeof(serveraddr));
serveraddr.ai_family = AF_INET;
serveraddr.ai_socktype = SOCK_STREAM;
serveraddr.ai_protocol = IPPROTO_TCP;
serveraddr.ai_flags = AI_PASSIVE;
/* resolve the local wildcard address for the listening socket */
res = getaddrinfo(NULL, strPort, &serveraddr, &result);
if (res!=0) {
printf("Errore getadddrinfo: %i\n",res);
WSACleanup();
return 1;
}
serverSock = socket(result->ai_family,result->ai_socktype,result->ai_protocol);
if (serverSock==INVALID_SOCKET) {
printf("Errore socket: %ld\n",WSAGetLastError());
freeaddrinfo(result);
WSACleanup();
return 1;
}
/* bind */
res = bind(serverSock,result->ai_addr,(int)result->ai_addrlen);
if (res==SOCKET_ERROR) {
printf("Errore bind: %d\n",WSAGetLastError());
freeaddrinfo(result);
closesocket(serverSock);
WSACleanup();
return 1;
}
freeaddrinfo(result);
result = NULL;  /* freed exactly once; later error paths must not free it */
/* listen */
res = listen(serverSock,SOMAXCONN);
if (res==SOCKET_ERROR) {
printf("Errore listen: %d\n",WSAGetLastError());
closesocket(serverSock);
WSACleanup();
return 1;
}
/* create the worker thread pool */
int *threadID = (int *)malloc(threadNum*sizeof(int));
HANDLE *threads = (HANDLE *)malloc(threadNum*sizeof(HANDLE));
if (threadID == NULL || threads == NULL) {
printf("Errore malloc thread pool.\n");
free(threadID);
free(threads);
closesocket(serverSock);
WSACleanup();
return 1;
}
for (int i = 0; i<threadNum; i++) {
threadID[i] = i;
threads[i] = CreateThread(NULL, 0, task, &threadID[i], 0, NULL);
}
SOCKET clientSock;
struct sockaddr_in clientAddr;
unsigned int clientlen = sizeof(clientAddr);
/* accept requests and queue them for the worker pool */
while (true) {
clientSock = accept(serverSock, (struct sockaddr *)&clientAddr, &clientlen);
if (clientSock == INVALID_SOCKET) {
printf("Errore accept: %d\n", WSAGetLastError());
closesocket(serverSock);
WSACleanup();
return 1;
}
/* resolve the client address to numeric text for logging */
char clientAddrReadable[NI_MAXHOST];
if (getnameinfo((const struct sockaddr *)&clientAddr, clientlen, clientAddrReadable, sizeof(clientAddrReadable), NULL, 0, NI_NUMERICHOST) != 0) {
printf("Errore risoluzione client.\n");
closesocket(serverSock);
WSACleanup();
return 1;
}
ZeroMemory(message, BUFSIZE);
msglen = recv(clientSock, message, BUFSIZE, 0);
if (msglen <= 0) {
/* connection closed or recv error: drop this client, keep serving */
closesocket(clientSock);
continue;
}
if (addRequest(&reqMutex, &reqCond, folder, clientAddrReadable, message, clientSock) != 0) {
break;
}
}
/* NOTE: threadID/threads are intentionally not freed here — the workers
 * may still be running and hold pointers into threadID. */
closesocket(serverSock);
WSACleanup();
return 0;
}
/*
 * Print the command-line usage text (in Italian, matching the server's
 * other user-facing messages). When `command` is anything other than
 * "-h", an "invalid command" line is printed first — the function doubles
 * as the error handler for unrecognized options in main().
 */
void showHelp(char *command) {
printf("server~ ");
if (strcmp(command, "-h") != 0)
printf("Comando non valido.\n\t");
printf("Usage: {comando_1} [valore_1] ... {comando_n} [valore_n]\n\t\
Ogni valore e' marcato come opzionale, ma puo' essere obbligatorio a seconda del comando che lo precede.\n\n\t\
Comandi (valori obbligatori):\n\t\
-c\t obbligatorio, specifica la cartella di partenza\n\t\
\t ignora la voce folder=<dir/to/start/with>\n\t\
-p\t specifica la porta TCP sulla quale restare in ascolto; default: 8888\n\t\
\t ignora la voce port=<portNum>\n\t\
-n\t specifica il numero di thread da utilizzare; default: 1\n\t\
\t ignora la voce threadNumber=<threadNum>\n\n\t\
Comandi (nessun valore necessario):\n\t\
-h\t mostra questo messaggio\n\n\t\
Dettagli:\n\t\
Tutti i parametri possono essere definiti tramite il file misc/server.conf, ma ignorati se specificati tramite riga di comando.\n\t\
In particolare, l'opzione -c non e' obbligatoria se la cartella e' specificata in tale file.\n");
return;
}
/*
 * Server entry point: create the misc/ log directory and truncate the log
 * file, load defaults from the config file, override them from the
 * command line (-c folder, -p port, -n threads, anything else -> help),
 * then initialize the request queue globals and run the server. A served
 * folder is mandatory (from -c or the config file).
 */
int main(int argc, char *argv[]) {
BOOL r = CreateDirectory("misc",NULL);
if (r != TRUE && GetLastError() != ERROR_ALREADY_EXISTS) {
printf("Errore creazione directory di log.\n");
return 1;
}
/* truncate/create the log file up front so later appends succeed */
FILE *srvlog = fopen(LOGFILE, "w");
if (srvlog == NULL) {
printf("Errore creazione file di log.\n");
return 1;
}
fclose(srvlog);
/* defaults, then config-file values, then command-line overrides */
memset(folder,0,PATHLEN);
port = 0;
threadNum = -1;
loadConfig(&port, folder, &threadNum);
if (argc>1) {
for (int i = 1; i<argc; i++) {
if (strcmp(argv[i], "-c") == 0) {
/* NOTE(review): strstr(argv[i+1], "-") rejects ANY value containing
 * a dash (e.g. folder names like "my-dir"), not just option-like
 * values — presumably meant as an option-prefix check; confirm. */
if (i + 1<argc && strstr(argv[i + 1], "-") == NULL) {
memset(folder, 0, PATHLEN);
strncpy(folder, argv[i + 1], strlen(argv[i + 1]));
i++;
}
else {
showHelp(argv[i]);
}
}
else if (strcmp(argv[i], "-p") == 0) {
if (i + 1<argc && strstr(argv[i + 1], "-") == NULL) {
port = (unsigned short)atoi(argv[i + 1]);
i++;
}
else {
showHelp(argv[i]);
}
}
else if (strcmp(argv[i], "-n") == 0) {
if (i + 1<argc && strstr(argv[i + 1], "-") == NULL) {
threadNum = atoi(argv[i + 1]);
i++;
}
else {
showHelp(argv[i]);
}
}
else
showHelp(argv[i]);
}
}
/* a served folder is mandatory: empty means neither -c nor config set it */
if (strcmp(folder, "\0") == 0) {
showHelp(argv[0]);
return 1;
}
/* initialize the shared request-queue state */
nextReqID = 0;
numReqs = 0;
InitializeSRWLock(&reqMutex);
InitializeConditionVariable(&reqCond);
run = true;
executeServer(folder,port,threadNum);
/* signal background threads to terminate */
run = false;
return 0;
}
|
vision.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS IIIII OOO N N %
% V V I SS I O O NN N %
% V V I SSS I O O N N N %
% V V I SS I O O N NN %
% V IIIII SSSSS IIIII OOO N N %
% %
% %
% MagickCore Computer Vision Methods %
% %
% Software Design %
% Cristy %
% September 2014 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/opencl-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/vision.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n n e c t e d C o m p o n e n t s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConnectedComponentsImage() returns the connected-components of the image
% uniquely labeled. The returned connected components image colors member
% defines the number of unique objects. Choose from 4 or 8-way connectivity.
%
% You are responsible for freeing the connected components objects resources
% with this statement;
%
% objects = (CCObjectInfo *) RelinquishMagickMemory(objects);
%
% The format of the ConnectedComponentsImage method is:
%
% Image *ConnectedComponentsImage(const Image *image,
% const size_t connectivity,CCObjectInfo **objects,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o connectivity: how many neighbors to visit, choose from 4 or 8.
%
% o objects: return the attributes of each unique object.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* qsort() comparator ordering CCObjectInfo records by DESCENDING area.
 * The previous implementation returned (int) (q->area-(ssize_t) p->area):
 * a subtraction-then-truncate trick whose result can overflow/truncate for
 * large areas, yielding the wrong sign and a corrupted sort order. Explicit
 * comparisons are exact for any numeric area type. */
static int CCObjectInfoCompare(const void *x,const void *y)
{
  CCObjectInfo
    *p,
    *q;

  p=(CCObjectInfo *) x;
  q=(CCObjectInfo *) y;
  if (q->area > p->area)
    return(1);
  if (q->area < p->area)
    return(-1);
  return(0);
}
MagickExport Image *ConnectedComponentsImage(const Image *image,
  const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception)
{
#define ConnectedComponentsImageTag "ConnectedComponents/Image"

  CacheView
    *component_view,
    *image_view,
    *object_view;

  CCObjectInfo
    *object;

  char
    *c;

  const char
    *artifact,
    *metrics[CCMaxMetrics];

  double
    max_threshold,
    min_threshold;

  Image
    *component_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *equivalences;

  RectangleInfo
    bounding_box;

  register ssize_t
    i;

  size_t
    size;

  ssize_t
    background_id,
    connect4[2][2] = { { -1, 0 }, { 0, -1 } },
    connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } },
    dx,
    dy,
    first,
    last,
    n,
    step,
    y;

  /*
    Initialize connected components image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (objects != (CCObjectInfo **) NULL)
    *objects=(CCObjectInfo *) NULL;
  component_image=CloneImage(image,0,0,MagickTrue,exception);
  if (component_image == (Image *) NULL)
    return((Image *) NULL);
  component_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse)
    {
      component_image=DestroyImage(component_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Initialize connected components equivalences.  The matrix holds one
    ssize_t per pixel: the union-find parent offset; a pixel whose parent is
    its own offset is a component root.
  */
  size=image->columns*image->rows;
  if (image->columns != (size/image->rows))
    {
      /* columns*rows overflowed size_t */
      component_image=DestroyImage(component_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception);
  if (equivalences == (MatrixInfo *) NULL)
    {
      component_image=DestroyImage(component_image);
      return((Image *) NULL);
    }
  for (n=0; n < (ssize_t) (image->columns*image->rows); n++)
    (void) SetMatrixElement(equivalences,n,0,&n);
  object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object));
  if (object == (CCObjectInfo *) NULL)
    {
      equivalences=DestroyMatrixInfo(equivalences);
      component_image=DestroyImage(component_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(object,0,MaxColormapSize*sizeof(*object));
  for (i=0; i < (ssize_t) MaxColormapSize; i++)
  {
    object[i].id=i;
    object[i].bounding_box.x=(ssize_t) image->columns;
    object[i].bounding_box.y=(ssize_t) image->rows;
    GetPixelInfo(image,&object[i].color);
  }
  /*
    Find connected components: one union-find pass per neighbor direction
    (2 directions for 4-connectivity, 4 for 8-connectivity; only "earlier"
    neighbors are needed since equivalence is symmetric).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++)
  {
    if (status == MagickFalse)
      continue;
    dx=connectivity > 4 ? connect8[n][1] : connect4[n][1];
    dy=connectivity > 4 ? connect8[n][0] : connect4[n][0];
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /*
        Rows y-1..y+1 were fetched; advance p to the start of row y so the
        neighbor at (dx,dy) can be addressed with a signed offset.
      */
      p+=GetPixelChannels(image)*image->columns;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        PixelInfo
          pixel,
          target;

        ssize_t
          neighbor_offset,
          obj,
          offset,
          ox,
          oy,
          root;

        /*
          Is neighbor an authentic pixel and a different color than the pixel?
        */
        GetPixelInfoPixel(image,p,&pixel);
        if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) ||
            ((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows))
          {
            p+=GetPixelChannels(image);
            continue;
          }
        neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx*
          GetPixelChannels(image);
        GetPixelInfoPixel(image,p+neighbor_offset,&target);
        if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
          {
            p+=GetPixelChannels(image);
            continue;
          }
        /*
          Resolve this equivalence.
        */
        offset=y*image->columns+x;
        neighbor_offset=dy*image->columns+dx;
        /*
          Find the root of this pixel's tree.
        */
        ox=offset;
        status=GetMatrixElement(equivalences,ox,0,&obj);
        while (obj != ox)
        {
          ox=obj;
          status=GetMatrixElement(equivalences,ox,0,&obj);
        }
        /*
          Find the root of the neighbor's tree.
        */
        oy=offset+neighbor_offset;
        status=GetMatrixElement(equivalences,oy,0,&obj);
        while (obj != oy)
        {
          oy=obj;
          status=GetMatrixElement(equivalences,oy,0,&obj);
        }
        /*
          Union: the smaller offset becomes the common root.
        */
        if (ox < oy)
          {
            status=SetMatrixElement(equivalences,oy,0,&ox);
            root=ox;
          }
        else
          {
            status=SetMatrixElement(equivalences,ox,0,&oy);
            root=oy;
          }
        /*
          Compress both paths so every node visited points directly at the
          root.  (The previous loops never advanced ox/oy along the chain,
          so only the first link was compressed; labels were unchanged but
          later finds stayed slow.)
        */
        ox=offset;
        status=GetMatrixElement(equivalences,ox,0,&obj);
        while (obj != root)
        {
          status=SetMatrixElement(equivalences,ox,0,&root);
          ox=obj;
          status=GetMatrixElement(equivalences,ox,0,&obj);
        }
        oy=offset+neighbor_offset;
        status=GetMatrixElement(equivalences,oy,0,&obj);
        while (obj != root)
        {
          status=SetMatrixElement(equivalences,oy,0,&root);
          oy=obj;
          status=GetMatrixElement(equivalences,oy,0,&obj);
        }
        status=SetMatrixElement(equivalences,y*image->columns+x,0,&root);
        p+=GetPixelChannels(image);
      }
    }
  }
  /*
    Label connected components: assign a dense id to each root and write it
    into the component image's index channel, accumulating per-object
    bounding box, mean color, centroid and area as we go.
  */
  n=0;
  component_view=AcquireAuthenticCacheView(component_image,exception);
  for (y=0; y < (ssize_t) component_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) component_image->columns; x++)
    {
      ssize_t
        id,
        offset;

      offset=y*image->columns+x;
      status=GetMatrixElement(equivalences,offset,0,&id);
      if (id != offset)
        status=GetMatrixElement(equivalences,id,0,&id);
      else
        {
          /* this pixel is a root: allocate the next dense object id */
          id=n++;
          if (id >= (ssize_t) MaxColormapSize)
            break;
        }
      status=SetMatrixElement(equivalences,offset,0,&id);
      if (x < object[id].bounding_box.x)
        object[id].bounding_box.x=x;
      if (x >= (ssize_t) object[id].bounding_box.width)
        object[id].bounding_box.width=(size_t) x;
      if (y < object[id].bounding_box.y)
        object[id].bounding_box.y=y;
      if (y >= (ssize_t) object[id].bounding_box.height)
        object[id].bounding_box.height=(size_t) y;
      object[id].color.red+=QuantumScale*GetPixelRed(image,p);
      object[id].color.green+=QuantumScale*GetPixelGreen(image,p);
      object[id].color.blue+=QuantumScale*GetPixelBlue(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p);
      if (image->colorspace == CMYKColorspace)
        object[id].color.black+=QuantumScale*GetPixelBlack(image,p);
      object[id].centroid.x+=x;
      object[id].centroid.y+=y;
      object[id].area++;
      SetPixelIndex(component_image,(Quantum) id,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(component_image);
    }
    if (n > (ssize_t) MaxColormapSize)
      break;
    if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  component_view=DestroyCacheView(component_view);
  image_view=DestroyCacheView(image_view);
  equivalences=DestroyMatrixInfo(equivalences);
  if (n > (ssize_t) MaxColormapSize)
    {
      object=(CCObjectInfo *) RelinquishMagickMemory(object);
      component_image=DestroyImage(component_image);
      ThrowImageException(ResourceLimitError,"TooManyObjects");
    }
  /*
    Convert extrema to width/height and running sums to means; the largest
    object is assumed to be the background.
  */
  background_id=0;
  min_threshold=0.0;
  max_threshold=HUGE_VAL;
  component_image->colors=(size_t) n;
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    object[i].bounding_box.width-=(object[i].bounding_box.x-1);
    object[i].bounding_box.height-=(object[i].bounding_box.y-1);
    object[i].color.red/=(object[i].area/QuantumRange);
    object[i].color.green/=(object[i].area/QuantumRange);
    object[i].color.blue/=(object[i].area/QuantumRange);
    if (image->alpha_trait != UndefinedPixelTrait)
      object[i].color.alpha/=(object[i].area/QuantumRange);
    if (image->colorspace == CMYKColorspace)
      object[i].color.black/=(object[i].area/QuantumRange);
    object[i].centroid.x/=object[i].area;
    object[i].centroid.y/=object[i].area;
    max_threshold+=object[i].area;
    if (object[i].area > object[background_id].area)
      background_id=i;
  }
  max_threshold+=MagickEpsilon;
  n=(-1);
  artifact=GetImageArtifact(image,"connected-components:background-id");
  if (artifact != (const char *) NULL)
    background_id=(ssize_t) StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"connected-components:area-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max area threshold.
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      for (i=0; i < (ssize_t) component_image->colors; i++)
        if (((object[i].area < min_threshold) ||
             (object[i].area >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
    }
  artifact=GetImageArtifact(image,"connected-components:keep-colors");
  if (artifact != (const char *) NULL)
    {
      register const char
        *p;

      /*
        Keep selected objects based on color, merge others.  The artifact is
        a semicolon-separated list of colors.
      */
      for (i=0; i < (ssize_t) component_image->colors; i++)
        object[i].merge=MagickTrue;
      for (p=artifact; ; )
      {
        char
          color[MagickPathExtent];

        PixelInfo
          pixel;

        register const char
          *q;

        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
        for (i=0; i < (ssize_t) component_image->colors; i++)
          if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
            object[i].merge=MagickFalse;
        if (*q == '\0')
          break;
        p=q+1;
      }
    }
  artifact=GetImageArtifact(image,"connected-components:keep-ids");
  if (artifact == (const char *) NULL)
    artifact=GetImageArtifact(image,"connected-components:keep");
  if (artifact != (const char *) NULL)
    {
      /*
        Keep selected objects based on id, merge others.  Ids are a comma
        separated list of ids or id ranges (first-last); negative ids count
        back from the number of objects.
      */
      for (i=0; i < (ssize_t) component_image->colors; i++)
        object[i].merge=MagickTrue;
      for (c=(char *) artifact; *c != '\0';)
      {
        while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
          c++;
        first=(ssize_t) strtol(c,&c,10);
        if (first < 0)
          first+=(ssize_t) component_image->colors;
        last=first;
        while (isspace((int) ((unsigned char) *c)) != 0)
          c++;
        if (*c == '-')
          {
            last=(ssize_t) strtol(c+1,&c,10);
            if (last < 0)
              last+=(ssize_t) component_image->colors;
          }
        step=(ssize_t) (first > last ? -1 : 1);
        for ( ; first != (last+step); first+=step)
          if ((first >= 0) && (first < (ssize_t) component_image->colors))
            object[first].merge=MagickFalse;  /* ignore out-of-range ids */
      }
    }
  artifact=GetImageArtifact(image,"connected-components:keep-top");
  if (artifact != (const char *) NULL)
    {
      CCObjectInfo
        *top_objects;

      ssize_t
        top_ids;

      /*
        Keep top objects (by decreasing area), merge the rest.
      */
      top_ids=(ssize_t) StringToDouble(artifact,(char **) NULL);
      top_objects=(CCObjectInfo *) AcquireQuantumMemory(component_image->colors,
        sizeof(*top_objects));
      if (top_objects == (CCObjectInfo *) NULL)
        {
          object=(CCObjectInfo *) RelinquishMagickMemory(object);
          component_image=DestroyImage(component_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(top_objects,object,component_image->colors*sizeof(*object));
      qsort((void *) top_objects,component_image->colors,sizeof(*top_objects),
        CCObjectInfoCompare);
      for (i=top_ids+1; i < (ssize_t) component_image->colors; i++)
        object[top_objects[i].id].merge=MagickTrue;
      top_objects=(CCObjectInfo *) RelinquishMagickMemory(top_objects);
    }
  artifact=GetImageArtifact(image,"connected-components:remove-colors");
  if (artifact != (const char *) NULL)
    {
      register const char
        *p;

      /*
        Remove selected objects based on color, keep others.
      */
      for (p=artifact; ; )
      {
        char
          color[MagickPathExtent];

        PixelInfo
          pixel;

        register const char
          *q;

        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
        for (i=0; i < (ssize_t) component_image->colors; i++)
          if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
            object[i].merge=MagickTrue;
        if (*q == '\0')
          break;
        p=q+1;
      }
    }
  artifact=GetImageArtifact(image,"connected-components:remove-ids");
  if (artifact == (const char *) NULL)
    artifact=GetImageArtifact(image,"connected-components:remove");
  if (artifact != (const char *) NULL)
    for (c=(char *) artifact; *c != '\0';)
    {
      /*
        Remove selected objects based on id, keep others (same id/range
        syntax as keep-ids).
      */
      while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
        c++;
      first=(ssize_t) strtol(c,&c,10);
      if (first < 0)
        first+=(ssize_t) component_image->colors;
      last=first;
      while (isspace((int) ((unsigned char) *c)) != 0)
        c++;
      if (*c == '-')
        {
          last=(ssize_t) strtol(c+1,&c,10);
          if (last < 0)
            last+=(ssize_t) component_image->colors;
        }
      step=(ssize_t) (first > last ? -1 : 1);
      for ( ; first != (last+step); first+=step)
        if ((first >= 0) && (first < (ssize_t) component_image->colors))
          object[first].merge=MagickTrue;  /* ignore out-of-range ids */
    }
  artifact=GetImageArtifact(image,"connected-components:perimeter-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max perimeter threshold.
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      metrics[++n]="perimeter";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        CacheView
          *component_view;

        RectangleInfo
          bounding_box;

        size_t
          pattern[4] = { 1, 0, 0, 0 };

        ssize_t
          y;

        /*
          Compute perimeter of each object.
        */
        if (status == MagickFalse)
          continue;
        component_view=AcquireAuthenticCacheView(component_image,exception);
        bounding_box=object[i].bounding_box;
        for (y=(-1); y < (ssize_t) bounding_box.height+1; y++)
        {
          register const Quantum
            *magick_restrict p;

          register ssize_t
            x;

          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
            bounding_box.y+y,bounding_box.width+2,2,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=(-1); x < (ssize_t) bounding_box.width+1; x++)
          {
            Quantum
              pixels[4];

            register ssize_t
              v;

            size_t
              foreground;

            /*
              An Algorithm for Calculating Objects’ Shape Features in Binary
              Images, Lifeng He, Yuyan Chao: classify each 2x2 window by how
              many of its pixels belong to the object.
            */
            foreground=0;
            for (v=0; v < 2; v++)
            {
              register ssize_t
                u;

              for (u=0; u < 2; u++)
              {
                ssize_t
                  offset;

                offset=v*(bounding_box.width+2)*
                  GetPixelChannels(component_image)+u*
                  GetPixelChannels(component_image);
                pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
                if ((ssize_t) pixels[2*v+u] == i)
                  foreground++;
              }
            }
            if (foreground == 1)
              pattern[1]++;
            else
              if (foreground == 2)
                {
                  if ((((ssize_t) pixels[0] == i) &&
                       ((ssize_t) pixels[3] == i)) ||
                      (((ssize_t) pixels[1] == i) &&
                       ((ssize_t) pixels[2] == i)))
                    pattern[0]++;  /* diagonal */
                  else
                    pattern[2]++;
                }
              else
                if (foreground == 3)
                  pattern[3]++;
            p+=GetPixelChannels(component_image);
          }
        }
        component_view=DestroyCacheView(component_view);
        object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
          MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
      }
      for (i=0; i < (ssize_t) component_image->colors; i++)
        if (((object[i].metric[n] < min_threshold) ||
             (object[i].metric[n] >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
    }
  artifact=GetImageArtifact(image,"connected-components:circularity-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max circularity threshold.
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      metrics[++n]="circularity";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        CacheView
          *component_view;

        RectangleInfo
          bounding_box;

        size_t
          pattern[4] = { 1, 0, 0, 0 };

        ssize_t
          y;

        /*
          Compute perimeter of each object (same 2x2 window scan as the
          perimeter metric), then derive circularity from it.
        */
        if (status == MagickFalse)
          continue;
        component_view=AcquireAuthenticCacheView(component_image,exception);
        bounding_box=object[i].bounding_box;
        /*
          Scan one row/column past the bounding box on every side, exactly
          as in the perimeter metric (the pixel fetch below already requests
          width+2 columns and 2 rows); the previous loop bounds stopped at
          height/width and undercounted patterns on the right/bottom edge.
        */
        for (y=(-1); y < (ssize_t) bounding_box.height+1; y++)
        {
          register const Quantum
            *magick_restrict p;

          register ssize_t
            x;

          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
            bounding_box.y+y,bounding_box.width+2,2,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=(-1); x < (ssize_t) bounding_box.width+1; x++)
          {
            Quantum
              pixels[4];

            register ssize_t
              v;

            size_t
              foreground;

            /*
              An Algorithm for Calculating Objects’ Shape Features in Binary
              Images, Lifeng He, Yuyan Chao.
            */
            foreground=0;
            for (v=0; v < 2; v++)
            {
              register ssize_t
                u;

              for (u=0; u < 2; u++)
              {
                ssize_t
                  offset;

                offset=v*(bounding_box.width+2)*
                  GetPixelChannels(component_image)+u*
                  GetPixelChannels(component_image);
                pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
                if ((ssize_t) pixels[2*v+u] == i)
                  foreground++;
              }
            }
            if (foreground == 1)
              pattern[1]++;
            else
              if (foreground == 2)
                {
                  if ((((ssize_t) pixels[0] == i) &&
                       ((ssize_t) pixels[3] == i)) ||
                      (((ssize_t) pixels[1] == i) &&
                       ((ssize_t) pixels[2] == i)))
                    pattern[0]++;  /* diagonal */
                  else
                    pattern[2]++;
                }
              else
                if (foreground == 3)
                  pattern[3]++;
            p+=GetPixelChannels(component_image);
          }
        }
        component_view=DestroyCacheView(component_view);
        object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
          MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
        /* circularity = 4*pi*area/perimeter^2 (1.0 for a perfect circle) */
        object[i].metric[n]=4.0*MagickPI*object[i].area/(object[i].metric[n]*
          object[i].metric[n]);
      }
      for (i=0; i < (ssize_t) component_image->colors; i++)
        if (((object[i].metric[n] < min_threshold) ||
             (object[i].metric[n] >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
    }
  artifact=GetImageArtifact(image,"connected-components:diameter-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max diameter threshold
        (diameter of the circle with the same area).
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      metrics[++n]="diameter";
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        object[i].metric[n]=ceil(sqrt(4.0*object[i].area/MagickPI)-0.5);
        if (((object[i].metric[n] < min_threshold) ||
             (object[i].metric[n] >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
      }
    }
  artifact=GetImageArtifact(image,"connected-components:major-axis-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max ellipse major threshold.
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      metrics[++n]="major-axis";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        CacheView
          *component_view;

        double
          M00 = 0.0,
          M01 = 0.0,
          M02 = 0.0,
          M10 = 0.0,
          M11 = 0.0,
          M20 = 0.0;

        PointInfo
          centroid = { 0.0, 0.0 };

        RectangleInfo
          bounding_box;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        ssize_t
          y;

        /*
          Compute ellipse major axis of each object from its second-order
          central image moments.
        */
        if (status == MagickFalse)
          continue;
        component_view=AcquireAuthenticCacheView(component_image,exception);
        bounding_box=object[i].bounding_box;
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M00++;
                M10+=x;
                M01+=y;
              }
            p+=GetPixelChannels(component_image);
          }
        }
        centroid.x=M10*PerceptibleReciprocal(M00);
        centroid.y=M01*PerceptibleReciprocal(M00);
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M11+=(x-centroid.x)*(y-centroid.y);
                M20+=(x-centroid.x)*(x-centroid.x);
                M02+=(y-centroid.y)*(y-centroid.y);
              }
            p+=GetPixelChannels(component_image);
          }
        }
        component_view=DestroyCacheView(component_view);
        object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
          sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
      }
      for (i=0; i < (ssize_t) component_image->colors; i++)
        if (((object[i].metric[n] < min_threshold) ||
             (object[i].metric[n] >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
    }
  artifact=GetImageArtifact(image,"connected-components:minor-axis-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max ellipse minor threshold.
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      metrics[++n]="minor-axis";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        CacheView
          *component_view;

        double
          M00 = 0.0,
          M01 = 0.0,
          M02 = 0.0,
          M10 = 0.0,
          M11 = 0.0,
          M20 = 0.0;

        PointInfo
          centroid = { 0.0, 0.0 };

        RectangleInfo
          bounding_box;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        ssize_t
          y;

        /*
          Compute ellipse minor axis of each object from its second-order
          central image moments.
        */
        if (status == MagickFalse)
          continue;
        component_view=AcquireAuthenticCacheView(component_image,exception);
        bounding_box=object[i].bounding_box;
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M00++;
                M10+=x;
                M01+=y;
              }
            p+=GetPixelChannels(component_image);
          }
        }
        centroid.x=M10*PerceptibleReciprocal(M00);
        centroid.y=M01*PerceptibleReciprocal(M00);
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M11+=(x-centroid.x)*(y-centroid.y);
                M20+=(x-centroid.x)*(x-centroid.x);
                M02+=(y-centroid.y)*(y-centroid.y);
              }
            p+=GetPixelChannels(component_image);
          }
        }
        component_view=DestroyCacheView(component_view);
        object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
          sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
      }
      for (i=0; i < (ssize_t) component_image->colors; i++)
        if (((object[i].metric[n] < min_threshold) ||
             (object[i].metric[n] >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
    }
  artifact=GetImageArtifact(image,
    "connected-components:eccentricity-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max eccentricity threshold.
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      metrics[++n]="eccentricity";  /* was misspelled "eccentricy" */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        CacheView
          *component_view;

        double
          M00 = 0.0,
          M01 = 0.0,
          M02 = 0.0,
          M10 = 0.0,
          M11 = 0.0,
          M20 = 0.0;

        PointInfo
          centroid = { 0.0, 0.0 },
          ellipse_axis = { 0.0, 0.0 };

        RectangleInfo
          bounding_box;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        ssize_t
          y;

        /*
          Compute eccentricity of each object from its ellipse axes.
        */
        if (status == MagickFalse)
          continue;
        component_view=AcquireAuthenticCacheView(component_image,exception);
        bounding_box=object[i].bounding_box;
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M00++;
                M10+=x;
                M01+=y;
              }
            p+=GetPixelChannels(component_image);
          }
        }
        centroid.x=M10*PerceptibleReciprocal(M00);
        centroid.y=M01*PerceptibleReciprocal(M00);
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M11+=(x-centroid.x)*(y-centroid.y);
                M20+=(x-centroid.x)*(x-centroid.x);
                M02+=(y-centroid.y)*(y-centroid.y);
              }
            p+=GetPixelChannels(component_image);
          }
        }
        component_view=DestroyCacheView(component_view);
        ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
          sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
        ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
          sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
        object[i].metric[n]=sqrt(1.0-(ellipse_axis.y*ellipse_axis.y*
          PerceptibleReciprocal(ellipse_axis.x*ellipse_axis.x)));
      }
      for (i=0; i < (ssize_t) component_image->colors; i++)
        if (((object[i].metric[n] < min_threshold) ||
             (object[i].metric[n] >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
    }
  artifact=GetImageArtifact(image,"connected-components:angle-threshold");
  if (artifact != (const char *) NULL)
    {
      /*
        Merge any object not within the min and max ellipse angle threshold.
      */
      (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
      metrics[++n]="angle";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        CacheView
          *component_view;

        double
          M00 = 0.0,
          M01 = 0.0,
          M02 = 0.0,
          M10 = 0.0,
          M11 = 0.0,
          M20 = 0.0;

        PointInfo
          centroid = { 0.0, 0.0 };

        RectangleInfo
          bounding_box;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        ssize_t
          y;

        /*
          Compute ellipse angle of each object.
        */
        if (status == MagickFalse)
          continue;
        component_view=AcquireAuthenticCacheView(component_image,exception);
        bounding_box=object[i].bounding_box;
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M00++;
                M10+=x;
                M01+=y;
              }
            p+=GetPixelChannels(component_image);
          }
        }
        centroid.x=M10*PerceptibleReciprocal(M00);
        centroid.y=M01*PerceptibleReciprocal(M00);
        for (y=0; y < (ssize_t) bounding_box.height; y++)
        {
          if (status == MagickFalse)
            continue;
          p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
            bounding_box.y+y,bounding_box.width,1,exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (x=0; x < (ssize_t) bounding_box.width; x++)
          {
            if ((ssize_t) GetPixelIndex(component_image,p) == i)
              {
                M11+=(x-centroid.x)*(y-centroid.y);
                M20+=(x-centroid.x)*(x-centroid.x);
                M02+=(y-centroid.y)*(y-centroid.y);
              }
            p+=GetPixelChannels(component_image);
          }
        }
        component_view=DestroyCacheView(component_view);
        object[i].metric[n]=RadiansToDegrees(1.0/2.0*atan(2.0*M11*
          PerceptibleReciprocal(M20-M02)));
        /*
          Quadrant correction of the atan() result.  fabs() is never
          negative, so the previous tests "fabs(M11) < 0.0" (always false,
          a dead branch) and "fabs(M20-M02) >= 0.0" (always true) are
          replaced with comparisons against MagickEpsilon.
        */
        if (fabs(M11) < MagickEpsilon)
          {
            if ((fabs(M20-M02) >= MagickEpsilon) && ((M20-M02) < 0.0))
              object[i].metric[n]+=90.0;
          }
        else
          if (M11 < 0.0)
            {
              if (fabs(M20-M02) >= MagickEpsilon)
                {
                  if ((M20-M02) < 0.0)
                    object[i].metric[n]+=90.0;
                  else
                    object[i].metric[n]+=180.0;
                }
            }
          else
            if ((fabs(M20-M02) >= MagickEpsilon) && ((M20-M02) < 0.0))
              object[i].metric[n]+=90.0;
      }
      for (i=0; i < (ssize_t) component_image->colors; i++)
        if (((object[i].metric[n] < min_threshold) ||
             (object[i].metric[n] >= max_threshold)) && (i != background_id))
          object[i].merge=MagickTrue;
    }
  /*
    Merge any object tagged above: each merged object is absorbed into the
    neighboring object with the greatest adjacent area.
  */
  component_view=AcquireAuthenticCacheView(component_image,exception);
  object_view=AcquireVirtualCacheView(component_image,exception);
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    register ssize_t
      j;

    size_t
      id;

    if (status == MagickFalse)
      continue;
    if ((object[i].merge == MagickFalse) || (i == background_id))
      continue;  /* keep object */
    /*
      Merge this object: census counts, per candidate object, how many of
      this object's boundary neighbors belong to it.
    */
    for (j=0; j < (ssize_t) component_image->colors; j++)
      object[j].census=0;
    bounding_box=object[i].bounding_box;
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        register ssize_t
          n;

        if (status == MagickFalse)
          continue;
        j=(ssize_t) GetPixelIndex(component_image,p);
        if (j == i)
          for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++)
          {
            register const Quantum
              *p;

            /*
              Compute area of adjacent objects.
            */
            if (status == MagickFalse)
              continue;
            dx=connectivity > 4 ? connect8[n][1] : connect4[n][1];
            dy=connectivity > 4 ? connect8[n][0] : connect4[n][0];
            p=GetCacheViewVirtualPixels(object_view,bounding_box.x+x+dx,
              bounding_box.y+y+dy,1,1,exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            j=(ssize_t) GetPixelIndex(component_image,p);
            if (j != i)
              object[j].census++;
          }
        p+=GetPixelChannels(component_image);
      }
    }
    /*
      Merge with object of greatest adjacent area.
    */
    id=0;
    for (j=1; j < (ssize_t) component_image->colors; j++)
      if (object[j].census > object[id].census)
        id=(size_t) j;
    object[id].area+=object[i].area;
    object[i].area=0.0;
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,q) == i)
          SetPixelIndex(component_image,(Quantum) id,q);
        q+=GetPixelChannels(component_image);
      }
      if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  object_view=DestroyCacheView(object_view);
  component_view=DestroyCacheView(component_view);
  artifact=GetImageArtifact(image,"connected-components:mean-color");
  if (IsStringTrue(artifact) != MagickFalse)
    {
      /*
        Replace object with mean color.
      */
      for (i=0; i < (ssize_t) component_image->colors; i++)
        component_image->colormap[i]=object[i].color;
    }
  (void) SyncImage(component_image,exception);
  artifact=GetImageArtifact(image,"connected-components:verbose");
  if ((IsStringTrue(artifact) != MagickFalse) ||
      (objects != (CCObjectInfo **) NULL))
    {
      /*
        Report statistics on each unique object.  Bounding boxes, centroids
        and areas are recomputed from the (possibly merged) component image;
        census marks whether the object survived merging.
      */
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        object[i].bounding_box.width=0;
        object[i].bounding_box.height=0;
        object[i].bounding_box.x=(ssize_t) component_image->columns;
        object[i].bounding_box.y=(ssize_t) component_image->rows;
        object[i].centroid.x=0;
        object[i].centroid.y=0;
        object[i].census=object[i].area == 0.0 ? 0.0 : 1.0;
        object[i].area=0;
      }
      component_view=AcquireVirtualCacheView(component_image,exception);
      for (y=0; y < (ssize_t) component_image->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns,
          1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) component_image->columns; x++)
        {
          size_t
            id;

          id=(size_t) GetPixelIndex(component_image,p);
          if (x < object[id].bounding_box.x)
            object[id].bounding_box.x=x;
          if (x > (ssize_t) object[id].bounding_box.width)
            object[id].bounding_box.width=(size_t) x;
          if (y < object[id].bounding_box.y)
            object[id].bounding_box.y=y;
          if (y > (ssize_t) object[id].bounding_box.height)
            object[id].bounding_box.height=(size_t) y;
          object[id].centroid.x+=x;
          object[id].centroid.y+=y;
          object[id].area++;
          p+=GetPixelChannels(component_image);
        }
      }
      for (i=0; i < (ssize_t) component_image->colors; i++)
      {
        object[i].bounding_box.width-=(object[i].bounding_box.x-1);
        object[i].bounding_box.height-=(object[i].bounding_box.y-1);
        object[i].centroid.x=object[i].centroid.x/object[i].area;
        object[i].centroid.y=object[i].centroid.y/object[i].area;
      }
      component_view=DestroyCacheView(component_view);
      qsort((void *) object,component_image->colors,sizeof(*object),
        CCObjectInfoCompare);
      if (objects == (CCObjectInfo **) NULL)
        {
          register ssize_t
            j;

          artifact=GetImageArtifact(image,
            "connected-components:exclude-header");
          if (IsStringTrue(artifact) == MagickFalse)
            {
              (void) fprintf(stdout,
                "Objects (id: bounding-box centroid area mean-color");
              for (j=0; j <= n; j++)
                (void) fprintf(stdout," %s",metrics[j]);
              (void) fprintf(stdout,"):\n");
            }
          for (i=0; i < (ssize_t) component_image->colors; i++)
            if (object[i].census > 0.0)
              {
                char
                  mean_color[MagickPathExtent];

                GetColorTuple(&object[i].color,MagickFalse,mean_color);
                (void) fprintf(stdout,
                  "  %.20g: %.20gx%.20g%+.20g%+.20g %.1f,%.1f %.*g %s",
                  (double) object[i].id,(double) object[i].bounding_box.width,
                  (double) object[i].bounding_box.height,(double)
                  object[i].bounding_box.x,(double) object[i].bounding_box.y,
                  object[i].centroid.x,object[i].centroid.y,
                  GetMagickPrecision(),(double) object[i].area,mean_color);
                for (j=0; j <= n; j++)
                  (void) fprintf(stdout," %.*g",GetMagickPrecision(),
                    object[i].metric[j]);
                (void) fprintf(stdout,"\n");
              }
        }
    }
  if (objects == (CCObjectInfo **) NULL)
    object=(CCObjectInfo *) RelinquishMagickMemory(object);
  else
    *objects=object;
  return(component_image);
}
|
GB_unop__ainv_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint16_uint16)
// op(A') function: GB (_unop_tran__ainv_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = -aij (additive inverse, modular for unsigned) to every entry of A,
// writing the result into Cx. Handles both the full/sparse case (Ab == NULL)
// and the bitmap case (Ab marks which positions hold entries).
GrB_Info GB (_unop_apply__ainv_uint16_uint16)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries (or bitmap slots) to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator/type combination disabled at compile time; caller falls back
    // to the generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is full or sparse: every position p holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            // unsigned negation: wraps modulo 2^16
            Cx [p] = -z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions that hold no entry
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -A': transpose A, then apply the unary AINV operator to each entry.
// The actual transpose loop lives in the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__ainv_uint16_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-thread workspaces for the transpose
    const int64_t *restrict A_slice, // partition of A's entries across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator/type combination disabled; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // the template reads the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_dr.h | /*
* OpenMP + dag_recorder
*/
/*
this file provides macros with which users can
easily turn on/off dag recorder for your OpenMP
task parallel programs.
provided macros are:
(i) pragma_omp_task(option, statement)
(ii) pragma_omp_taskc(option, callable)
(iii) pragma_omp_taskwait
they are respectively translated into
#pragma omp task option
statement
#pragma omp task option
callable()
#pragma omp taskwait
when DAG_RECORDER is set to a number >= 2,
they insert instrumentation code for dag
recorder.
ideally we like to instrument OpenMP
programs written with the regular
pragma's, but I don't know how to do
it. so we ask the programmer to write
OpenMP fragment such as
#pragma omp task shared(x)
x = foo();
as
pragma_omp_task(shared(x),
x = foo());
*/
#pragma once
#include <omp.h>
#include <dag_recorder.h>
#define do_pragma(x) _Pragma( #x )
#define pragma_omp(x) do_pragma(omp x)
#define pragma_omp_task_no_prof(options, statement) \
pragma_omp(task options) do { statement; } while(0)
#define pragma_omp_taskc_no_prof(options, callable) \
pragma_omp_task_no_prof(options, callable())
#define pragma_omp_taskwait_no_prof pragma_omp(taskwait)
#define pragma_omp_task_with_prof(options, statement) do { \
dr_dag_node * __c__ = 0; \
dr_dag_node * __t__ = dr_enter_create_task(&__c__); \
pragma_omp(task options) do { \
dr_start_task(__c__); \
statement; \
dr_end_task(); \
} while(0); \
dr_return_from_create_task(__t__); \
} while (0)
#define pragma_omp_taskc_with_prof(options, callable) \
pragma_omp_task_with_prof(options, callable())
#define pragma_omp_taskwait_with_prof do { \
dr_dag_node * __t__ = dr_enter_wait_tasks(); \
pragma_omp(taskwait); \
dr_return_from_wait_tasks(__t__); \
} while(0)
#if DAG_RECORDER>=2
#define pragma_omp_task(options, statement) \
pragma_omp_task_with_prof(options, statement)
#define pragma_omp_taskc(options, callable) \
pragma_omp_taskc_with_prof(options, callable)
#define pragma_omp_taskwait pragma_omp_taskwait_with_prof
#define dr_get_max_workers() (omp_in_parallel() ? omp_get_num_threads() : omp_get_max_threads())
#define dr_get_worker() omp_get_thread_num()
/* when using DAG Recorder with OpenMP task parallelism,
the following usual sequence needs to be instrumented
#pragma omp parallel
#pragma omp single
S;
to the following
{
dr_dag_node * __t__ = dr_enter_other();
#pragma omp parallel
#pragma omp single
{
dr_return_from_other(__t__);
S;
__t__ = dr_enter_other();
}
dr_return_from_other(__t__);
}
*/
#define pragma_omp_parallel_single(clause, S) \
do { \
dr_dag_node * __t__ = dr_enter_other(); \
pragma_omp(parallel) { \
pragma_omp(single clause) { \
dr_return_from_other(__t__); \
S \
__t__ = dr_enter_other(); \
} \
} \
dr_return_from_other(__t__); \
} while(0)
#else
#define pragma_omp_task(options, statement) \
pragma_omp_task_no_prof(options, statement)
#define pragma_omp_taskc(options, callable) \
pragma_omp_taskc_no_prof(options, callable)
#define pragma_omp_taskwait pragma_omp_taskwait_no_prof
#define pragma_omp_parallel_single(clause, S) \
do { \
pragma_omp(parallel) { \
pragma_omp(single clause) { \
S \
} \
} \
} while(0)
#endif
|
proc-tst-omp.c | /*
* Oracle Linux DTrace.
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Licensed under the Universal Permissive License v 1.0 as shown at
* http://oss.oracle.com/licenses/upl.
*/
#include <stdio.h>
int main(int argc, char **argv)
{
	/* Announce the start of the test before any threads are spawned. */
	printf("TEST: start\n");

	/* Two OpenMP threads each report progress (one line per thread). */
#pragma omp parallel num_threads(2)
	{
		printf("TEST: underway\n");
	}

	return 0;
}
|
RecordTable.h | /*
* Souffle - A Datalog Compiler
* Copyright (c) 2020, The Souffle Developers. All rights reserved.
* Licensed under the Universal Permissive License v 1.0 as shown at:
* - https://opensource.org/licenses/UPL
* - <souffle root>/licenses/SOUFFLE-UPL.txt
*/
/************************************************************************
*
* @file RecordTable.h
*
* Data container to store Records of the Datalog program.
*
***********************************************************************/
#pragma once
#include "CompiledTuple.h"
#include "ParallelUtils.h"
#include "RamTypes.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <limits>
#include <map>
#include <unordered_map>
#include <vector>
namespace souffle {
/**
* A bidirectional mapping between tuples and reference indices.
*/
class RecordMap {
    /** The arity of the stored tuples */
    const size_t arity;

    /** The mapping from tuples to references/indices */
    std::map<std::vector<RamDomain>, RamDomain> recordToIndex;

    /** The mapping from indices to tuples.
     *  Returned pointers come from the inner vectors' heap buffers, which
     *  stay valid even when push_back reallocates the outer vector (moving
     *  a std::vector does not move its element storage). */
    std::vector<std::vector<RamDomain>> indexToRecord;

public:
    explicit RecordMap(size_t arity) : arity(arity), indexToRecord(1) {} // note: index 0 element left free

    /**
     * Pack the given vector -- create a new reference in necessary.
     *
     * Thread safety: the whole lookup/insert runs under the named critical
     * section "record_pack"; the insertion additionally nests the
     * "record_unpack" critical so it cannot interleave with readers in
     * unpack(). Criticals are always acquired in the order
     * record_pack -> record_unpack, so no deadlock is possible.
     */
    RamDomain pack(const std::vector<RamDomain>& vector) {
        RamDomain index;
#pragma omp critical(record_pack)
        {
            auto pos = recordToIndex.find(vector);
            if (pos != recordToIndex.end()) {
                // tuple already known: reuse its reference
                index = pos->second;
            } else {
#pragma omp critical(record_unpack)
                {
                    // first occurrence: append and record the new index
                    indexToRecord.push_back(vector);
                    index = indexToRecord.size() - 1;
                    recordToIndex[vector] = index;

                    // assert that new index is smaller than the range
                    assert(index != std::numeric_limits<RamDomain>::max());
                }
            }
        }
        return index;
    }

    /**
     * Packs the given tuple -- and may create a new reference if necessary.
     * Copies `arity` elements from the raw pointer into a vector first.
     */
    RamDomain pack(const RamDomain* tuple) {
        std::vector<RamDomain> tmp(arity);
        for (size_t i = 0; i < arity; i++) {
            tmp[i] = tuple[i];
        }
        return pack(tmp);
    }

    /**
     * Obtains a pointer to the tuple addressed by the given index.
     * Only the indexToRecord access is guarded; see the member comment for
     * why the returned pointer remains valid after concurrent insertions.
     */
    RamDomain* unpack(RamDomain index) {
        RamDomain* res;

#pragma omp critical(record_unpack)
        res = indexToRecord[index].data();

        return res;
    }

    /** Const variant of unpack(); same synchronization as above. */
    const RamDomain* unpack(RamDomain index) const {
        const RamDomain* res;

#pragma omp critical(record_unpack)
        res = indexToRecord[index].data();

        return res;
    }
};
/**
 * Facade over per-arity RecordMaps: packs tuples/vectors of any arity into
 * references and unpacks them again. Fix over the original: unpackTuple()
 * declared its local as ram::Tuple<RamDomain, Arity> although the function
 * returns ram::Tuple<Domain, Arity>; the local now uses the declared
 * template type so no conversion is required on return.
 */
class RecordTable {
public:
    RecordTable() = default;
    virtual ~RecordTable() = default;

    /**
     * A function packing a tuple of the given arity into a reference.
     */
    RamDomain pack(RamDomain* tuple, size_t arity) {
        return getForArity(arity).pack(tuple);
    }

    /**
     * A function packing a vector into a reference.
     */
    RamDomain pack(const std::vector<RamDomain>& vector) {
        return getForArity(vector.size()).pack(vector);
    }

    /**
     * A function packing a tuple of the given arity into a reference.
     */
    template <typename Domain, std::size_t Arity>
    RamDomain pack(ram::Tuple<Domain, Arity> tuple) {
        return getForArity(Arity).pack(static_cast<RamDomain*>(tuple.data));
    }

    /**
     * A function obtaining a pointer to the tuple addressed by the given reference.
     */
    RamDomain* unpack(RamDomain ref, size_t arity) {
        auto iter = maps.find(arity);
        assert(iter != maps.end() && "Attempting to unpack non-existing record");
        return (iter->second).unpack(ref);
    }

    /**
     * A function obtaining a pointer to the tuple addressed by the given reference.
     */
    const RamDomain* unpack(RamDomain ref, size_t arity) const {
        auto iter = maps.find(arity);
        assert(iter != maps.end() && "Attempting to unpack non-existing record");
        return (iter->second).unpack(ref);
    }

    /**
     * A function obtaining a copy of the tuple addressed by the given reference.
     */
    template <typename Domain, std::size_t Arity>
    ram::Tuple<Domain, Arity> unpackTuple(RamDomain ref) {
        // was ram::Tuple<RamDomain, Arity>: must match the return type
        ram::Tuple<Domain, Arity> tuple;
        RamDomain* data = getForArity(Arity).unpack(ref);
        for (size_t i = 0; i < Arity; ++i) {
            tuple.data[i] = data[i];
        }
        return tuple;
    }

    /**
     * Determines whether the given reference is the nil reference encoding
     * the absence of any nested record.
     */
    bool isNil(RamDomain ref) const {
        return ref == getNil();
    }

    /** The reference value reserved for "no nested record". */
    static constexpr RamDomain getNil() {
        return 0;
    }

private:
    /** One RecordMap per tuple arity. */
    std::unordered_map<size_t, RecordMap> maps;

    /** Fetches (creating on demand) the map for the given arity; the
     *  emplace runs under a critical section so concurrent first accesses
     *  for the same arity are safe. */
    RecordMap& getForArity(size_t arity) {
        std::unordered_map<size_t, RecordMap>::iterator mapsIterator;
#pragma omp critical(RecordTableGetForArity)
        {
            // This will create a new map if it doesn't exist yet.
            mapsIterator = maps.emplace(arity, arity).first;
        }
        return mapsIterator->second;
    }
};
} // namespace souffle
|
sum.c | #include <omp.h>
#define N 100000000
#define NTHREADS 8
int values[N];
int
main(int argc, char *argv[])
{
	int tid;
	/* Per-"thread" partial sums; static, so zero-initialized. */
	static int sum[NTHREADS];

#ifdef _OPENMP
	omp_set_num_threads(NTHREADS);

#pragma omp parallel private(tid)
	{
		tid = omp_get_thread_num();
#else
	/* Serial fallback: iterate the thread ids one by one.
	 * NOTE: the '{' opened here (or by the parallel block above) is
	 * closed by the single '}' after the inner loop -- the two
	 * preprocessor branches deliberately share a closing brace. */
	for (tid = 0; tid < NTHREADS; tid++) {
#endif
		/* Each (real or simulated) thread scans the whole array,
		 * accumulating values shifted right by its own id. */
		for (int i = 0; i < N; i++)
			sum[tid] += values[i] >> tid;
	}
}
|
9115.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill the ni x nj input array with deterministic values in [0, ~2):
 * A[i][j] = (i + j) / nj, so results are reproducible across runs. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints B to stderr, 20 values per line. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* line break every 20 elements (position computed in row-major order) */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 2D convolution of A with a fixed 3x3 stencil,
 * writing the interior of B. The whole function is timed, including the
 * call and return.
 *
 * Fixes over the original:
 *  - `collapse` and `schedule` are loop (`for`) clauses and are not valid
 *    on `#pragma omp parallel`; conforming compilers reject the old
 *    directive. The schedule stays on the inner `omp for`.
 *  - the outer index `i` was shared by default while every thread executed
 *    the outer loop, which is a data race; `i` is now private. Each thread
 *    runs the full i loop and the inner `omp for` partitions the j range,
 *    so all threads encounter the worksharing construct the same number of
 *    times, as the OpenMP spec requires.
 */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
#pragma omp parallel num_threads(2) private(i, j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
#pragma omp for schedule(static, 8)
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	      + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	      + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Driver: allocate, initialize, run the timed kernel, print timing, and
 * keep the result live so the compiler cannot eliminate the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
gather_mm.h | /*!
* Copyright (c) 2022 by Contributors
* \file array/cpu/gather_mm.h
* \brief GATHER_MM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_GATHER_MM_H_
#define DGL_ARRAY_CPU_GATHER_MM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <utility>
namespace dgl {
namespace aten {
namespace cpu {
/*!
 * \brief Out-of-place matrix transpose.
 * \param in  Row-major input of N rows by M columns.
 * \param out Row-major output of M rows by N columns (out = in^T).
 * \param N   Number of rows of the input.
 * \param M   Number of columns of the input.
 */
template <typename DType>
void transpose(const DType *in, DType *out, const int N, const int M) {
  const int total = N * M;
#pragma omp parallel for
  for (int idx = 0; idx < total; idx++) {
    // idx walks the output linearly: row idx/N, column idx%N of out.
    const int out_row = idx / N;
    const int out_col = idx % N;
    out[idx] = in[out_col * M + out_row];
  }
}
/*!
 * \brief Naive dense matrix multiply C = A * B.
 * \param A Row-major M x K matrix.
 * \param B Row-major K x N matrix.
 * \param C Row-major M x N output matrix (fully overwritten).
 */
template <typename DType>
void matmul(const DType *A, const DType *B,
            DType *C, const int M, const int N, const int K) {
  // Rows of C are independent, so they are distributed across threads.
#pragma omp parallel for
  for (int row = 0; row < M; row++) {
    for (int col = 0; col < N; col++) {
      DType acc = 0;
      for (int t = 0; t < K; t++) {
        acc += A[row * K + t] * B[t * N + col];
      }
      C[row * N + col] = acc;
    }
  }
}
/*!
 * \brief CPU kernel of Gather_mm. The input matrix A is expected to be
 *        sorted according to relation type.
 * \param A The input dense matrix of dimension m x k
 * \param B The input dense matrix of dimension k x n
 * \param C The output dense matrix of dimension m x n
 * \param A_dim1_per_rel The number of rows in each relation in A
 * \param B_dim1_per_rel The number of rows in each relation in B
 * \param a_trans Matrix A to be transposed
 * \param b_trans Matrix B to be transposed
 *
 * Fixes over the original: the per-relation shape assert was broken by
 * operator precedence -- `x ? a : y == z ? b : c` parses as
 * `x ? a : ((y == z) ? b : c)`, so the intended equality was never
 * checked; it is now explicitly parenthesized. The unused locals
 * h_col and w_row were removed.
 */
template <int XPU, typename IdType, typename DType>
void gatherMM_SortedEtype(const NDArray A,
          const NDArray B,
          NDArray C,
          const NDArray A_dim1_per_rel,
          const NDArray B_dim1_per_rel,
          bool a_trans, bool b_trans) {
    assert(A_dim1_per_rel.NumElements() == B_dim1_per_rel.NumElements());
    int64_t num_rel = A_dim1_per_rel.NumElements();
    const DType *A_data = A.Ptr<DType>();
    const DType *B_data = B.Ptr<DType>();
    const IdType* A_rel_data = A_dim1_per_rel.Ptr<IdType>();
    const IdType* B_rel_data = B_dim1_per_rel.Ptr<IdType>();
    DType *C_data = C.Ptr<DType>();

    int64_t A_offset = 0, B_offset = 0, C_offset = 0;
    int64_t m, n, k;
    for (int etype = 0; etype < num_rel; ++etype) {
        // cols of op(A) must equal rows of op(B)
        assert(((a_trans) ? A_rel_data[etype] : A->shape[1]) ==
               ((b_trans) ? B->shape[1] : B_rel_data[etype]));
        m = A_rel_data[etype];  // rows of A
        n = B->shape[1];        // cols of B
        k = B_rel_data[etype];  // rows of B == cols of A
        NDArray A_trans, B_trans;
        // materialize explicit transposes before the plain matmul
        if (a_trans) {
            A_trans = NDArray::Empty({m * k}, A->dtype, A->ctx);
            transpose<DType>(A_data + A_offset, static_cast<DType *>(A_trans->data), m, k);
        }
        if (b_trans) {
            B_trans = NDArray::Empty({k * n}, B->dtype, B->ctx);
            transpose<DType>(B_data + B_offset, static_cast<DType *>(B_trans->data), k, n);
        }
        // adjust the effective (m, n, k) to the transposed shapes
        if (a_trans || b_trans) {
            int64_t tmp = k;
            if (a_trans)
                std::swap(m, k);
            if (b_trans) {
                k = tmp;
                std::swap(n, k);
            }
        }
        matmul<DType>(
            (a_trans) ? static_cast<DType *>(A_trans->data) : A_data + A_offset,
            (b_trans) ? static_cast<DType *>(B_trans->data) : B_data + B_offset,
            C_data + C_offset, m, n, k);
        // advance into the next relation's block of each matrix
        A_offset += m * k;
        B_offset += k * n;
        C_offset += m * n;
    }
}
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_GATHER_MM_H_
|
GxB_SelectOp_wait.c | //------------------------------------------------------------------------------
// GxB_SelectOp_wait: wait for a user-defined GxB_SelectOp to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GxB_SelectOp has no pending
// operations to wait for. All this method does is verify that the op is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GxB_SelectOp_wait  // no work, just check if the GxB_SelectOp is valid
(
    GxB_SelectOp op,
    GrB_WaitMode waitmode   // unused: a SelectOp never has pending work
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_SelectOp_wait (op, waitmode)") ;
    GB_RETURN_IF_NULL_OR_FAULTY (op) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // make any prior writes to the op visible to all threads
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
shared_array.h | #ifndef OPENMC_SHARED_ARRAY_H
#define OPENMC_SHARED_ARRAY_H
//! \file shared_array.h
//! \brief Shared array data structure
#include <cstdint>
#include <memory>
namespace openmc {
//==============================================================================
// Class declarations
//==============================================================================
// This container is an array that is capable of being appended to in an
// thread safe manner by use of atomics. It only provides protection for the
// use cases currently present in OpenMC. Namely, it covers the scenario where
// multiple threads are appending to an array, but no threads are reading from
// or operating on it in any other way at the same time. Multiple threads can
// call the thread_safe_append() function concurrently and store data to the
// object at the index returned from thread_safe_append() safely, but no other
// operations are protected.
// This container is an array that is capable of being appended to in an
// thread safe manner by use of atomics. It only provides protection for the
// use cases currently present in OpenMC. Namely, it covers the scenario where
// multiple threads are appending to an array, but no threads are reading from
// or operating on it in any other way at the same time.
//
// Fix over the original: size(), capacity(), and resize() did not change any
// observable issue, but size() and capacity() were not const-qualified, so a
// const SharedArray could not even report its dimensions although const
// operator[] and data() overloads exist; they are now const.
template <typename T>
class SharedArray {

public:
  //==========================================================================
  // Constructors

  //! Default constructor.
  SharedArray() = default;

  //! Construct a zero size container with space to hold capacity number of
  //! elements.
  //
  //! \param capacity The number of elements for the container to allocate
  //! space for
  SharedArray(int64_t capacity) : capacity_(capacity)
  {
    data_ = std::make_unique<T[]>(capacity);
  }

  //==========================================================================
  // Methods and Accessors

  //! Return a reference to the element at specified location i. No bounds
  //! checking is performed.
  T& operator[](int64_t i) {return data_[i];}
  const T& operator[](int64_t i) const { return data_[i]; }

  //! Allocate space in the container for the specified number of elements.
  //! reserve() does not change the size of the container.
  //
  //! \param capacity The number of elements to allocate in the container
  void reserve(int64_t capacity)
  {
    data_ = std::make_unique<T[]>(capacity);
    capacity_ = capacity;
  }

  //! Increase the size of the container by one and append value to the
  //! array. Returns an index to the element of the array written to. Also
  //! tests to enforce that the append operation does not read off the end
  //! of the array. In the event that this does happen, set the size to be
  //! equal to the capacity and return -1.
  //
  //! \value The value of the element to append
  //! \return The index in the array written to. In the event that this
  //! index would be greater than what was allocated for the container,
  //! return -1.
  int64_t thread_safe_append(const T& value)
  {
    // Atomically capture the index we want to write to
    int64_t idx;
#pragma omp atomic capture seq_cst
    idx = size_++;

    // Check that we haven't written off the end of the array
    if (idx >= capacity_) {
#pragma omp atomic write seq_cst
      size_ = capacity_;
      return -1;
    }

    // Copy element value to the array
    data_[idx] = value;

    return idx;
  }

  //! Free any space that was allocated for the container. Set the
  //! container's size and capacity to 0.
  void clear()
  {
    data_.reset();
    size_ = 0;
    capacity_ = 0;
  }

  //! Return the number of elements in the container
  int64_t size() const {return size_;}

  //! Resize the container to contain a specified number of elements. This is
  //! useful in cases where the container is written to in a non-thread safe manner,
  //! where the internal size of the array needs to be manually updated.
  //
  //! \param size The new size of the container
  void resize(int64_t size) {size_ = size;}

  //! Return the number of elements that the container has currently allocated
  //! space for.
  int64_t capacity() const {return capacity_;}

  //! Return pointer to the underlying array serving as element storage.
  T* data() {return data_.get();}
  const T* data() const {return data_.get();}

private:
  //==========================================================================
  // Data members

  std::unique_ptr<T[]> data_; //!< An RAII handle to the elements
  int64_t size_ {0}; //!< The current number of elements
  int64_t capacity_ {0}; //!< The total space allocated for elements
};
} // namespace openmc
#endif // OPENMC_SHARED_ARRAY_H
|
WAV.h | #ifndef _H_WAV_
#define _H_WAV_
#include <stdlib.h>
#include <stdio.h>
//for types
#include <stdint.h>
#include <cstdlib>
#include <string.h>
#include <string>
#include <vector>
// Data Type is fixed as short
// Reader/writer for RIFF/WAVE files; the sample data type is fixed as short
// (16-bit PCM) unless the file header declares IEEE float (fmt_type == 3).
class WAV {
  /*
   * http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
   * */
private:
  uint32_t head_size=44;   // canonical PCM header size in bytes
  bool non_pcm;            // true when fmt chunk size is 18 (non-PCM layout)
  // 4bytes fixed size header information -> uint32_t
  char riff_id[4]; // riff string
  uint32_t riff_size; // overall size of fp in bytes
  char wave_id[4]; // wave string
  char fmt_id[4]; // fmt string with trailing null char

  //           | pcm | non-pcm
  // fmt_size  |  16 |  18
  //
  uint32_t fmt_size; // format chunk size 16,18,40
  short
      fmt_type; // format type 1-PCM 3-IEEE float 6- 8bit A law, 7- 8bit ,u law
  unsigned short channels; // no of channel
  uint32_t sample_rate; // SampleRate(blocks per second)
  uint32_t byte_rate; // ByteRate = SampleRate * NumChannels * BitsPerSample/8
  short block_align; // NumChannels * BitsPerSample/8
  short bit_per_sample; // bits per sample, 8 - 8bits, 16-16bits etc

  /* (if non -pcm ) extra fmt/fact chunk fields */
  uint32_t cb_size; //size of the extension
  char fact_id[4];
  uint32_t fact_size;
  uint32_t dwSampleLength;

  char data_id[4]; // DATA string or FLLR string
  uint32_t data_size; // NumSamples * NumChannels * BitsPerSample/8 - size of
                      // the nex chunk that will be read

  FILE *fp;            // underlying file handle (nullptr when closed)
  bool IsOpen;
  char file_name[1024];

  // For Input usage only
  bool use_buf;        // true once UseBuf() allocated the frame buffer
  int frame_size;
  int shift_size;
  int size_unit;       // bytes per sample (bit_per_sample / 8)
  void* buf;           // frame buffer; element type depends on fmt_type
  bool isRead;         // true when opened for reading, false for writing

  /*for extensible format (fmt_size == 40)*/
  bool extensible;
  int w_valid_bits_per_sample;
  int dw_channel_mask;
  int sub_format;

  // short* buf;
public:
  inline WAV();
  inline WAV(short _ch, uint32_t _rate);
  inline WAV(short _ch, uint32_t _rate, int frame_size, int shift_size);
  inline ~WAV();
  inline int NewFile(const char *_file_name);
  inline int NewFile(std::string file_name_);
  inline int OpenFile(const char *_file_name);
  inline int OpenFile(std::string file_name_);
  inline int Append(short *app_data, unsigned int app_size);
  inline int Append(float*app_data, unsigned int app_size);
  inline void WriteHeader();
  // set default
  inline void Init();
  // reset data;
  inline void Clear();
  inline void Finish();
  inline void ReadHeader();
  /* There might be compile error for ReadUnit() in Visual Studio.
   * in this case, try to update your VS to most recent version. */
  inline size_t ReadUnit(short*dest,int unit);
  inline size_t ReadUnit(float*dest,int unit);
  inline int IsEOF() const;
  inline void Print() const;
  inline void Rewind();
  inline int Convert2ShiftedArray(double **raw);
  inline int Convert2ShiftedArray(double *raw);
  inline int Convert2Array(double **raw);
  // Split 2 channel Wav into two 1 channel wav files.
  inline void SplitBy2(const char* f1,const char* f2);
  inline void SetSizes(int frame,int shift);
  inline int GetChannels();
  inline bool GetIsOpen();
  inline uint32_t GetSize();
  inline uint32_t GetSizeUnit();
  inline uint32_t GetSampleRate();
  inline uint32_t GetNumOfSamples();
  inline const char* GetFileName();
  inline short GetFmtType();
  inline void UseBuf(int frame_size,int shift_size);
  inline bool checkValidHeader();
  inline FILE* GetFilePointer();
  // Normalize WAV
  inline void Normalize();
  /*Split Wav fp into each channel */
  inline static void Split(char* );
};
// Reset every member to the default 16-bit PCM state (no file, no channels,
// zero sample rate); called by the default constructor.
void WAV::Init() {
  fp = nullptr;
  buf = nullptr;
  isRead = false;

  riff_id[0] = 'R';
  riff_id[1] = 'I';
  riff_id[2] = 'F';
  riff_id[3] = 'F';
  wave_id[0] = 'W';
  wave_id[1] = 'A';
  wave_id[2] = 'V';
  wave_id[3] = 'E';
  fmt_id[0] = 'f';
  fmt_id[1] = 'm';
  fmt_id[2] = 't';
  fmt_id[3] = ' ';

  // short 16bit ->2 bytes
  fmt_size = 16;
  // 1- PCM
  fmt_type = 1;
  channels = 0;
  // bit per sample, 8 or 16. not sure
  // Presume short = 16 bit
  bit_per_sample = 16;
  size_unit = bit_per_sample/8;

  // have to be Optional
  sample_rate = 0;
  // smaple_rate * channels * fmt_size / 8 (zero until sample_rate is set)
  byte_rate = sample_rate * channels * bit_per_sample/ 8;
  block_align = bit_per_sample * channels / 8;

  data_id[0] = 'd';
  data_id[1] = 'a';
  data_id[2] = 't';
  data_id[3] = 'a';

  // Number of Samples * Number of Channels * Bit_per_sample / 8
  data_size = 0 * channels * bit_per_sample / 8;
  riff_size = data_size + head_size;

  IsOpen = false;
  use_buf = false;
  non_pcm = false;
  extensible = false;

  frame_size = 512;
  shift_size = 512;
}
/* default WAV format */
// Default constructor: delegate all member setup to Init().
WAV::WAV() {
#ifndef NDEBUG
  // printf("WAV::contsructor\n");
#endif
  Init();
}
// Construct a writable 16-bit PCM header for the given channel count and
// sample rate. Delegates to the default constructor first, then fills in the
// rate-dependent fields.
//
// Fix over the original: byte_rate and block_align were each computed twice
// in a row (copy-pasted duplicate statements); the duplicates are removed.
WAV::WAV(short _ch, uint32_t _rate) : WAV() {
#ifndef NDEBUG
  // printf("WAV::constructor (ch,rate)\n");
#endif
  channels = _ch;
  sample_rate = _rate;

  // ByteRate = SampleRate * NumChannels * BitsPerSample / 8
  byte_rate = sample_rate * channels * bit_per_sample/ 8;
  // BlockAlign = NumChannels * BitsPerSample / 8
  block_align = bit_per_sample * channels / 8;

  // no samples written yet
  data_size = 0 * channels * bit_per_sample / 8;
  riff_size = data_size + head_size;
}
// Construct a header as above and additionally set the analysis frame and
// shift sizes used by the buffered-read helpers.
WAV::WAV(short _ch, uint32_t _rate, int _frame_size, int _shift_size)
    : WAV(_ch, _rate) {
#ifndef NDEBUG
  // printf("WAV::constructor(ch,rate,frame,shift)\n");
#endif
  frame_size = _frame_size;
  shift_size = _shift_size;
}
// Destructor: finalize the header / close the file if still open, and free
// the frame buffer (whose element type depends on fmt_type) if allocated.
WAV::~WAV() {
  if (IsOpen) {
#ifndef NDEBUG
    //printf("WAV::%s destructor\n", file_name);
#endif
    Finish();
  }
  if (use_buf){
    // buf was allocated as float[] for IEEE-float files, short[] otherwise
    switch(fmt_type){
      case 3:
        delete[] (float*)buf;
        break;
      default:
        delete[] (short*)buf;
        break;
    }
  }
}
// (Re)write the 44-byte RIFF/WAVE header at the start of the file, using the
// current member values. No-op for files opened in read mode.
//
// Fix over the original: when fp was null it printed an error but then fell
// through to fseek(NULL, ...), which is undefined behavior; it now returns
// immediately after reporting the error.
void WAV::WriteHeader() {
  if (isRead)return;
  if (!fp) {
    printf("ERROR::File doesn't exist\n");
    return;
  }
  // header always lives at the very beginning of the file
  fseek(fp, 0, SEEK_SET);
#ifndef NDEBUG
  //  printf("WriteHeader::ftell %ld\n",ftell(fp));
#endif
  riff_size = data_size + head_size;
  fwrite(riff_id, sizeof(char), 4, fp);
  fwrite(&(riff_size), sizeof(uint32_t), 1, fp);
  fwrite((wave_id), sizeof(char), 4, fp);
  fwrite((fmt_id), sizeof(char), 4, fp);
  fwrite(&(fmt_size), sizeof(uint32_t), 1, fp);
  fwrite(&(fmt_type), sizeof(short), 1, fp);
  fwrite(&(channels), sizeof(short), 1, fp);
  fwrite(&(sample_rate), sizeof(uint32_t), 1, fp);
  fwrite(&(byte_rate), sizeof(uint32_t), 1, fp);
  fwrite(&(block_align), sizeof(short), 1, fp);
  fwrite(&(bit_per_sample), sizeof(short), 1, fp);
  fwrite(data_id, sizeof(char), 4, fp);
  fwrite(&(data_size), sizeof(uint32_t), 1, fp);
}
// Create (truncate) a file for writing, emit an initial header, and remember
// its name. Exits the process if the file cannot be opened.
//
// Fix over the original: the file name was copied with an unbounded strcpy
// into the fixed 1024-byte member; snprintf now bounds (and truncates) the
// copy so a long path cannot overflow the buffer.
//
// \param _file_name Path of the file to create.
// \return 0 on success (does not return on failure).
int WAV::NewFile(const char *_file_name) {
  Clear();
  fp = fopen(_file_name, "wb");
  if (fp == NULL) {
    printf("WAV::NewFile::Failed to Open : %s\n", _file_name);
    exit(-1);
  }
  WriteHeader();
  snprintf(file_name, sizeof(file_name), "%s", _file_name);
  IsOpen = true;
  isRead = false;
  return 0;
}
// std::string convenience overload; forwards to the const char* version.
int WAV::NewFile(std::string file_name_) {
  return NewFile(file_name_.c_str());
}
// Append app_size samples of 16-bit data at the end of the file, grow
// data_size accordingly, and rewrite the header so the file stays valid.
//
// \param app_data Samples to append (interleaved if multi-channel).
// \param app_size Number of samples (units of size_unit bytes), not bytes.
// \return Always 0; a failed fwrite only prints an error.
int WAV::Append(short *app_data, unsigned int app_size) {
  fseek(fp, 0, SEEK_END);
#ifndef NDEBUG
  //  printf("Append::ftell %ld | size %d\n",ftell(fp),app_size);
#endif
  if (!fwrite(reinterpret_cast<void*>( app_data), size_unit, app_size, fp)){
    printf("ERROR::Append<short>\n");
  }
  data_size += app_size *size_unit ;
  // keep riff_size/data_size in the on-disk header consistent after every append
  WriteHeader();
  return 0;
};
// Append app_size float samples at the end of the file, grow data_size, and
// rewrite the header so the file stays valid (mirror of the short overload).
//
// Fix over the original: a leftover unconditional debug printf reported
// data_size with "%d" although data_size is uint32_t (undefined format
// mismatch) and had no counterpart in Append(short); it was removed to match
// the short overload and the file's convention of commented-out debug prints.
//
// \param app_data Samples to append (interleaved if multi-channel).
// \param app_size Number of samples (units of size_unit bytes), not bytes.
// \return Always 0; a failed fwrite only prints an error.
int WAV::Append(float*app_data, unsigned int app_size) {
  fseek(fp, 0, SEEK_END);
#ifndef NDEBUG
  //  printf("Append::ftell %ld | size %d\n",ftell(fp),app_size);
#endif
  if (!fwrite(reinterpret_cast<void*>(app_data), size_unit, app_size, fp)){
    printf("ERROR::Append<float>\n");
  }
  data_size += app_size *size_unit ;
  WriteHeader();
  return 0;
}
// Open an existing WAV file for reading, parse its header, and allocate the
// frame buffer for buffered reads.
//
// \param _file_name Path of the file to open.
// \return 0 on success, 1 if the file could not be opened.
int WAV::OpenFile(const char *_file_name) {
  fp = fopen(_file_name, "rb");
  strcpy(file_name,_file_name);
  if (fp == NULL) {
    printf("WAV::OpenFile::Failed to Open : '%s'\n", _file_name);
    return 1;
  }
  ReadHeader();
  IsOpen = true;
  isRead = true;
  // allocate the read buffer sized for the configured frame/shift
  UseBuf(frame_size,shift_size);
  return 0;
};
// std::string convenience overload; forwards to the const char* version.
int WAV::OpenFile(std::string file_name_) {
  return OpenFile(file_name_.c_str());
}
void WAV::ReadHeader() {
  // Parse the RIFF/WAVE header at the current file position and fill in
  // the format fields (channels, sample_rate, byte_rate, ...).
  // Layout reference:
  // http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
  // NOTE(review): none of the fread() return values are checked here, so
  // a truncated file silently yields garbage fields.
  head_size = 44;
  unsigned char buffer4[4];
  unsigned char buffer2[2];
  unsigned char buffer16[16];
  unsigned int temp;
  if (!fp) {
    printf("ERROR::File doesn't exist\n");
  }
  fread(riff_id, sizeof(riff_id), 1, fp);
  fread(buffer4, sizeof(buffer4), 1, fp);
  // assemble 4 little-endian bytes into a host-order 4-byte int
  riff_size =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
  // fread(riff_size,sizeof(riff_size),1,fp);
  fread(wave_id, sizeof(wave_id), 1, fp);
  fread(fmt_id, sizeof(fmt_id), 1, fp);
  fread(buffer4, sizeof(buffer4), 1, fp);
  // assemble 4 little-endian bytes into a host-order 4-byte int
  //          | pcm | non-pcm
  // fmt_size | 16  | 18
  //
  // Chunk size: 16, 18 or 40
  fmt_size =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
  if(fmt_size==18)
    non_pcm = true;
  if (fmt_size == 16 || fmt_size == 18);  // known sizes: nothing to do
  // unknown fmt_size values fall through to the default below
  else if (fmt_size == 40)
    extensible = true;
  else fmt_size=16;                       // fall back to plain PCM layout
  fread(buffer2, sizeof(buffer2), 1, fp);
  // assemble 2 little-endian bytes into a host-order 2-byte int
  fmt_type = buffer2[0] | (buffer2[1] << 8);
  if(fmt_type==1 || fmt_type == 3)
    ;                                     // 1 = PCM, 3 = IEEE float: accepted
  // any other (undefined) type is coerced to PCM
  else
    fmt_type = 1;
  fread(buffer2, sizeof(buffer2), 1, fp);
  // If the caller pre-set an expected channel count, verify it matches.
  if(channels!=0){
    temp = buffer2[0] | (buffer2[1] << 8);
    if(temp!=channels){
      printf("ERROR::WAV channels is not expected (%d != %d)\n",channels,temp);
      exit(-1);
    }
  }
  else
    channels = buffer2[0] | (buffer2[1] << 8);
  fread(buffer4, sizeof(buffer4), 1, fp);
  // If the caller pre-set an expected sample rate, verify it matches.
  if(sample_rate!=0){
    temp
  = buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
    if(temp != sample_rate){
      printf("ERROR::WAV sampe_rate is not expected (%d != %d)\n",sample_rate,temp);
      exit(-1);
    }
  }
  else
    sample_rate =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
  fread(buffer4, sizeof(buffer4), 1, fp);
  // bytes per second = sample_rate * block_align
  byte_rate =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
  fread(buffer2, sizeof(buffer2), 1, fp);
  // bytes per interleaved sample frame
  block_align = buffer2[0] | (buffer2[1] << 8);
  fread(buffer2, sizeof(buffer2), 1, fp);
  // bits per single-channel sample
  bit_per_sample = buffer2[0] | (buffer2[1] << 8);
  size_unit = bit_per_sample/8;
  /* non_pcm format has extra fields: cbSize plus a 'fact' chunk */
  if(non_pcm){
    fread(buffer2, sizeof(buffer2), 1, fp);
    cb_size= buffer2[0] | (buffer2[1] << 8);
    fread(fact_id, sizeof(fact_id), 1, fp);
    fread(buffer4, sizeof(buffer4), 1, fp);
    fact_size =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
    fread(buffer4, sizeof(buffer4), 1, fp);
    dwSampleLength =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
  }
  /* extensible format (fmt_size == 40) has a 22-byte extension */
  if (extensible) {
    fread(buffer2, sizeof(buffer2), 1, fp);
    cb_size = buffer2[0] | (buffer2[1] << 8);
    if(cb_size!=22){
      printf("ERROR::WAV size of extension is not correct (%d != %d)\n", cb_size, 22);
      exit(-1);
    }
    fread(buffer2, sizeof(buffer2), 1, fp);
    w_valid_bits_per_sample = buffer2[0] | (buffer2[1] << 8);
    fread(buffer4, sizeof(buffer4), 1, fp);
    // NOTE(review): this overwrites the value read just above; per the
    // extensible-format spec this 4-byte field should be dwChannelMask,
    // not w_valid_bits_per_sample — looks like a copy-paste bug; verify.
    w_valid_bits_per_sample = buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
    fread(buffer16, sizeof(buffer16), 1, fp);
    // the 16 bytes above are the SubFormat GUID; currently discarded
    fread(fact_id, sizeof(fact_id), 1, fp);
    fread(buffer4, sizeof(buffer4), 1, fp);
    fact_size =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
    fread(buffer4, sizeof(buffer4), 1, fp);
    dwSampleLength =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
    // NOTE(review): the extension adds 36 bytes of header, not 7 — confirm
    // how head_size is consumed elsewhere before relying on this value.
    head_size += 7;
  }
  fread(data_id, sizeof(data_id), 1, fp);
  fread(buffer4, sizeof(buffer4), 1, fp);
  // number of payload bytes following the 'data' chunk header
  data_size =
      buffer4[0] | (buffer4[1] << 8) | (buffer4[2] << 16) | (buffer4[3] << 24);
}
size_t WAV::ReadUnit(short*dest,int unit){
  // Read 'unit' samples of size_unit bytes each; returns the count read.
  const size_t got = fread(dest, size_unit, unit, fp);
  return got;
}
size_t WAV::ReadUnit(float*dest,int unit){
  // Float-buffer variant: same contract as the short* overload.
  const size_t got = fread(dest, size_unit, unit, fp);
  return got;
}
void WAV::Finish() {
  // Write the final header (sizes are only known now) and close the file.
  if (fp == NULL)
    return;
  WriteHeader();
  fclose(fp);
  IsOpen = false;
}
void WAV::Print()const {
  // Dump every parsed header field in human-readable form.
  // (Removed the unused local 'int t;'.)
  if (!IsOpen) {
    printf("ERROR::file is not opened.\n");
    return;
  }
  printf("-------- WAV HEADER INFOMATION of [ %s ] ----------\n", file_name);
  printf("riff_id : %c%c%c%c\n", riff_id[0], riff_id[1], riff_id[2],
         riff_id[3]);
  printf("riff_size : %u \n", riff_size);
  printf("wave_id : %c%c%c%c\n", wave_id[0], wave_id[1], wave_id[2],
         wave_id[3]);
  printf("fmt_id : %c%c%c%c\n", fmt_id[0], fmt_id[1], fmt_id[2],
         fmt_id[3]);
  printf("fmt_size : %u\n", fmt_size);
  // Translate the numeric format tag into its WAVE meaning.
  switch (fmt_type) {
  case 1:
    printf("fmt_type : %u - PCM\n", fmt_type);
    break;
  case 3:
    printf("fmt_type : %u - IEEE float\n", fmt_type);
    break;
  case 6:
    printf("fmt_type : %u - 8bit A law\n", fmt_type);
    break;
  case 7:
    printf("fmt_type : %u - 8 bit U law\n", fmt_type);
    break;
  default:
    printf("fmt_type : %u - Unknown\n", fmt_type);
    break;
  }
  printf("channels : %u\n", channels);
  printf("sample_rate : %u \n", sample_rate);
  printf("byte_rate : %u\n", byte_rate);
  printf("block_align : %u\n", block_align);
  printf("bit_per_sample : %u\n", bit_per_sample);
  if(non_pcm){
    // Extra fields only present for non-PCM ('fact' chunk) files.
    printf("cbSize : %u\n", cb_size);
    printf("fact_id : %c%c%c%c\n", fact_id[0], fact_id[1], fact_id[2],
           fact_id[3]);
    printf("fact_size : %u\n",fact_size);
    printf("dwSampleLength : %u\n", dwSampleLength);
  }
  printf("data_id : %c%c%c%c\n", data_id[0], data_id[1], data_id[2],
         data_id[3]);
  printf("data_size : %u\n", data_size);
  // NOTE(review): riff_size includes header bytes, so this duration is a
  // slight over-estimate; data_size / byte_rate would be exact.
  printf("duration : %.3lf sec\n", (double)riff_size / byte_rate);
}
bool WAV::checkValidHeader() {
  // Sanity-check the parsed header: magic IDs, a supported format tag,
  // a consistent byte rate, and a known fmt chunk size.
  // (Removed the unused local 'int t;'.)
  if(riff_id[0]!='R' || riff_id[1]!='I'|| riff_id[2]!='F'|| riff_id[3]!='F')
    return false;
  if(wave_id[0]!='W' || wave_id[1]!='A'|| wave_id[2]!='V'|| wave_id[3]!='E')
    return false;
  // 1 = PCM, 3 = IEEE float, 6 = A-law, 7 = mu-law
  if(!(fmt_type == 1 || fmt_type ==3 || fmt_type ==6 || fmt_type ==7))
    return false;
  if(!(fmt_size == 16 || fmt_size == 18))
    return false;
  // byte_rate must equal block_align * sample_rate by definition
  if(byte_rate!=(block_align*sample_rate))
    return false;
  if(data_id[0]!='d' || data_id[1]!='a'|| data_id[2]!='t'|| data_id[3]!='a')
    return false;
  return true;
}
int WAV::IsEOF() const { return feof(fp); }
void WAV::Rewind() {
#ifndef NDEBUG
  printf("INFO::Rewind\n");
#endif
  // Seek back to the beginning and re-parse the header so the stream is
  // positioned at the first sample again.
  rewind(fp);
  ReadHeader();
}
void WAV::SplitBy2(const char* f1,const char* f2){
  // Split a 2-channel (stereo) file into two mono files f1 / f2.
  if(!IsOpen){
    printf("ERROR::WAV must be opened\n");
    exit(-1);
  }
  if(channels != 2){
    printf("ERROR::WAV must be 2 channels\n");
    exit(-1);
  }
  WAV w1(1,sample_rate);
  WAV w2(1,sample_rate);
  w1.NewFile(f1);
  w2.NewFile(f2);
  short temp[2];
  // Read one interleaved L/R pair per iteration.  Checking the fread
  // return value (instead of feof() *before* the read) fixes the classic
  // bug where one pair of garbage samples was appended after EOF.
  while (fread(temp, size_unit, 2, fp) == 2) {
    w1.Append(&temp[0], 1);
    w2.Append(&temp[1], 1);
  }
  w1.Finish();
  w2.Finish();
}
int WAV::GetChannels(){
return channels;
}
bool WAV::GetIsOpen(){
return IsOpen;
}
uint32_t WAV::GetSize(){
return riff_size;
}
uint32_t WAV::GetSizeUnit(){
return size_unit;
}
uint32_t WAV::GetNumOfSamples(){
return riff_size / (2 * channels);
}
void WAV::UseBuf(int _frame_size,int _shift_size){
  // Allocate the internal shift buffer used by the Convert2* helpers.
  // One buffer holds shift_size frames of all channels; the element type
  // follows the file's sample format.  Idempotent: the use_buf guard
  // prevents re-allocation (and a leak) on repeated calls.
  // NOTE(review): 'buf' is apparently never freed in this chunk — confirm
  // the destructor releases it.
  if(!use_buf){
    frame_size = _frame_size;
    shift_size = _shift_size;
    use_buf = true;
    switch(fmt_type){
      /* 3 - IEEE float*/
      case 3:
        buf = new float[channels * shift_size];
        break;
      /* 1 - PCM*/
      default:
        buf = new short[channels * shift_size];
        break;
    }
  }
}
uint32_t WAV::GetSampleRate(){
return sample_rate;
}
int WAV::Convert2ShiftedArray(double **raw) {
  // Slide the per-channel window raw[ch][frame_size] left by shift_size
  // samples, then de-interleave the next shift_size samples from the file
  // into the tail of each channel, converted to double.
  // Returns 1 on a full read, 0 on a short/empty read (tail zero-padded).
  int i, j,read;
  read=fread(buf, size_unit, channels * shift_size, fp);
  /*
  printf("READ : %d\n",read);
  for(int q=0;q<shift_size;q++)
    printf("read %d %d\n",q,reinterpret_cast<T*>(buf)[q]);
  */
  if(read == channels*shift_size){
    // SHIFT: move the retained part of the window to the front
    for (j = 0; j < channels; j++) {
      for (i = 0; i < (frame_size - shift_size); i++) {
        raw[j][i] = raw[j][i + shift_size];
      }
    }
    // COPY as double, de-interleaving by channel
    // memcpy(arr,data.buffer+read_offset,shift_size);
    switch(fmt_type){
      case 3:  // IEEE float samples
        for (i = 0; i < shift_size; i++) {
          for (j = 0; j < channels; j++){
            raw[j][i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<float*>(buf)[i * channels + j]);
          }
        }
        break;
      default: // 16-bit PCM samples
        for (i = 0; i < shift_size; i++) {
          for (j = 0; j < channels; j++){
            raw[j][i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<short*>(buf)[i * channels + j]);
          }
        }
        break;
    }
    return 1;
  // Not enough buf to read: copy what was read, zero-pad the rest
  }else{
    read = read/channels;   // now a per-channel sample count
    for (j = 0; j < channels; j++) {
      for (i = 0; i < (frame_size - shift_size); i++) {
        raw[j][i] = raw[j][i + shift_size];
      }
    }
    switch(fmt_type){
      case 3:
        for (i = 0; i < read; i++) {
          for (j = 0; j < channels; j++)
            raw[j][i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<float*>(buf)[i * channels + j]);
        }
        break;
      default:
        for (i = 0; i < read; i++) {
          for (j = 0; j < channels; j++)
            raw[j][i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<short*>(buf)[i * channels + j]);
        }
        break;
    }
    // zero-fill the part of the tail the file could not supply
    for (i = read; i < shift_size; i++) {
      for (j = 0; j < channels; j++)
        raw[j][i + (frame_size - shift_size)] = 0;
    }
    return 0;
  }
  return 0;  // unreachable: both branches above return
}
int WAV::Convert2ShiftedArray(double *raw) {
  // Flat-array variant of Convert2ShiftedArray(double**): channel j's
  // window occupies raw[j*frame_size .. j*frame_size + frame_size - 1].
  // Same contract: shift left by shift_size, append newly read samples as
  // double; returns 1 on a full read, 0 on a short read (zero-padded).
  int i, j,read;
  read=fread(buf, size_unit, channels * shift_size, fp);
  /*
  printf("READ : %d\n",read);
  for(int q=0;q<shift_size;q++)
    printf("read %d %d\n",q,reinterpret_cast<T*>(buf)[q]);
  */
  if(read == channels*shift_size){
    // SHIFT: move the retained part of each channel's window forward
    for (j = 0; j < channels; j++) {
      for (i = 0; i < (frame_size - shift_size); i++) {
        raw[j*frame_size + i] = raw[j*frame_size + i + shift_size ];
      }
    }
    // COPY as double, de-interleaving by channel
    // memcpy(arr,data.buffer+read_offset,shift_size);
    switch(fmt_type){
      case 3:  // IEEE float samples
        for (i = 0; i < shift_size; i++) {
          for (j = 0; j < channels; j++){
            raw[j*frame_size + i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<float*>(buf)[i * channels + j]);
          }
        }
        break;
      default: // 16-bit PCM samples
        for (i = 0; i < shift_size; i++) {
          for (j = 0; j < channels; j++){
            raw[j*frame_size + i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<short*>(buf)[i * channels + j]);
          }
        }
        break;
    }
    return 1;
  // Not enough buf to read: copy what was read, zero-pad the rest
  }else{
    read = read/channels;   // now a per-channel sample count
    for (j = 0; j < channels; j++) {
      for (i = 0; i < (frame_size - shift_size); i++) {
        raw[j*frame_size + i] = raw[j*frame_size + i + shift_size];
      }
    }
    switch(fmt_type){
      case 3:
        for (i = 0; i < read; i++) {
          for (j = 0; j < channels; j++)
            raw[j*frame_size + i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<float*>(buf)[i * channels + j]);
        }
        break;
      default:
        for (i = 0; i < read; i++) {
          for (j = 0; j < channels; j++)
            raw[j* frame_size + i + (frame_size - shift_size)]
              = static_cast<double>(reinterpret_cast<short*>(buf)[i * channels + j]);
        }
        break;
    }
    // zero-fill the part of the tail the file could not supply
    for (i = read; i < shift_size; i++) {
      for (j = 0; j < channels; j++)
        raw[j * frame_size+ i + (frame_size - shift_size)] = 0;
    }
    return 0;
  }
  return 0;  // unreachable: both branches above return
}
/* Note :: There is no padding at the edges!
   TODO :: need to add padding
*/
int WAV::Convert2Array(double **raw) {
  // Read the next shift_size samples per channel and de-interleave them
  // into raw[ch][0..shift_size-1] as double (no sliding window).
  // Returns 1 on a full read, 0 on a short read (tail zero-padded).
  int i, j,read;
  read=fread(buf, size_unit, channels * shift_size, fp);
  //printf("read : %d\n",read);
  if(read == channels*shift_size){
    // COPY as double, de-interleaving by channel
    // memcpy(arr,data.buffer+read_offset,shift_size);
    switch(fmt_type){
      case 3:  // IEEE float samples
        for (i = 0; i < shift_size; i++) {
          for (j = 0; j < channels; j++){
            raw[j][i]
              = static_cast<double>(reinterpret_cast<float*>(buf)[i * channels + j]);
          }
        }
        break;
      default: // 16-bit PCM samples
        for (i = 0; i < shift_size; i++) {
          for (j = 0; j < channels; j++){
            raw[j][i]
              = static_cast<double>(reinterpret_cast<short*>(buf)[i * channels + j]);
          }
        }
        break;
    }
    return 1;
  // Not enough buf to read: copy what was read, zero-pad the rest
  }else{
    read = read/channels;   // now a per-channel sample count
    switch(fmt_type){
      case 3:
        for (i = 0; i < read; i++) {
          for (j = 0; j < channels; j++)
            raw[j][i]
              = static_cast<double>(reinterpret_cast<float*>(buf)[i * channels + j]);
        }
        break;
      default:
        for (i = 0; i < read; i++) {
          for (j = 0; j < channels; j++)
            raw[j][i ]
              = static_cast<double>(reinterpret_cast<short*>(buf)[i * channels + j]);
        }
        break;
    }
    // zero-fill the samples the file could not supply
    for (i = read; i < shift_size; i++) {
      for (j = 0; j < channels; j++)
        raw[j][i] = 0;
    }
  }
  return 0;
}
void WAV::SetSizes(int frame,int shift){
frame_size = frame;
shift_size = shift;
}
short WAV::GetFmtType(){
return fmt_type;
}
void WAV::Split(char* _file_name ) {
  // Split a multi-channel WAV into per-channel mono files named
  // "<base>_1.wav" ... "<base>_N.wav".  _file_name is mutated by strtok.
  const int fr = 512;          // frames (samples per channel) per read
  char temp_file_name[512];
  WAV input;
  input.OpenFile(_file_name);
  int ch = input.GetChannels();
  int sr = input.GetSampleRate();
  std::vector<WAV> splited(ch,WAV(1,sr));
  short *temp = new short[fr * ch];
  // strtok mutates its argument and keeps internal state; tokenize once
  // instead of re-tokenizing on every loop iteration (same result, and
  // clearer intent).
  char *temp_str = strtok(_file_name, ".");
  for(int i=0;i<ch;i++){
    sprintf(temp_file_name,"%s_%d.wav",temp_str,i+1);
    splited[i].NewFile(temp_file_name);
  }
  // Distribute interleaved frames.  Using the actual count returned by
  // ReadUnit fixes the bug where a short final read still appended fr*ch
  // samples of stale/garbage data.
  while(!input.IsEOF()){
    size_t got = input.ReadUnit(temp, fr * ch);
    int frames = (int)(got / ch);
    for(int i=0;i<frames;i++){
      for(int j=0;j<ch;j++)
        splited[j].Append(temp + i*ch + j,1);
    }
  }
  // Finish() writes the final header sizes before closing; without it the
  // output headers keep their initial (empty) sizes, as SplitBy2 shows.
  for(int i=0;i<ch;i++)
    splited[i].Finish();
  splited.clear();
  delete[] temp;
}
FILE* WAV::GetFilePointer() {
return fp;
}
const char* WAV::GetFileName() {
return file_name;
}
/* channel wise normalization */
void WAV::Normalize() {
int cnt;
short *max;
float *rate;
if (fmt_type != 1) {
printf("SORRY::Currently, I do not support Normalize() for other than short type.");
return;
}
if (fp)fclose(fp);
OpenFile(file_name);
// Print();
int n_sample = data_size / size_unit;
max = new short[channels];
memset(max, 0, sizeof(short) * channels);
rate = new float[channels];
short* temp_buf = new short[n_sample];
cnt = ReadUnit(temp_buf, n_sample);
if (cnt != n_sample) {
printf("WARNNING::Read Sample Num : %d != Sample Num %d\n",cnt,n_sample);
}
Finish();
NewFile(file_name);
data_size = 0;
riff_size = data_size + head_size;
// Get Max
for (int i = 0; i < cnt; i++) {
if (std::abs(temp_buf[i]) > max[i%channels])
max[i%channels] = std::abs(temp_buf[i]);
}
// Normalize
for(int i=0;i<channels;i++)
rate[i] = (float)((32767.0 /(float)max[i]));
#pragma omp parallel for
for (int i = 0; i < cnt; i++) {
temp_buf[i] = (short)(temp_buf[i] * rate[i%channels]);
}
Append(temp_buf, cnt);
// Print();
delete[] temp_buf;
delete[] max;
delete[] rate;
}
void WAV::Clear() {
  // Reset the size fields as if the file held no sample data yet.
  data_size = 0;
  riff_size = data_size + head_size;
}
#endif
|
rose_v1_matrixmultiply2.c | /*
Naive matrix-matrix multiplication(mmm)
By C. Liao
*/
#define N 1000   /* rows of a and c */
#define M 1000   /* columns of b and c */
#define K 1000   /* inner (shared) dimension */
#include <omp.h>
/* NOTE(review): loop indices as file-scope globals force private()
   clauses on every OpenMP loop; locals would be safer. */
int i;
int j;
int k;
double a[1000][1000];
double b[1000][1000];
double c[1000][1000];
int mmm()
{
  /* Naive matrix-matrix multiply: c += a * b (1000x1000).
   * The i-k-j loop order streams contiguously over rows of b and c,
   * which is cache friendly.
   * Only the outermost loop is parallelized.  The original inner
   * "#pragma omp parallel for private (j)" opened a nested parallel
   * region for every (i,k) pair: with nested parallelism disabled (the
   * OpenMP default) it added region-management overhead per iteration
   * without any extra parallelism, and with it enabled it would
   * oversubscribe the machine.  Removing it leaves results identical. */
//#pragma omp parallel for private(i,j,k) shared(a,b,c)
#pragma omp parallel for private (i,j,k)
  for (i = 0; i <= 999; i += 1) {
    for (k = 0; k <= 999; k += 1) {
      for (j = 0; j <= 999; j += 1) {
        c[i][j] = c[i][j] + a[i][k] * b[k][j];
      }
    }
  }
  return 0;
}
|
TruthSet.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_TRUTHSET_H_
#define _SPTAG_COMMON_TRUTHSET_H_
#include "../VectorIndex.h"
#include "QueryResultSet.h"
namespace SPTAG
{
namespace COMMON
{
        // Utilities for loading, generating, and scoring ground-truth
        // nearest-neighbor sets ("truth files") in TXT / XVEC / DEFAULT
        // binary layouts, plus recall computation against ANN results.
        class TruthSet {
        public:
            // Load a whitespace/newline-delimited text truth file: one line per
            // query, K ids per line.
            // NOTE(review): lineBufferSize of 20 bounds a single token, not a
            // line — confirm ReadString's contract for longer ids.
            static void LoadTruthTXT(std::shared_ptr<SPTAG::Helper::DiskPriorityIO>& ptr, std::vector<std::set<SizeType>>& truth, int K, int& originalK, SizeType& p_iTruthNumber)
            {
                std::size_t lineBufferSize = 20;
                std::unique_ptr<char[]> currentLine(new char[lineBufferSize]);
                truth.clear();
                truth.resize(p_iTruthNumber);
                for (int i = 0; i < p_iTruthNumber; ++i)
                {
                    truth[i].clear();
                    for (int j = 0; j < K; ++j)
                    {
                        if (ptr->ReadString(lineBufferSize, currentLine, ' ') == 0) {
                            LOG(Helper::LogLevel::LL_Error, "Truth number(%d) and query number(%d) are not match!\n", i, p_iTruthNumber);
                            exit(1);
                        }
                        truth[i].insert(std::atoi(currentLine.get()));
                    }
                    // consume the rest of the line (trailing newline)
                    if (ptr->ReadString(lineBufferSize, currentLine, '\n') == 0) {
                        LOG(Helper::LogLevel::LL_Error, "Truth number(%d) and query number(%d) are not match!\n", i, p_iTruthNumber);
                        exit(1);
                    }
                }
            }

            // Load an *.ivecs-style truth file: each record is a 4-byte count
            // (originalK) followed by originalK 4-byte ids; only the first K
            // ids per record are kept.
            static void LoadTruthXVEC(std::shared_ptr<SPTAG::Helper::DiskPriorityIO>& ptr, std::vector<std::set<SizeType>>& truth, int K, int& originalK, SizeType& p_iTruthNumber)
            {
                truth.clear();
                truth.resize(p_iTruthNumber);
                std::vector<int> vec(K);
                for (int i = 0; i < p_iTruthNumber; i++) {
                    if (ptr->ReadBinary(4, (char*)&originalK) != 4 || originalK < K) {
                        LOG(Helper::LogLevel::LL_Error, "Error: Xvec file has No.%d vector whose dims are fewer than expected. Expected: %d, Fact: %d\n", i, K, originalK);
                        exit(1);
                    }
                    if (originalK > K) vec.resize(originalK);
                    if (ptr->ReadBinary(originalK * 4, (char*)vec.data()) != originalK * 4) {
                        LOG(Helper::LogLevel::LL_Error, "Truth number(%d) and query number(%d) are not match!\n", i, p_iTruthNumber);
                        exit(1);
                    }
                    truth[i].insert(vec.begin(), vec.begin() + K);
                }
            }

            // Load the DEFAULT binary layout: a (row, originalK) header the
            // first time the stream is read, then row*originalK 4-byte ids;
            // only the first K ids per query are kept.
            static void LoadTruthDefault(std::shared_ptr<SPTAG::Helper::DiskPriorityIO>& ptr, std::vector<std::set<SizeType>>& truth, int K, int& originalK, SizeType& p_iTruthNumber) {
                if (ptr->TellP() == 0) {
                    int row;
                    if (ptr->ReadBinary(4, (char*)&row) != 4 || ptr->ReadBinary(4, (char*)&originalK) != 4) {
                        LOG(Helper::LogLevel::LL_Error, "Fail to read truth file!\n");
                        exit(1);
                    }
                }
                truth.clear();
                truth.resize(p_iTruthNumber);
                std::vector<int> vec(originalK);
                for (int i = 0; i < p_iTruthNumber; i++)
                {
                    if (ptr->ReadBinary(4 * originalK, (char*)vec.data()) != 4 * originalK) {
                        LOG(Helper::LogLevel::LL_Error, "Truth number(%d) and query number(%d) are not match!\n", i, p_iTruthNumber);
                        exit(1);
                    }
                    truth[i].insert(vec.begin(), vec.begin() + K);
                }
            }

            // Dispatch to the right loader based on the file-type enum.
            static void LoadTruth(std::shared_ptr<SPTAG::Helper::DiskPriorityIO>& ptr, std::vector<std::set<SizeType>>& truth, SizeType& NumQuerys, int& originalK, int K, TruthFileType type)
            {
                if (type == TruthFileType::TXT)
                {
                    LoadTruthTXT(ptr, truth, K, originalK, NumQuerys);
                }
                else if (type == TruthFileType::XVEC)
                {
                    LoadTruthXVEC(ptr, truth, K, originalK, NumQuerys);
                }
                else if (type == TruthFileType::DEFAULT) {
                    LoadTruthDefault(ptr, truth, K, originalK, NumQuerys);
                }
                else
                {
                    LOG(Helper::LogLevel::LL_Error, "TruthFileType Unsupported.\n");
                    exit(1);
                }
            }

            // Write truthset (ids) and, for DEFAULT layout, distances, in the
            // requested file format.  Exits on any I/O failure.
            static void writeTruthFile(const std::string truthFile, SizeType queryNumber, const int K, std::vector<std::vector<SPTAG::SizeType>>& truthset, std::vector<std::vector<float>>& distset, SPTAG::TruthFileType TFT) {
                auto ptr = SPTAG::f_createIO();
                if (ptr == nullptr || !ptr->Initialize(truthFile.c_str(), std::ios::out | std::ios::binary)) {
                    LOG(Helper::LogLevel::LL_Error, "Fail to create the file:%s\n", truthFile.c_str());
                    exit(1);
                }

                if (TFT == SPTAG::TruthFileType::TXT)
                {
                    for (SizeType i = 0; i < queryNumber; i++)
                    {
                        for (int k = 0; k < K; k++)
                        {
                            if (ptr->WriteString((std::to_string(truthset[i][k]) + " ").c_str()) == 0) {
                                LOG(Helper::LogLevel::LL_Error, "Fail to write the truth file!\n");
                                exit(1);
                            }
                        }
                        if (ptr->WriteString("\n") == 0) {
                            LOG(Helper::LogLevel::LL_Error, "Fail to write the truth file!\n");
                            exit(1);
                        }
                    }
                }
                else if (TFT == SPTAG::TruthFileType::XVEC)
                {
                    // per-record: 4-byte K then K 4-byte ids
                    for (SizeType i = 0; i < queryNumber; i++)
                    {
                        if (ptr->WriteBinary(sizeof(K), (char*)&K) != sizeof(K) || ptr->WriteBinary(K * 4, (char*)(truthset[i].data())) != K * 4) {
                            LOG(Helper::LogLevel::LL_Error, "Fail to write the truth file!\n");
                            exit(1);
                        }
                    }
                }
                else if (TFT == SPTAG::TruthFileType::DEFAULT) {
                    // header (queryNumber, K), then all id rows, then all dist rows
                    ptr->WriteBinary(4, (char*)&queryNumber);
                    ptr->WriteBinary(4, (char*)&K);
                    for (SizeType i = 0; i < queryNumber; i++)
                    {
                        if (ptr->WriteBinary(K * 4, (char*)(truthset[i].data())) != K * 4) {
                            LOG(Helper::LogLevel::LL_Error, "Fail to write the truth file!\n");
                            exit(1);
                        }
                    }
                    for (SizeType i = 0; i < queryNumber; i++)
                    {
                        if (ptr->WriteBinary(K * 4, (char*)(distset[i].data())) != K * 4) {
                            LOG(Helper::LogLevel::LL_Error, "Fail to write the truth file!\n");
                            exit(1);
                        }
                    }
                }
                else {
                    LOG(Helper::LogLevel::LL_Error, "Found unsupported file type for generating truth.");
                    exit(-1);
                }
            }

            // Brute-force exact K-NN over vectorSet for every query (parallel
            // over queries), then persist ids+distances via writeTruthFile and
            // additionally to "<truthFile>.dist.bin" in interleaved form.
            template<typename T>
            static void GenerateTruth(std::shared_ptr<VectorSet> querySet, std::shared_ptr<VectorSet> vectorSet, const std::string truthFile,
                const SPTAG::DistCalcMethod distMethod, const int K, const SPTAG::TruthFileType p_truthFileType) {
                if (querySet->Dimension() != vectorSet->Dimension() && !SPTAG::COMMON::DistanceUtils::Quantizer)
                {
                    LOG(Helper::LogLevel::LL_Error, "query and vector have different dimensions.");
                    exit(-1);
                }

                std::vector< std::vector<SPTAG::SizeType> > truthset(querySet->Count(), std::vector<SPTAG::SizeType>(K, 0));
                std::vector< std::vector<float> > distset(querySet->Count(), std::vector<float>(K, 0));
#pragma omp parallel for
                for (int i = 0; i < querySet->Count(); ++i)
                {
                    SPTAG::COMMON::QueryResultSet<T> query((const T*)(querySet->GetVector(i)), K);
                    for (SPTAG::SizeType j = 0; j < vectorSet->Count(); j++)
                    {
                        float dist = SPTAG::COMMON::DistanceUtils::ComputeDistance(query.GetQuantizedTarget(), reinterpret_cast<T*>(vectorSet->GetVector(j)), vectorSet->Dimension(), distMethod);
                        query.AddPoint(j, dist);
                    }
                    query.SortResult();

                    for (int k = 0; k < K; k++)
                    {
                        truthset[i][k] = (query.GetResult(k))->VID;
                        distset[i][k] = (query.GetResult(k))->Dist;
                    }

                }
                writeTruthFile(truthFile, querySet->Count(), K, truthset, distset, p_truthFileType);

                auto ptr = SPTAG::f_createIO();
                if (ptr == nullptr || !ptr->Initialize((truthFile + ".dist.bin").c_str(), std::ios::out | std::ios::binary)) {
                    LOG(Helper::LogLevel::LL_Error, "Fail to create the file:%s\n", (truthFile + ".dist.bin").c_str());
                    exit(1);
                }

                int int32_queryNumber = (int)querySet->Count();
                ptr->WriteBinary(4, (char*)&int32_queryNumber);
                ptr->WriteBinary(4, (char*)&K);

                for (size_t i = 0; i < int32_queryNumber; i++)
                {
                    for (int k = 0; k < K; k++) {
                        if (ptr->WriteBinary(4, (char*)(&(truthset[i][k]))) != 4) {
                            LOG(Helper::LogLevel::LL_Error, "Fail to write the truth dist file!\n");
                            exit(1);
                        }
                        if (ptr->WriteBinary(4, (char*)(&(distset[i][k]))) != 4) {
                            LOG(Helper::LogLevel::LL_Error, "Fail to write the truth dist file!\n");
                            exit(1);
                        }
                    }
                }
            }

            // Mean recall@K of 'results' against 'truth'.  A result id counts
            // if it matches a truth id, or (when vectorSet is given) if its
            // distance ties the truth distance within Epsilon (handles
            // duplicate-distance neighbors).  Also reports std/min/max to
            // 'log' and per-query dumps when debug is set.
            template <typename T>
            static float CalculateRecall(VectorIndex* index, std::vector<QueryResult>& results, const std::vector<std::set<SizeType>>& truth, int K, int truthK, std::shared_ptr<SPTAG::VectorSet> querySet, std::shared_ptr<SPTAG::VectorSet> vectorSet, SizeType NumQuerys, std::ofstream* log = nullptr, bool debug = false)
            {
                float meanrecall = 0, minrecall = MaxDist, maxrecall = 0, stdrecall = 0;
                std::vector<float> thisrecall(NumQuerys, 0);
                std::unique_ptr<bool[]> visited(new bool[K]);
                for (SizeType i = 0; i < NumQuerys; i++)
                {
                    // visited[] prevents one result slot matching two truth ids
                    memset(visited.get(), 0, K * sizeof(bool));
                    for (SizeType id : truth[i])
                    {
                        for (int j = 0; j < K; j++)
                        {
                            if (visited[j] || results[i].GetResult(j)->VID < 0) continue;

                            if (results[i].GetResult(j)->VID == id)
                            {
                                thisrecall[i] += 1;
                                visited[j] = true;
                                break;
                            }
                            else if (vectorSet != nullptr) {
                                // distance-tie fallback: equal-distance neighbors count
                                float dist = COMMON::DistanceUtils::ComputeDistance((const T*)querySet->GetVector(i), (const T*)vectorSet->GetVector(results[i].GetResult(j)->VID), vectorSet->Dimension(), index->GetDistCalcMethod());
                                float truthDist = COMMON::DistanceUtils::ComputeDistance((const T*)querySet->GetVector(i), (const T*)vectorSet->GetVector(id), vectorSet->Dimension(), index->GetDistCalcMethod());
                                if (index->GetDistCalcMethod() == SPTAG::DistCalcMethod::Cosine && fabs(dist - truthDist) < Epsilon) {
                                    thisrecall[i] += 1;
                                    visited[j] = true;
                                    break;
                                }
                                else if (index->GetDistCalcMethod() == SPTAG::DistCalcMethod::L2 && fabs(dist - truthDist) < Epsilon * (dist + Epsilon)) {
                                    thisrecall[i] += 1;
                                    visited[j] = true;
                                    break;
                                }
                            }
                        }
                    }
                    thisrecall[i] /= truthK;
                    meanrecall += thisrecall[i];
                    if (thisrecall[i] < minrecall) minrecall = thisrecall[i];
                    if (thisrecall[i] > maxrecall) maxrecall = thisrecall[i];

                    if (debug) {
                        std::string ll("recall:" + std::to_string(thisrecall[i]) + "\ngroundtruth:");
                        std::vector<NodeDistPair> truthvec;
                        for (SizeType id : truth[i]) {
                            float truthDist = 0.0;
                            if (vectorSet != nullptr) {
                                truthDist = COMMON::DistanceUtils::ComputeDistance((const T*)querySet->GetVector(i), (const T*)vectorSet->GetVector(id), querySet->Dimension(), index->GetDistCalcMethod());
                            }
                            truthvec.emplace_back(id, truthDist);
                        }
                        std::sort(truthvec.begin(), truthvec.end());
                        for (int j = 0; j < truthvec.size(); j++)
                            ll += std::to_string(truthvec[j].node) + "@" + std::to_string(truthvec[j].distance) + ",";
                        LOG(Helper::LogLevel::LL_Info, "%s\n", ll.c_str());
                        ll = "ann:";
                        for (int j = 0; j < K; j++)
                            ll += std::to_string(results[i].GetResult(j)->VID) + "@" + std::to_string(results[i].GetResult(j)->Dist) + ",";
                        LOG(Helper::LogLevel::LL_Info, "%s\n", ll.c_str());
                    }
                }
                meanrecall /= NumQuerys;
                for (SizeType i = 0; i < NumQuerys; i++)
                {
                    stdrecall += (thisrecall[i] - meanrecall) * (thisrecall[i] - meanrecall);
                }
                stdrecall = std::sqrt(stdrecall / NumQuerys);
                if (log) (*log) << meanrecall << " " << stdrecall << " " << minrecall << " " << maxrecall << std::endl;
                return meanrecall;
            }

            // Single-query recall: exact K-NN by linear scan is compared to
            // the index's ANN answer by distance (within Epsilon), supporting
            // quantized indexes via vector reconstruction.
            template <typename T>
            static float CalculateRecall(VectorIndex* index, T* query, int K) {
                COMMON::QueryResultSet<void> sampleANN(query, K);
                COMMON::QueryResultSet<void> sampleTruth(query, K);
                void* reconstructVector = nullptr;
                if (SPTAG::COMMON::DistanceUtils::Quantizer)
                {
                    reconstructVector = _mm_malloc(SPTAG::COMMON::DistanceUtils::Quantizer->ReconstructSize(), ALIGN_SPTAG);
                    SPTAG::COMMON::DistanceUtils::Quantizer->ReconstructVector((const uint8_t*)query, reconstructVector);
                    sampleANN.SetTarget(reconstructVector);
                    sampleTruth.SetTarget(reconstructVector);
                }

                index->SearchIndex(sampleANN);
                for (SizeType y = 0; y < index->GetNumSamples(); y++)
                {
                    float dist = index->ComputeDistance(sampleTruth.GetQuantizedTarget(), index->GetSample(y));
                    sampleTruth.AddPoint(y, dist);
                }
                sampleTruth.SortResult();

                float recalls = 0;
                std::vector<bool> visited(K, false);
                for (SizeType y = 0; y < K; y++)
                {
                    for (SizeType z = 0; z < K; z++)
                    {
                        if (visited[z]) continue;

                        if (fabs(sampleANN.GetResult(z)->Dist - sampleTruth.GetResult(y)->Dist) < Epsilon)
                        {
                            recalls += 1;
                            visited[z] = true;
                            break;
                        }
                    }
                }
                if (reconstructVector)
                {
                    _mm_free(reconstructVector);
                }

                return recalls / K;
            }
        };
}
}
#endif // _SPTAG_COMMON_TRUTHSET_H_
|
z_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB BT code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
#include "work_lhs.h"
#include "timers.h"
//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void z_solve()
{
// printf("zzzzzzzzz\n");
int i, j, k, m, n, ksize;
//kai
// int k14;
//consistent_data(&k14, "int", 1);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
if (timeron) timer_start(t_zsolve);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side for the three z-factors
//---------------------------------------------------------------------
ksize = grid_points[2]-1;
//---------------------------------------------------------------------
// Compute the indices for storing the block-diagonal matrix;
// determine c (labeled f) and s jacobians
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) shared(ksize) private(i,j,k,m,n)
for (j = k14+1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 0; k <= ksize; k++) {
tmp1 = 1.0 / u[k][j][i][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[k][0][0] = 0.0;
fjac[k][1][0] = 0.0;
fjac[k][2][0] = 0.0;
fjac[k][3][0] = 1.0;
fjac[k][4][0] = 0.0;
fjac[k][0][1] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;
fjac[k][1][1] = u[k][j][i][3] * tmp1;
fjac[k][2][1] = 0.0;
fjac[k][3][1] = u[k][j][i][1] * tmp1;
fjac[k][4][1] = 0.0;
fjac[k][0][2] = - ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
fjac[k][1][2] = 0.0;
fjac[k][2][2] = u[k][j][i][3] * tmp1;
fjac[k][3][2] = u[k][j][i][2] * tmp1;
fjac[k][4][2] = 0.0;
fjac[k][0][3] = - (u[k][j][i][3]*u[k][j][i][3] * tmp2 )
+ c2 * qs[k][j][i];
fjac[k][1][3] = - c2 * u[k][j][i][1] * tmp1;
fjac[k][2][3] = - c2 * u[k][j][i][2] * tmp1;
fjac[k][3][3] = ( 2.0 - c2 ) * u[k][j][i][3] * tmp1;
fjac[k][4][3] = c2;
fjac[k][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* u[k][j][i][3] * tmp2;
fjac[k][1][4] = - c2 * ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;
fjac[k][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
fjac[k][3][4] = c1 * ( u[k][j][i][4] * tmp1 )
- c2 * ( qs[k][j][i] + u[k][j][i][3]*u[k][j][i][3] * tmp2 );
fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1;
njac[k][0][0] = 0.0;
njac[k][1][0] = 0.0;
njac[k][2][0] = 0.0;
njac[k][3][0] = 0.0;
njac[k][4][0] = 0.0;
njac[k][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
njac[k][1][1] = c3c4 * tmp1;
njac[k][2][1] = 0.0;
njac[k][3][1] = 0.0;
njac[k][4][1] = 0.0;
njac[k][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
njac[k][1][2] = 0.0;
njac[k][2][2] = c3c4 * tmp1;
njac[k][3][2] = 0.0;
njac[k][4][2] = 0.0;
njac[k][0][3] = - con43 * c3c4 * tmp2 * u[k][j][i][3];
njac[k][1][3] = 0.0;
njac[k][2][3] = 0.0;
njac[k][3][3] = con43 * c3 * c4 * tmp1;
njac[k][4][3] = 0.0;
njac[k][0][4] = - ( c3c4
- c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])
- ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[k][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
njac[k][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[k][3][4] = ( con43 * c3c4
- c1345 ) * tmp2 * u[k][j][i][3];
njac[k][4][4] = ( c1345 )* tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in z direction
//---------------------------------------------------------------------
lhsinit(lhs, ksize);
for (k = 1; k <= ksize-1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[k][AA][0][0] = - tmp2 * fjac[k-1][0][0]
- tmp1 * njac[k-1][0][0]
- tmp1 * dz1;
lhs[k][AA][1][0] = - tmp2 * fjac[k-1][1][0]
- tmp1 * njac[k-1][1][0];
lhs[k][AA][2][0] = - tmp2 * fjac[k-1][2][0]
- tmp1 * njac[k-1][2][0];
lhs[k][AA][3][0] = - tmp2 * fjac[k-1][3][0]
- tmp1 * njac[k-1][3][0];
lhs[k][AA][4][0] = - tmp2 * fjac[k-1][4][0]
- tmp1 * njac[k-1][4][0];
lhs[k][AA][0][1] = - tmp2 * fjac[k-1][0][1]
- tmp1 * njac[k-1][0][1];
lhs[k][AA][1][1] = - tmp2 * fjac[k-1][1][1]
- tmp1 * njac[k-1][1][1]
- tmp1 * dz2;
lhs[k][AA][2][1] = - tmp2 * fjac[k-1][2][1]
- tmp1 * njac[k-1][2][1];
lhs[k][AA][3][1] = - tmp2 * fjac[k-1][3][1]
- tmp1 * njac[k-1][3][1];
lhs[k][AA][4][1] = - tmp2 * fjac[k-1][4][1]
- tmp1 * njac[k-1][4][1];
lhs[k][AA][0][2] = - tmp2 * fjac[k-1][0][2]
- tmp1 * njac[k-1][0][2];
lhs[k][AA][1][2] = - tmp2 * fjac[k-1][1][2]
- tmp1 * njac[k-1][1][2];
lhs[k][AA][2][2] = - tmp2 * fjac[k-1][2][2]
- tmp1 * njac[k-1][2][2]
- tmp1 * dz3;
lhs[k][AA][3][2] = - tmp2 * fjac[k-1][3][2]
- tmp1 * njac[k-1][3][2];
lhs[k][AA][4][2] = - tmp2 * fjac[k-1][4][2]
- tmp1 * njac[k-1][4][2];
lhs[k][AA][0][3] = - tmp2 * fjac[k-1][0][3]
- tmp1 * njac[k-1][0][3];
lhs[k][AA][1][3] = - tmp2 * fjac[k-1][1][3]
- tmp1 * njac[k-1][1][3];
lhs[k][AA][2][3] = - tmp2 * fjac[k-1][2][3]
- tmp1 * njac[k-1][2][3];
lhs[k][AA][3][3] = - tmp2 * fjac[k-1][3][3]
- tmp1 * njac[k-1][3][3]
- tmp1 * dz4;
lhs[k][AA][4][3] = - tmp2 * fjac[k-1][4][3]
- tmp1 * njac[k-1][4][3];
lhs[k][AA][0][4] = - tmp2 * fjac[k-1][0][4]
- tmp1 * njac[k-1][0][4];
lhs[k][AA][1][4] = - tmp2 * fjac[k-1][1][4]
- tmp1 * njac[k-1][1][4];
lhs[k][AA][2][4] = - tmp2 * fjac[k-1][2][4]
- tmp1 * njac[k-1][2][4];
lhs[k][AA][3][4] = - tmp2 * fjac[k-1][3][4]
- tmp1 * njac[k-1][3][4];
lhs[k][AA][4][4] = - tmp2 * fjac[k-1][4][4]
- tmp1 * njac[k-1][4][4]
- tmp1 * dz5;
lhs[k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[k][BB][1][0] = tmp1 * 2.0 * njac[k][1][0];
lhs[k][BB][2][0] = tmp1 * 2.0 * njac[k][2][0];
lhs[k][BB][3][0] = tmp1 * 2.0 * njac[k][3][0];
lhs[k][BB][4][0] = tmp1 * 2.0 * njac[k][4][0];
lhs[k][BB][0][1] = tmp1 * 2.0 * njac[k][0][1];
lhs[k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[k][BB][2][1] = tmp1 * 2.0 * njac[k][2][1];
lhs[k][BB][3][1] = tmp1 * 2.0 * njac[k][3][1];
lhs[k][BB][4][1] = tmp1 * 2.0 * njac[k][4][1];
lhs[k][BB][0][2] = tmp1 * 2.0 * njac[k][0][2];
lhs[k][BB][1][2] = tmp1 * 2.0 * njac[k][1][2];
lhs[k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[k][BB][3][2] = tmp1 * 2.0 * njac[k][3][2];
lhs[k][BB][4][2] = tmp1 * 2.0 * njac[k][4][2];
lhs[k][BB][0][3] = tmp1 * 2.0 * njac[k][0][3];
lhs[k][BB][1][3] = tmp1 * 2.0 * njac[k][1][3];
lhs[k][BB][2][3] = tmp1 * 2.0 * njac[k][2][3];
lhs[k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[k][BB][4][3] = tmp1 * 2.0 * njac[k][4][3];
lhs[k][BB][0][4] = tmp1 * 2.0 * njac[k][0][4];
lhs[k][BB][1][4] = tmp1 * 2.0 * njac[k][1][4];
lhs[k][BB][2][4] = tmp1 * 2.0 * njac[k][2][4];
lhs[k][BB][3][4] = tmp1 * 2.0 * njac[k][3][4];
lhs[k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[k][4][4]
+ tmp1 * 2.0 * dz5;
lhs[k][CC][0][0] = tmp2 * fjac[k+1][0][0]
- tmp1 * njac[k+1][0][0]
- tmp1 * dz1;
lhs[k][CC][1][0] = tmp2 * fjac[k+1][1][0]
- tmp1 * njac[k+1][1][0];
lhs[k][CC][2][0] = tmp2 * fjac[k+1][2][0]
- tmp1 * njac[k+1][2][0];
lhs[k][CC][3][0] = tmp2 * fjac[k+1][3][0]
- tmp1 * njac[k+1][3][0];
lhs[k][CC][4][0] = tmp2 * fjac[k+1][4][0]
- tmp1 * njac[k+1][4][0];
lhs[k][CC][0][1] = tmp2 * fjac[k+1][0][1]
- tmp1 * njac[k+1][0][1];
lhs[k][CC][1][1] = tmp2 * fjac[k+1][1][1]
- tmp1 * njac[k+1][1][1]
- tmp1 * dz2;
lhs[k][CC][2][1] = tmp2 * fjac[k+1][2][1]
- tmp1 * njac[k+1][2][1];
lhs[k][CC][3][1] = tmp2 * fjac[k+1][3][1]
- tmp1 * njac[k+1][3][1];
lhs[k][CC][4][1] = tmp2 * fjac[k+1][4][1]
- tmp1 * njac[k+1][4][1];
lhs[k][CC][0][2] = tmp2 * fjac[k+1][0][2]
- tmp1 * njac[k+1][0][2];
lhs[k][CC][1][2] = tmp2 * fjac[k+1][1][2]
- tmp1 * njac[k+1][1][2];
lhs[k][CC][2][2] = tmp2 * fjac[k+1][2][2]
- tmp1 * njac[k+1][2][2]
- tmp1 * dz3;
lhs[k][CC][3][2] = tmp2 * fjac[k+1][3][2]
- tmp1 * njac[k+1][3][2];
lhs[k][CC][4][2] = tmp2 * fjac[k+1][4][2]
- tmp1 * njac[k+1][4][2];
lhs[k][CC][0][3] = tmp2 * fjac[k+1][0][3]
- tmp1 * njac[k+1][0][3];
lhs[k][CC][1][3] = tmp2 * fjac[k+1][1][3]
- tmp1 * njac[k+1][1][3];
lhs[k][CC][2][3] = tmp2 * fjac[k+1][2][3]
- tmp1 * njac[k+1][2][3];
lhs[k][CC][3][3] = tmp2 * fjac[k+1][3][3]
- tmp1 * njac[k+1][3][3]
- tmp1 * dz4;
lhs[k][CC][4][3] = tmp2 * fjac[k+1][4][3]
- tmp1 * njac[k+1][4][3];
lhs[k][CC][0][4] = tmp2 * fjac[k+1][0][4]
- tmp1 * njac[k+1][0][4];
lhs[k][CC][1][4] = tmp2 * fjac[k+1][1][4]
- tmp1 * njac[k+1][1][4];
lhs[k][CC][2][4] = tmp2 * fjac[k+1][2][4]
- tmp1 * njac[k+1][2][4];
lhs[k][CC][3][4] = tmp2 * fjac[k+1][3][4]
- tmp1 * njac[k+1][3][4];
lhs[k][CC][4][4] = tmp2 * fjac[k+1][4][4]
- tmp1 * njac[k+1][4][4]
- tmp1 * dz5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(KMAX) and rhs'(KMAX) will be sent to next cell.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[0][j][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[0][j][i] );
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
for (k = 1; k <= ksize-1; k++) {
//-------------------------------------------------------------------
// subtract A*lhs_vector(k-1) from lhs_vector(k)
//
// rhs(k) = rhs(k) - A*rhs(k-1)
//-------------------------------------------------------------------
matvec_sub(lhs[k][AA], rhs[k-1][j][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(k) = B(k) - C(k-1)*A(k)
// matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k)
//-------------------------------------------------------------------
matmul_sub(lhs[k][AA], lhs[k-1][CC], lhs[k][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[0][j][i] by b_inverse[0][j][i] and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[k][BB], lhs[k][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// Now finish up special cases for last cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[ksize][AA], rhs[ksize-1][j][i], rhs[ksize][j][i]);
//---------------------------------------------------------------------
// B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
// matmul_sub(AA,i,j,ksize,c,
// $ CC,i,j,ksize-1,c,BB,i,j,ksize)
//---------------------------------------------------------------------
matmul_sub(lhs[ksize][AA], lhs[ksize-1][CC], lhs[ksize][BB]);
//---------------------------------------------------------------------
// multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[ksize][BB], rhs[ksize][j][i] );
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(ksize)=rhs(ksize)
// else assume U(ksize) is loaded in un pack backsub_info
// so just use it
// after u(kstart) will be sent to next cell
//---------------------------------------------------------------------
for (k = ksize-1; k >= 0; k--) {
for (m = 0; m < BLOCK_SIZE; m++) {
for (n = 0; n < BLOCK_SIZE; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[k][CC][n][m]*rhs[k+1][j][i][n];
}
}
}
}
//kai
k14 = 0;
// printf("k14=%p\n",&k14);
}
if (timeron) timer_stop(t_zsolve);
}
|
convolutiondepthwise_5x5_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 5x5 convolution, stride 1, on pack-4 data (4 channels interleaved
// per v4f32 vector) using MIPS MSA intrinsics.
//
// bottom_blob : input, elempack-4; one channel-group per Mat channel
// top_blob    : pre-allocated output, same group count, outw x outh
// kernel      : one row per group holding the 25 taps as 25 v4f32 values
// _bias       : optional per-channel bias; an empty Mat yields a null pointer
//
// Each group is independent, so the group loop is OpenMP-parallelized.
static void convdw5x5s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias; // null when _bias is an empty Mat

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // broadcast this group's bias, or zero when there is no bias
        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g);

        // two output rows are produced per outer iteration
        float* outptr0 = out.row(0);
        float* outptr1 = out.row(1);

        const Mat img0 = bottom_blob.channel(g);

        // six consecutive input rows feed the two stacked 5x5 windows
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);
        const float* r5 = img0.row(5);

        int i = 0;
        // main loop: kernel row t is applied to input row t (for _sum0) and
        // to input row t+1 (for _sum1) while its taps are still in registers,
        // so each input row load is reused for both output rows
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 160);
                __builtin_prefetch(r1 + 160);
                __builtin_prefetch(r2 + 160);
                __builtin_prefetch(r3 + 160);
                __builtin_prefetch(r4 + 160);
                __builtin_prefetch(r5 + 160);

                __builtin_prefetch(k0 + 800);

                v4f32 _sum0 = _bias0;
                v4f32 _sum1 = _bias0;

                // kernel row 0
                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5; // advance to kernel row 1 (5 taps * 4 lanes)

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                // kernel row 0 against input row 1 -> second output row
                _sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
                _sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
                _sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
                _sum1 = __msa_fmadd_w(_sum1, _k03, _r13);
                _sum1 = __msa_fmadd_w(_sum1, _k04, _r14);

                // kernel row 1
                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
                _sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
                _sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
                _sum1 = __msa_fmadd_w(_sum1, _k13, _r23);
                _sum1 = __msa_fmadd_w(_sum1, _k14, _r24);

                // kernel row 2
                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
                _sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
                _sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
                _sum1 = __msa_fmadd_w(_sum1, _k23, _r33);
                _sum1 = __msa_fmadd_w(_sum1, _k24, _r34);

                // kernel row 3
                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k30, _r40);
                _sum1 = __msa_fmadd_w(_sum1, _k31, _r41);
                _sum1 = __msa_fmadd_w(_sum1, _k32, _r42);
                _sum1 = __msa_fmadd_w(_sum1, _k33, _r43);
                _sum1 = __msa_fmadd_w(_sum1, _k34, _r44);

                // kernel row 4
                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 -= 4 * 20; // rewind the four 4*5 advances: back to tap 0 for the next pixel

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                v4f32 _r50 = (v4f32)__msa_ld_w(r5, 0);
                v4f32 _r51 = (v4f32)__msa_ld_w(r5 + 4, 0);
                v4f32 _r52 = (v4f32)__msa_ld_w(r5 + 4 * 2, 0);
                v4f32 _r53 = (v4f32)__msa_ld_w(r5 + 4 * 3, 0);
                v4f32 _r54 = (v4f32)__msa_ld_w(r5 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k40, _r50);
                _sum1 = __msa_fmadd_w(_sum1, _k41, _r51);
                _sum1 = __msa_fmadd_w(_sum1, _k42, _r52);
                _sum1 = __msa_fmadd_w(_sum1, _k43, _r53);
                _sum1 = __msa_fmadd_w(_sum1, _k44, _r54);

                __msa_st_w((v4i32)_sum0, outptr0, 0);
                __msa_st_w((v4i32)_sum1, outptr1, 0);

                outptr0 += 4;
                outptr1 += 4;

                // stride 1: slide the window one pack-4 pixel to the right
                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
                r5 += 4;
            }

            // skip the 4-column right apron of the row, then one full extra
            // input row: two output rows were consumed per iteration
            // (assumes w == outw + 4, i.e. valid 5x5 conv — TODO confirm)
            r0 += 4 * 4 + w * 4;
            r1 += 4 * 4 + w * 4;
            r2 += 4 * 4 + w * 4;
            r3 += 4 * 4 + w * 4;
            r4 += 4 * 4 + w * 4;
            r5 += 4 * 4 + w * 4;

            // outptrs already advanced one row each; jump over the row the
            // other pointer just wrote
            outptr0 += outw * 4;
            outptr1 += outw * 4;
        }
        // remainder: one output row at a time (only _sum0 / rows r0..r4)
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 160);
                __builtin_prefetch(r1 + 160);
                __builtin_prefetch(r2 + 160);
                __builtin_prefetch(r3 + 160);
                __builtin_prefetch(r4 + 160);

                __builtin_prefetch(k0 + 800);

                v4f32 _sum0 = _bias0;

                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 -= 4 * 20; // rewind kernel pointer for the next pixel

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                __msa_st_w((v4i32)_sum0, outptr0, 0);

                outptr0 += 4;

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }

            // skip only the right apron: a single output row consumes one
            // input row
            r0 += 4 * 4;
            r1 += 4 * 4;
            r2 += 4 * 4;
            r3 += 4 * 4;
            r4 += 4 * 4;
        }
    }
}
// Depthwise 5x5 convolution, stride 2, on pack-4 data (4 channels interleaved
// per v4f32 vector) using MIPS MSA intrinsics.
//
// Same layout contract as convdw5x5s1_pack4_msa; the window moves two pack-4
// pixels to the right per output column and two input rows down per output
// row, so only one output row is produced per iteration.
static void convdw5x5s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // after a row of outputs the pointers advanced 2*outw pixels; skip the
    // (w - 2*outw) leftover columns plus one full extra row (stride 2 moves
    // the window down two input rows per output row); *4 for pack-4 floats
    const int tailstep = (w - 2 * outw + w) * 4;

    const float* bias = _bias; // null when _bias is an empty Mat

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // broadcast this group's bias, or zero when there is no bias
        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g);

        float* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        // five consecutive input rows feed one 5x5 window
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 160);
                __builtin_prefetch(r1 + 160);
                __builtin_prefetch(r2 + 160);
                __builtin_prefetch(r3 + 160);
                __builtin_prefetch(r4 + 160);

                __builtin_prefetch(k0 + 800);

                v4f32 _sum0 = _bias0;

                // kernel row 0 x input row 0
                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5; // advance to the next kernel row (5 taps * 4 lanes)

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                // kernel row 1 x input row 1
                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                // kernel row 2 x input row 2
                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                // kernel row 3 x input row 3
                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                // kernel row 4 x input row 4
                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 -= 4 * 20; // rewind the four 4*5 advances: back to tap 0 for the next pixel

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                __msa_st_w((v4i32)_sum0, outptr0, 0);

                outptr0 += 4;

                // stride 2: slide the window two pack-4 pixels to the right
                r0 += 4 * 2;
                r1 += 4 * 2;
                r2 += 4 * 2;
                r3 += 4 * 2;
                r4 += 4 * 2;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
|
task_dep_iterator2.c | #include<stdio.h>
/* Store val into the int object that p points to. */
void set_an_element(int*p, int val) {
    p[0] = val;
}
/* Print the first n ints of v to stdout as "x, y, z, " then a newline. */
void print_all_elements(int*v, int n) {
    int idx = 0;
    while (idx < n) {
        printf("%d, ", v[idx]);
        ++idx;
    }
    printf("\n");
}
/*
 * Fill v[0..n-1] with 0..n-1 using one task per element, then print the
 * array from a task that depends on every element.
 *
 * Fix: the original setter tasks used depend(out: v) — an out-dependence on
 * the whole array — which forced every setter task to serialize against the
 * previous one, and the iterator(it = 0:n) modifier on the print task was
 * useless because the in-dependence list item (v) never referenced it.
 * Per the OpenMP Examples on iterator-modified dependences, the setters
 * declare out: v[i] (element-wise, so they can run concurrently) and the
 * print task declares in: v[it] for every it in [0, n), making it wait for
 * all setters.
 */
void parallel_computation(int n) {
    int v[n];
    #pragma omp parallel
    #pragma omp single
    {
        int i;
        for (i = 0; i < n; ++i)
            /* element-wise out-dependence: setter tasks are independent */
            #pragma omp task depend(out: v[i])
            set_an_element(&v[i], i);
        /* iterator expands to n in-dependences, one per element */
        #pragma omp task depend(iterator(it = 0:n), in: v[it])
        print_all_elements(v, n);
    }
}
|
ParticleFilterOMP.h | //------------------------------------------------------------------------
// ____ _ _
// / ___|____ _ _ ____ ____| |__ | |
// | | / ___| | | | _ \/ ___| _ \| |
// | |___| | | |_| | | | | |___| | | ||_|
// \____|_| \_____|_| |_|\____|_| |_|(_) Media benchmarks
//
// 2006, Intel Corporation, licensed under Apache 2.0
//
// file : ParticleFilterOMP.h
// author : Scott Ettinger - scott.m.ettinger@intel.com
//
// description : OpenMP parallelized version of the particle filter
// object derived from ParticleFilter.h
//
// modified :
//--------------------------------------------------------------------------
#ifndef PARTICLEFILTEROMP_H
#define PARTICLEFILTEROMP_H
#if defined(HAVE_CONFIG_H)
# include "config.h"
#endif
#include <omp.h>
#include "ParticleFilter.h"
//OpenMP-parallelized particle filter: re-implements the two compute-heavy
//steps of ParticleFilter<T> (weight calculation and new-particle generation)
//with "#pragma omp parallel for" loops; everything else is inherited.
template<class T>
class ParticleFilterOMP : public ParticleFilter<T> {

	//bring inherited members of the dependent base class into scope
	//(required for unqualified name lookup inside a class template)
	using ParticleFilter<T>:: mModel;
	using ParticleFilter<T>:: mWeights;
	using ParticleFilter<T>:: mParticles;
	using ParticleFilter<T>:: mNewParticles;
	using ParticleFilter<T>:: mBestParticle;
	using ParticleFilter<T>:: mNParticles;
	using ParticleFilter<T>:: mMinParticles;
	using ParticleFilter<T>:: mBins;
	using ParticleFilter<T>:: mRnd;

	typedef typename ParticleFilter<T>::fpType fpType;
	typedef typename ParticleFilter<T>::Vectorf Vectorf;

protected:

	std::vector<int> mIndex;										//list of particles to regenerate (source bin index per new particle)

	//calculate particle weights - threaded version
	void CalcWeights(std::vector<Vectorf > &particles);				//calculate particle weights based on model likelihood

	//New particle generation - threaded version
	void GenerateNewParticles(int k);								//k = annealing step index into mModel->StdDevs()
};
//Calculate particle weights (mWeights) and find highest likelihood particle.
//computes an optimal annealing factor and scales the likelihoods.
//
//Phases: (1) parallel per-particle log-likelihood evaluation; (2) serial
//swap-with-last compaction of invalid particles (order NOT preserved);
//(3) serial exponentiation, best-particle tracking, and normalization.
template<class T>
void ParticleFilterOMP<T>::CalcWeights(std::vector<Vectorf > &particles)
{
	//per-particle validity flags; unsigned char rather than bool —
	//presumably to avoid std::vector<bool> bit-packing under concurrent
	//writes from the parallel loop (TODO confirm intent)
	std::vector<unsigned char> valid(particles.size());
	mBestParticle = 0;
	fpType total = 0, best = 0, minWeight = 1e30f, annealingFactor = 1;
	mWeights.resize(particles.size());
	int np = (int)particles.size(), j;

	#pragma omp parallel for //OpenMP parallelized loop to compute log-likelihoods
	for(j = 0; j < np; j++)
	{	bool vflag;
		//thread id is forwarded to the model — looks like per-thread
		//scratch selection inside LogLikelihood; verify against the model
		int n = omp_get_thread_num();
		mWeights[j] = mModel->LogLikelihood(particles[j], vflag, n);	//compute log-likelihood weights for each particle
		valid[j] = vflag ? 1 : 0;
	}

	//compact out invalid particles by overwriting each with the current
	//last element; particles, mWeights and valid shrink in lockstep, and i
	//only advances when slot i holds a (possibly newly swapped-in) valid entry
	uint i = 0;
	while(i < particles.size())
	{	if(!valid[i])												//if not valid(model prior), remove the particle from the list
		{	particles[i] = particles[particles.size() - 1];
			mWeights[i] = mWeights[particles.size() - 1];
			valid[i] = valid[valid.size() - 1];
			particles.pop_back(); mWeights.pop_back(); valid.pop_back();
		}
		else
			minWeight = std::min(mWeights[i++], minWeight);			//find minimum log-likelihood
	}
	if((int)particles.size() < mMinParticles) return;				//bail out if not enough valid particles
	mWeights -= minWeight;											//shift weights to zero for numerical stability
	if(mModel->StdDevs().size() > 1)
		annealingFactor = BetaAnnealingFactor(mWeights, 0.5f);		//calculate annealing factor if more than 1 step
	for(i = 0; i < mWeights.size(); i++)
	{	double wa = annealingFactor * mWeights[i];
		mWeights[i] = (float)exp(wa);								//exponentiate log-likelihoods scaled by annealing factor
		total += mWeights[i];										//save sum of all weights
		if(i == 0 || mWeights[i] > best)							//find highest likelihood particle
		{	best = mWeights[i];
			mBestParticle = i;
		}
	}
	mWeights *= fpType(1.0) / total;								//normalize weights
}
//generate new particles distributed with std deviation given by the model annealing parameter - threaded
//Each bin i contributes mBins[i] copies of particle i; every copy is then
//perturbed with Gaussian noise scaled by the annealing-step std deviation k.
template<class T>
void ParticleFilterOMP<T>::GenerateNewParticles(int k)
{
	mNewParticles.resize(mNParticles);
	mIndex.resize(mNParticles);

	//serial pass: expand bin counts into a flat source-index table
	int idx = 0;
	for(int bin = 0; bin < (int)mBins.size(); bin++)
	{	for(uint rep = 0; rep < mBins[bin]; rep++)
			mIndex[idx++] = bin;
	}

	//parallel pass: duplicate the selected particle and jitter it
	#pragma omp parallel for
	for(int i = 0; i < mNParticles; i++)
	{	mNewParticles[i] = mParticles[mIndex[i]];
		this->AddGaussianNoise(mNewParticles[i], mModel->StdDevs()[k], mRnd[i]);
	}
}
#endif
|
nstream-alloc-target.c | ///
/// Copyright (c) 2019, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: nstream
///
/// PURPOSE: To compute memory bandwidth when adding a vector of a given
/// number of double precision values to the scalar multiple of
/// another vector of the same length, and storing the result in
/// a third vector.
///
/// USAGE: The program takes as input the number
/// of iterations to loop over the triad vectors and
/// the length of the vectors.
///
/// <progname> <# iterations> <vector length>
///
/// The output consists of diagnostics to make sure the
/// algorithm worked, and of timing statistics.
///
/// NOTES: Bandwidth is determined as the number of words read, plus the
/// number of words written, times the size of the words, divided
/// by the execution time. For a vector length of N, the total
/// number of words read and written is 4*N*sizeof(double).
///
///
/// HISTORY: This code is loosely based on the Stream benchmark by John
/// McCalpin, but does not follow all the Stream rules. Hence,
/// reported results should not be associated with Stream in
/// external publications
///
/// Converted to C++11 by Jeff Hammond, November 2017.
/// Converted to C11 by Jeff Hammond, February 2019.
///
//////////////////////////////////////////////////////////////////////
#pragma omp requires unified_address
#include "prk_util.h"
#include "prk_openmp.h"
// nstream triad benchmark: A += B + scalar*C on an OpenMP target device,
// timed over <iterations> repetitions and validated against a host-side
// scalar recurrence. Returns 0 on success, 1 on bad input / failed check.
//
// Fixes vs. previous version:
//  - the vector length was parsed straight into a size_t, so a negative
//    argument wrapped to a huge unsigned value and the "must be greater
//    than 0" check could never fire; parse signed first, then convert.
//  - omp_target_alloc() can return NULL; that was never checked before
//    the device kernels dereferenced the pointers.
int main(int argc, char * argv[])
{
  printf("Parallel Research Kernels version %d\n", PRKVERSION );
  printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n");

  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////

  if (argc < 3) {
    printf("Usage: <# iterations> <vector length>\n");
    return 1;
  }

  int iterations = atoi(argv[1]);
  if (iterations < 1) {
    printf("ERROR: iterations must be >= 1\n");
    return 1;
  }

  // length of the vector; parse into a signed type first so that a
  // negative argument is rejected instead of wrapping to a huge size_t
  long length_in = atol(argv[2]);
  if (length_in < 1) {
    printf("ERROR: Vector length must be greater than 0\n");
    return 1;
  }
  size_t length = (size_t)length_in;

  // optional third argument selects the target device (default: host)
  int device = (argc > 3) ? atol(argv[3]) : omp_get_initial_device();
  if ( (device < 0 || omp_get_num_devices() <= device ) && (device != omp_get_initial_device()) ) {
    printf("ERROR: device number %d is not valid.\n", device);
    return 1;
  }

  printf("Number of iterations = %d\n", iterations);
  printf("Vector length        = %zu\n", length);
  printf("OpenMP Device        = %d\n", device);

  //////////////////////////////////////////////////////////////////////
  // Allocate space and perform the computation
  //////////////////////////////////////////////////////////////////////

  double nstream_time = 0.0;

  size_t bytes = length*sizeof(double);
  double * restrict A = omp_target_alloc(bytes, device);
  double * restrict B = omp_target_alloc(bytes, device);
  double * restrict C = omp_target_alloc(bytes, device);
  // omp_target_alloc returns NULL on failure; the device loops below
  // would dereference the pointers unconditionally
  if (A == NULL || B == NULL || C == NULL) {
    printf("ERROR: omp_target_alloc of %zu bytes failed\n", bytes);
    if (A != NULL) omp_target_free(A, device);
    if (B != NULL) omp_target_free(B, device);
    if (C != NULL) omp_target_free(C, device);
    return 1;
  }

  double scalar = 3.0;

  // initialize directly on the device
  #pragma omp target teams distribute parallel for simd schedule(static) device(device) is_device_ptr(A,B,C)
  for (size_t i=0; i<length; i++) {
    A[i] = 0.0;
    B[i] = 2.0;
    C[i] = 2.0;
  }

  {
    // iteration 0 is a warm-up: the timer only starts at iter==1,
    // so iterations+1 kernels run but iterations are timed
    for (int iter = 0; iter<=iterations; iter++) {
      if (iter==1) nstream_time = omp_get_wtime();
      #pragma omp target teams distribute parallel for simd schedule(static) device(device) is_device_ptr(A,B,C)
      for (size_t i=0; i<length; i++) {
        A[i] += B[i] + scalar * C[i];
      }
    }
    nstream_time = omp_get_wtime() - nstream_time;
  }

  omp_target_free(C, device);
  omp_target_free(B, device);

  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////

  // replay the triad recurrence on scalars to get the expected per-element
  // value (every element evolves identically), then scale by the length
  double ar = 0.0;
  double br = 2.0;
  double cr = 2.0;
  for (int i=0; i<=iterations; i++) {
    ar += br + scalar * cr;
  }
  ar *= length;

  double asum = 0.0;
  #pragma omp target teams distribute parallel for reduction(+:asum) device(device) is_device_ptr(A)
  for (size_t i=0; i<length; i++) {
    asum += fabs(A[i]);
  }

  omp_target_free(A, device);

  double epsilon=1.e-8;
  if (fabs(ar-asum)/asum > epsilon) {
    printf("Failed Validation on output array\n"
           "       Expected checksum: %lf\n"
           "       Observed checksum: %lf\n"
           "ERROR: solution did not validate\n", ar, asum);
    return 1;
  } else {
    printf("Solution validates\n");
    double avgtime = nstream_time/iterations;
    // 4 words moved per element per iteration: read B, read C, read+write A
    double nbytes = 4.0 * length * sizeof(double);
    printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime);
  }

  return 0;
}
|
GB_kroner.c | //------------------------------------------------------------------------------
// GB_kroner: Kronecker product, C = kron (A,B)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// C = kron(A,B) where op determines the binary multiplier to use. The type of
// A and B are compatible with the x and y inputs of z=op(x,y), but can be
// different. The type of C is the type of z. C is hypersparse if either A
// or B are hypersparse.
// FUTURE: GB_kron would be faster with built-in types and operators.
// FUTURE: at most one thread is used for each vector of C=kron(A,B). The
// matrix C is normally very large, but if both A and B are n-by-1, then C is
// n^2-by-1 and only a single thread is used. A better method for this case
// would construct vectors of C in parallel.
// FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not
// accounted for in the parallel load-balancing.
#include "GB_kron.h"
//------------------------------------------------------------------------------
// GB_kroner: C = kron (A,B), the Kronecker product
//------------------------------------------------------------------------------

// C = kron(A,B) where op determines the binary multiplier to use.  The types
// of A and B are compatible with the x and y inputs of z=op(x,y), but can be
// different.  The type of C is the type of z.  C is hypersparse if either A
// or B are hypersparse.
//
// On success, *Chandle holds the newly created matrix and GrB_SUCCESS is
// returned; on failure *Chandle is NULL and an error code is returned.

GrB_Info GB_kroner                  // C = kron (A,B)
(
    GrB_Matrix *Chandle,            // output matrix
    const bool C_is_csc,            // desired format of C
    const GrB_BinaryOp op,          // multiply operator
    const GrB_Matrix A,             // input matrix
    const GrB_Matrix B,             // input matrix
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (Chandle != NULL) ;
    ASSERT_OK (GB_check (A, "A for kron (A,B)", GB0)) ;
    ASSERT_OK (GB_check (B, "B for kron (A,B)", GB0)) ;
    ASSERT_OK (GB_check (op, "op for kron (A,B)", GB0)) ;
    // neither input may have pending tuples or zombies
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    (*Chandle) = NULL ;

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const GB_void *restrict Ax = A->x ;
    const int64_t asize = A->type->size ;
    const int64_t avlen = A->vlen ;
    const int64_t avdim = A->vdim ;
    int64_t anvec = A->nvec ;
    int64_t anz = GB_NNZ (A) ;

    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    const int64_t *restrict Bi = B->i ;
    const GB_void *restrict Bx = B->x ;
    const int64_t bsize = B->type->size ;
    const int64_t bvlen = B->vlen ;
    const int64_t bvdim = B->vdim ;
    int64_t bnvec = B->nvec ;
    int64_t bnz = GB_NNZ (B) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    // one multiply per entry pair, plus one pass over the vectors of C
    double work = ((double) anz) * ((double) bnz)
                + (((double) anvec) * ((double) bnvec)) ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (work, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // allocate the output matrix C
    //--------------------------------------------------------------------------

    // C has the same type as z for the multiply operator, z=op(x,y)
    GrB_Index cvlen, cvdim, cnzmax, cnvec ;

    // use logical && (not bitwise &) so the overflow checks short-circuit
    bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ;
    ok = ok && GB_Index_multiply (&cvdim, avdim, bvdim) ;
    ok = ok && GB_Index_multiply (&cnzmax, anz, bnz) ;
    ok = ok && GB_Index_multiply (&cnvec, anvec, bnvec) ;
    ASSERT (ok) ;

    // C is hypersparse if either A or B are hypersparse
    bool C_is_hyper = (cvdim > 1) && (A->is_hyper || B->is_hyper) ;
    GrB_Matrix C = NULL ;           // allocate a new header for C
    GB_CREATE (&C, op->ztype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc,
        C_is_csc, GB_SAME_HYPER_AS (C_is_hyper), B->hyper_ratio, cnvec,
        cnzmax, true, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // get C
    //--------------------------------------------------------------------------

    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ch = C->h ;
    int64_t *restrict Ci = C->i ;
    GB_void *restrict Cx = C->x ;
    const int64_t csize = C->type->size ;

    GxB_binary_function fmult = op->function ;
    GB_cast_function
        cast_A = GB_cast_factory (op->xtype->code, A->type->code),
        cast_B = GB_cast_factory (op->ytype->code, B->type->code) ;

    //--------------------------------------------------------------------------
    // compute the column counts of C, and C->h if C is hypersparse
    //--------------------------------------------------------------------------

    // vector kC = kA*bnvec + kB of C has exactly aknz*bknz entries, where
    // aknz and bknz are the entry counts of A(:,jA) and B(:,jB)
    #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2)
    for (int64_t kA = 0 ; kA < anvec ; kA++)
    {
        for (int64_t kB = 0 ; kB < bnvec ; kB++)
        {
            // get A(:,jA), the (kA)th vector of A
            int64_t jA = (Ah == NULL) ? kA : Ah [kA] ;
            int64_t aknz = Ap [kA+1] - Ap [kA] ;
            // get B(:,jB), the (kB)th vector of B
            int64_t jB = (Bh == NULL) ? kB : Bh [kB] ;
            int64_t bknz = Bp [kB+1] - Bp [kB] ;
            // determine # entries in C(:,jC), the (kC)th vector of C
            int64_t kC = kA * bnvec + kB ;
            Cp [kC] = aknz * bknz ;
            if (C_is_hyper)
            {
                Ch [kC] = jA * bvdim + jB ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // replace Cp with its cumulative sum
    //--------------------------------------------------------------------------

    GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ;
    if (C_is_hyper) C->nvec = cnvec ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // C = kron (A,B)
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2)
    for (int64_t kA = 0 ; kA < anvec ; kA++)
    {
        for (int64_t kB = 0 ; kB < bnvec ; kB++)
        {
            // get B(:,jB), the (kB)th vector of B
            int64_t pB_start = Bp [kB] ;
            int64_t pB_end   = Bp [kB+1] ;
            // FIX: was (pB_start - pB_end), which is the negated count.  It
            // only worked by accident because bknz is compared solely to 0.
            int64_t bknz = pB_end - pB_start ;
            if (bknz == 0) continue ;
            GB_void bwork [bsize] ;
            // get C(:,jC), the (kC)th vector of C
            int64_t kC = kA * bnvec + kB ;
            int64_t pC = Cp [kC] ;
            // get A(:,jA), the (kA)th vector of A
            int64_t pA_start = Ap [kA] ;
            int64_t pA_end   = Ap [kA+1] ;
            GB_void awork [asize] ;
            for (int64_t pA = pA_start ; pA < pA_end ; pA++)
            {
                // awork = A(iA,jA), typecasted to op->xtype
                int64_t iA = Ai [pA] ;
                int64_t iAblock = iA * bvlen ;
                cast_A (awork, Ax +(pA*asize), asize) ;
                for (int64_t pB = pB_start ; pB < pB_end ; pB++)
                {
                    // bwork = B(iB,jB), typecasted to op->ytype
                    int64_t iB = Bi [pB] ;
                    cast_B (bwork, Bx +(pB*bsize), bsize) ;
                    // C(iC,jC) = A(iA,jA) * B(iB,jB)
                    int64_t iC = iAblock + iB ;
                    Ci [pC] = iC ;
                    fmult (Cx +(pC*csize), awork, bwork) ;
                    pC++ ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    if (C_is_hyper && C->nvec_nonempty < cnvec)
    {
        // create new Cp_new and Ch_new arrays, with no empty vectors
        int64_t *restrict Cp_new = NULL ;
        int64_t *restrict Ch_new = NULL ;
        int64_t nvec_new ;
        info = GB_hyper_prune (&Cp_new, &Ch_new, &nvec_new, C->p, C->h, cnvec,
            Context) ;
        if (info != GrB_SUCCESS)
        {
            // out of memory
            GB_MATRIX_FREE (&C) ;
            return (info) ;
        }
        // transplant the new hyperlist into C
        GB_FREE_MEMORY (C->p, cnvec+1, sizeof (int64_t)) ;
        GB_FREE_MEMORY (C->h, cnvec, sizeof (int64_t)) ;
        C->p = Cp_new ;
        C->h = Ch_new ;
        C->nvec = nvec_new ;
        C->plen = nvec_new ;
        ASSERT (C->nvec == C->nvec_nonempty) ;
    }
    ASSERT (C->nvec_nonempty == GB_nvec_nonempty (C, Context)) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    ASSERT_OK (GB_check (C, "C=kron(A,B)", GB0)) ;
    (*Chandle) = C ;
    return (GrB_SUCCESS) ;
}
|
lis_matvec_msr.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
/*
 * y = A*x for a matrix stored in MSR (modified sparse row) format.
 *
 * In MSR the diagonal of row i is held in value[i]; the off-diagonal
 * entries of row i occupy value[index[i]..index[i+1]-1] and their column
 * numbers sit in the same slots of index[].  When A->is_splited is set,
 * the diagonal D and the strictly lower/upper parts L and U are stored
 * in separate sub-structures with the same layout.
 *
 * Rows are independent, so the row loop is parallelized with OpenMP.
 */
void lis_matvec_msr(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
	LIS_INT row,p,p1,col;
	LIS_INT n;
	LIS_SCALAR sum;

	n = A->n;
	if( A->is_splited )
	{
#ifdef _OPENMP
#pragma omp parallel for private(row,p,p1,sum,col)
#endif
		for(row=0; row<n; row++)
		{
			/* start with the diagonal contribution */
			sum = A->D->value[row] * x[row];
			/* strictly lower triangular part */
			for(p=A->L->index[row], p1=A->L->index[row+1]; p<p1; p++)
			{
				col  = A->L->index[p];
				sum += A->L->value[p] * x[col];
			}
			/* strictly upper triangular part */
			for(p=A->U->index[row], p1=A->U->index[row+1]; p<p1; p++)
			{
				col  = A->U->index[p];
				sum += A->U->value[p] * x[col];
			}
			y[row] = sum;
		}
	}
	else
	{
#ifdef _OPENMP
#pragma omp parallel for private(row,p,p1,sum,col)
#endif
		for(row=0; row<n; row++)
		{
			/* diagonal entry, then the off-diagonals of this row */
			sum = A->value[row] * x[row];
			for(p=A->index[row], p1=A->index[row+1]; p<p1; p++)
			{
				col  = A->index[p];
				sum += A->value[p] * x[col];
			}
			y[row] = sum;
		}
	}
}
/*
 * y = A^T * x (transposed matrix-vector product) for the MSR format.
 *
 * Unlike lis_matvec_msr this scatters contributions column-wise
 * (y[jj] += a_ij * x[i]), so rows are NOT independent.  The OpenMP
 * version therefore gives each thread its own length-np scratch vector
 * inside w[] and reduces the per-thread vectors into y at the end.
 * The split (D/L/U) branch is sequential.
 */
void lis_matvect_msr(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
	LIS_INT i,j,js,je,jj;
	LIS_INT n,np;
	LIS_SCALAR t;
	#ifdef _OPENMP
		LIS_INT k,is,ie,nprocs;
		LIS_SCALAR *w;   /* nprocs contiguous scratch vectors of length np */
	#endif

	n  = A->n;
	np = A->np;
	if( A->is_splited )
	{
		/* diagonal pass: y = D*x */
		for(i=0; i<n; i++)
		{
			y[i] = A->D->value[i] * x[i];
		}
		/* scatter the transposed L and U contributions */
		for(i=0; i<n; i++)
		{
			t = x[i];
			js = A->L->index[i];
			je = A->L->index[i+1];
			for(j=js;j<je;j++)
			{
				jj = A->L->index[j];
				y[jj] += A->L->value[j] * t;
			}
			js = A->U->index[i];
			je = A->U->index[i+1];
			for(j=js;j<je;j++)
			{
				jj = A->U->index[j];
				y[jj] += A->U->value[j] * t;
			}
		}
	}
	else
	{
		#ifdef _OPENMP
			nprocs = omp_get_max_threads();
			w = (LIS_SCALAR *)lis_malloc( nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_msr::w" );
			#pragma omp parallel private(i,j,js,je,t,jj,k)
			{
				k = omp_get_thread_num();
				/* zero all per-thread scratch vectors (shared among the team) */
				#pragma omp for
				for(j=0;j<nprocs;j++)
				{
					memset( &w[j*np], 0, np*sizeof(LIS_SCALAR) );
				}
				/* each thread scatters its rows into its own slice w[k*np..] */
				#pragma omp for
				for(i=0; i<n; i++)
				{
					js = A->index[i];
					je = A->index[i+1];
					t = x[i];
					for(j=js;j<je;j++)
					{
						jj = k*np+A->index[j];
						w[jj] += A->value[j] * t;
					}
					/* diagonal term: value[i] is the diagonal in MSR */
					w[k*np+i] += A->value[i] * x[i];
				}
				/* reduction: y[i] = sum over threads of w[thread*np+i] */
				#pragma omp for
				for(i=0;i<np;i++)
				{
					t = 0.0;
					for(j=0;j<nprocs;j++)
					{
						t += w[j*np+i];
					}
					y[i] = t;
				}
			}
			lis_free(w);
		#else
			/* sequential: diagonal first, then column-wise scatter */
			for(i=0; i<n; i++)
			{
				y[i] = A->value[i] * x[i];
			}
			for(i=0; i<n; i++)
			{
				t = x[i];
				js = A->index[i];
				je = A->index[i+1];
				for(j=js;j<je;j++)
				{
					jj = A->index[j];
					y[jj] += A->value[j] * t;
				}
			}
		#endif
	}
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// A row-major result C = A*B is evaluated as the column-major product
// C^T = B^T * A^T: the operands are swapped and each operand's storage
// order is flipped, then the col-major kernel below does the work.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,
LhsScalar,
LhsStorageOrder,
ConjugateLhs,
RhsScalar,
RhsStorageOrder,
ConjugateRhs,
RowMajor> {
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// res (rows x cols, stride resStride) += alpha * lhs * rhs
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar *lhs, Index lhsStride,
const RhsScalar *rhs, Index rhsStride,
ResScalar *res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar, LhsScalar> &blocking,
GemmParallelInfo<Index> *info = 0) {
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking, info);
}
};
/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
// res += alpha * lhs * rhs, blocked along K (kc) and M (mc) so the packed
// panels fit in cache.  With OpenMP and a non-null `info`, the rhs panel
// is packed cooperatively: thread j packs slice B'_j, and the info[]
// entries (users / sync counters) provide the lock-free handshaking
// between threads.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,
LhsScalar,
LhsStorageOrder,
ConjugateLhs,
RhsScalar,
RhsStorageOrder,
ConjugateRhs,
ColMajor> {
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar *_lhs, Index lhsStride,
const RhsScalar *_rhs, Index rhsStride,
ResScalar *res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar, RhsScalar> &blocking,
GemmParallelInfo<Index> *info = 0) {
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs, lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs, rhsStride);
typedef gebp_traits<LhsScalar, RhsScalar> Traits;
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows, blocking.mc()); // cache block size along the M direction
//Index nc = blocking.nc(); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
// per-thread packed-lhs block and kernel workspace live on the stack;
// the packed rhs panel (blockB) is shared across the whole team
std::size_t sizeA = kc*mc;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
RhsScalar* blockB = blocking.blockB();
eigen_internal_assert(blockB!=0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'.
pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
// Pack B_k to B' in a parallel fashion:
// each thread packs the sub block B_k,j to B'_j where j is the thread id.
// However, before copying to B'_j, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}   // spin-wait until B'_tid is free
info[tid].users += threads;
pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
// Notify the other threads that the part B'_j is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per B'_j
// (starting at shift=0, i.e. our own slice, which needs no waiting)
for(Index shift=0; shift<threads; ++shift)
{
Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if(shift>0)
while(info[j].sync!=k) {}   // spin-wait for thread j's pack of this k-panel
gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
}
// Then keep going as usual with the remaining A'
for(Index i=mc; i<rows; i+=mc)
{
const Index actual_mc = (std::min)(i+mc,rows)-i;
// pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
// C_i += A' * B'
gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
}
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index j=0; j<threads; ++j)
{
#pragma omp atomic
info[j].users -= 1;
}
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc * mc;
std::size_t sizeB = kc * cols;
std::size_t sizeW = kc * Traits::WorkSpaceFactor;
// reuse the blocking object's buffers when present, otherwise the
// macro falls back to stack/heap allocation
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
for (Index k2 = 0; k2 < depth; k2 += kc) {
const Index actual_kc = (std::min)(k2 + kc, depth) - k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
// Note that this panel will be read as many times as the number of blocks in the lhs's
// vertical panel which is, in practice, a very low number.
pack_rhs(blockB, &rhs(k2, 0), rhsStride, actual_kc, cols);
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
for (Index i2 = 0; i2 < rows; i2 += mc) {
const Index actual_mc = (std::min)(i2 + mc, rows) - i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro vertical panel of the large rhs's panel (e.g., cols/4 times).
pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
// Everything is packed, we can now call the block * panel kernel:
gebp(res + i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
}
}
}
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// GeneralProduct<Lhs,Rhs,GemmProduct> inherits its expression traits
// from the corresponding ProductBase expression.
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct < Lhs, Rhs, GemmProduct> >
: traits<ProductBase < GeneralProduct < Lhs, Rhs, GemmProduct>, Lhs, Rhs> > {
};
// Functor bundling one GEMM work item: the two operands, the destination,
// the scalar factor and the blocking object.  It is the callable handed to
// internal::parallelize_gemm (see GeneralProduct::scaleAndAddTo below).
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor {
gemm_functor(const Lhs &lhs, const Rhs &rhs, Dest &dest, const Scalar &actualAlpha,
BlockingType &blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {}
// Pre-allocate the shared packed-rhs buffer before threads are spawned.
void initParallelSession() const {
m_blocking.allocateB();
}
// Run the product on the sub-block starting at (row, col) of size
// rows x cols; cols == -1 means "all remaining columns".  `info` carries
// the per-thread synchronization state for the parallel code path.
void operator()(Index row, Index rows, Index col = 0, Index cols = -1, GemmParallelInfo<Index> *info = 0) const {
if (cols == -1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
/*(const Scalar*)*/&m_lhs.coeffRef(row, 0), m_lhs.outerStride(),
/*(const Scalar*)*/&m_rhs.coeffRef(0, col), m_rhs.outerStride(),
(Scalar *) &(m_dest.coeffRef(row, col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
protected:
const Lhs &m_lhs;
const Rhs &m_rhs;
Dest &m_dest;
Scalar m_actualAlpha;
BlockingType &m_blocking;
};
// Primary template, specialized below: FiniteAtCompileTime == true uses
// statically sized buffers; the false case sizes and allocates them at
// run time.
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor = 1,
bool FiniteAtCompileTime = MaxRows != Dynamic && MaxCols != Dynamic && MaxDepth != Dynamic>
class gemm_blocking_space;
// Base class holding the blocking sizes (mc, nc, kc) and the pointers to
// the packed buffers blockA/blockB/blockW used by the GEMM kernels.  It
// does not allocate or free the buffers itself; that is done by the
// derived gemm_blocking_space specializations below.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking {
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar *m_blockA;   // packed lhs block
RhsScalar *m_blockB;   // packed rhs panel
RhsScalar *m_blockW;   // kernel workspace
DenseIndex m_mc;       // block size along the M (rows) direction
DenseIndex m_nc;       // block size along the N (cols) direction
DenseIndex m_kc;       // block size along the K (depth) direction
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0) {}
inline DenseIndex mc() const { return m_mc; }
inline DenseIndex nc() const { return m_nc; }
inline DenseIndex kc() const { return m_kc; }
inline LhsScalar *blockA() { return m_blockA; }
inline RhsScalar *blockB() { return m_blockB; }
inline RhsScalar *blockW() { return m_blockW; }
};
// Fixed-size case: all dimensions are known at compile time, so the packed
// blocks live in 16-byte-aligned static member arrays and the allocateX()
// methods are no-ops.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, true>
: public level3_blocking<
typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> {
enum {
// a row-major destination is computed as the transposed product, so
// the lhs/rhs roles (and their scalar types) are swapped
Transpose = StorageOrder == RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar, RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth,
SizeW = MaxDepth * Traits::WorkSpaceFactor
};
EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
public:
// run-time sizes are ignored: blocking covers the full compile-time extents
gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/) {
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
this->m_blockW = m_staticW;
}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateW() {}
inline void allocateAll() {}
};
// Dynamic-size case: block sizes are computed at run time from the actual
// problem dimensions, and the packed buffers are heap-allocated lazily via
// the allocateX() methods (so unused buffers cost nothing) and freed in
// the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> {
enum {
// row-major destination => transposed product => swapped lhs/rhs roles
Transpose = StorageOrder == RowMajor
};
typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar, RhsScalar> Traits;
DenseIndex m_sizeA;   // elements in the packed lhs block
DenseIndex m_sizeB;   // elements in the packed rhs panel
DenseIndex m_sizeW;   // elements in the kernel workspace
public:
gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth) {
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
// shrink kc/mc/nc to cache-friendly sizes
computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, this->m_mc, this->m_nc);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
m_sizeW = this->m_kc * Traits::WorkSpaceFactor;
}
void allocateA() {
if (this->m_blockA == 0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB() {
if (this->m_blockB == 0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateW() {
if (this->m_blockW == 0)
this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
}
void allocateAll() {
allocateA();
allocateB();
allocateW();
}
~gemm_blocking_space() {
// aligned_delete on a null pointer is the no-op case for never-allocated buffers
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
aligned_delete(this->m_blockW, m_sizeW);
}
};
} // end namespace internal
// High-level wrapper for "large" GEMM products: extracts the scalar factors
// and storage orders from the operand expressions, builds a blocking object,
// and dispatches to general_matrix_matrix_product, possibly in parallel.
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
: public ProductBase<GeneralProduct<Lhs, Rhs, GemmProduct>, Lhs, Rhs> {
enum {
// compile-time bound on the inner (depth) dimension, if any
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime, Rhs::MaxRowsAtCompileTime)
};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef Scalar ResScalar;
GeneralProduct(const Lhs &lhs, const Rhs &rhs) : Base(lhs, rhs) {
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
// reject products between incompatible scalar types at compile time
typedef internal::scalar_product_op<LhsScalar, RhsScalar> BinOp;
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp, LhsScalar, RhsScalar);
#endif
}
// dst += alpha * lhs * rhs; dst must already have the product's size
template<typename Dest>
void scaleAndAddTo(Dest &dst, const Scalar &alpha) const {
eigen_assert(dst.rows() == m_lhs.rows() && dst.cols() == m_rhs.cols());
// empty product: nothing to accumulate
if (m_lhs.cols() == 0 || m_lhs.rows() == 0 || m_rhs.cols() == 0)
return;
// strip transpose/conjugate/scalar-multiple wrappers from the operands...
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
// ...and fold their scalar factors into a single alpha
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags & RowMajorBit) ? RowMajor : ColMajor, LhsScalar, RhsScalar,
Dest::MaxRowsAtCompileTime, Dest::MaxColsAtCompileTime, MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (_ActualLhsType::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (_ActualRhsType::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags & RowMajorBit) ? RowMajor : ColMajor>,
_ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
// parallelize only when the row count can be large enough to be worth it
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime > 32 || Dest::MaxRowsAtCompileTime == Dynamic)>(GemmFunctor(
lhs,
rhs,
dst,
actualAlpha,
blocking), this->rows(), this->cols(), Dest::Flags & RowMajorBit);
}
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
jacobi.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
// Weighted Jacobi smoother: performs NUM_SMOOTHS sweeps of
//   x_{n+1} = x_n + weight * lambda * (rhs - A x_n)
// ping-ponging between the x_id vector and VECTOR_TEMP (hence the
// requirement that NUM_SMOOTHS be even, so the result ends up in x_id).
// When the boxes carry more ghost cells than the stencil radius
// (communicationAvoiding), several sweeps are done per halo exchange by
// shrinking the operated-on ghost region by one cell each sub-sweep.
// Parameters a, b are accepted for interface uniformity with the other
// smoothers; they are not read here (the operator is applied via the
// apply_op_ijk macro, defined elsewhere).
void smooth(level_type * level, int x_id, int rhs_id, double a, double b){
  if(NUM_SMOOTHS&1){
    printf("error - NUM_SMOOTHS must be even...\n");
    exit(0);
  }

  int box,s;
  int ghosts = level->box_ghosts;
  int radius = STENCIL_RADIUS;
  int starShaped = STENCIL_STAR_SHAPED;
  int communicationAvoiding = ghosts > radius;

  #ifdef USE_L1JACOBI
  double weight = 1.0;       // L1-Jacobi: lambda is the L1 row-sum inverse
  #else
  double weight = 2.0/3.0;   // classic damped Jacobi weight
  #endif

  // if communication-avoiding, need updated RHS for stencils in ghost zones
  if(communicationAvoiding)exchange_boundary(level,rhs_id,0);

  for(s=0;s<NUM_SMOOTHS;s+=ghosts){
    // Jacobi ping pongs between x_id and VECTOR_TEMP
    if((s&1)==0){exchange_boundary(level, x_id,STENCIL_IS_STAR_SHAPED && !communicationAvoiding);apply_BCs(level, x_id);}
    else{exchange_boundary(level,VECTOR_TEMP,STENCIL_IS_STAR_SHAPED && !communicationAvoiding);apply_BCs(level,VECTOR_TEMP);}

    // now do ghosts communication-avoiding smooths on each box...
    uint64_t _timeStart = CycleTime();

    // outer parallelism across boxes (OMP_THREAD_ACROSS_BOXES /
    // OMP_THREAD_WITHIN_A_BOX are project macros controlling nesting)
    #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
    for(box=0;box<level->num_my_boxes;box++){
      int i,j,k,ss;
      const int jStride = level->my_boxes[box].jStride;
      const int kStride = level->my_boxes[box].kStride;
      const int     dim = level->my_boxes[box].dim;
      const double h2inv = 1.0/(level->h*level->h);
      // all vector pointers are shifted past the ghost-zone corner so that
      // (i,j,k)=(0,0,0) addresses the first interior cell
      const double * __restrict__ rhs    = level->my_boxes[box].vectors[       rhs_id] + ghosts*(1+jStride+kStride);
      const double * __restrict__ alpha  = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
      const double * __restrict__ valid  = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain
      #ifdef USE_L1JACOBI
      const double * __restrict__ lambda = level->my_boxes[box].vectors[VECTOR_L1INV ] + ghosts*(1+jStride+kStride);
      #else
      const double * __restrict__ lambda = level->my_boxes[box].vectors[VECTOR_DINV  ] + ghosts*(1+jStride+kStride);
      #endif

      // each sub-sweep consumes one layer of ghost cells
      int ghostsToOperateOn=ghosts-1;
      for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){
        const double * __restrict__ x_n;
              double * __restrict__ x_np1;
        // select read/write vectors according to the ping-pong parity
        if((ss&1)==0){x_n   = level->my_boxes[box].vectors[        x_id] + ghosts*(1+jStride+kStride);
                      x_np1 = level->my_boxes[box].vectors[VECTOR_TEMP] + ghosts*(1+jStride+kStride);}
                else{x_n   = level->my_boxes[box].vectors[VECTOR_TEMP] + ghosts*(1+jStride+kStride);
                      x_np1 = level->my_boxes[box].vectors[        x_id] + ghosts*(1+jStride+kStride);}
        // inner parallelism within a box; iteration space extends
        // ghostsToOperateOn cells into the ghost region on every side
        #pragma omp parallel for private(k,j,i) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
        for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
        for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
        for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){
          int ijk = i + j*jStride + k*kStride;
          double Ax_n = apply_op_ijk(x_n);
          x_np1[ijk] = x_n[ijk] + weight*lambda[ijk]*(rhs[ijk]-Ax_n);
        }}}
      } // ss-loop
    } // box-loop
    level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart);
  } // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
9591.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#define EXTRALARGE_DATASET
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Fill the data matrix with the deterministic pattern i*j/M and set the
   normalization constant *float_n (fixed at 1.2). */
static
void init_array (int m,
		 int n,
		 DATA_TYPE *float_n,
		 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int row, col;

  *float_n = 1.2;

  for (row = 0; row < m; row++)
    for (col = 0; col < n; col++)
      data[row][col] = ((DATA_TYPE) row*col) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int m,
		 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int r, c;

  for (r = 0; r < m; r++)
    for (c = 0; c < m; c++)
      {
	fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[r][c]);
	/* line break every 20 values to keep the dump readable */
	if ((r * m + c) % 20 == 0) fprintf (stderr, "\n");
      }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes the m x m correlation matrix of the n x m data matrix:
   column means, column standard deviations, standardization of the
   columns, then the pairwise correlations.  The four worksharing loops
   run inside one parallel region with 4 threads; each loop index (j, i,
   j1) is privatized by its `omp for` construct, and implicit barriers
   between the loops order the phases. */
static
void kernel_correlation(int m, int n,
			DATA_TYPE float_n,
			DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
			DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
			DATA_TYPE POLYBENCH_1D(mean,M,m),
			DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
  int i, j, j1, j2;

  DATA_TYPE eps = 0.1f;   /* threshold below which a std. dev. is treated as zero */

#define sqrt_of_array_cell(x,j) sqrt(x[j])

#pragma scop
  /* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(4)
  {
#pragma omp for schedule(static, 1)
    for (j = 0; j < _PB_M; j++)
      {
	mean[j] = 0.0;
	for (i = 0; i < _PB_N; i++)
	  mean[j] += data[i][j];
	mean[j] /= float_n;
      }

    /* Determine standard deviations of column vectors of data matrix. */
#pragma omp for schedule(static, 1)
    for (j = 0; j < _PB_M; j++)
      {
	stddev[j] = 0.0;
	for (i = 0; i < _PB_N; i++)
	  stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
	stddev[j] /= float_n;
	stddev[j] = sqrt_of_array_cell(stddev, j);
	/* The following in an inelegant but usual way to handle
	   near-zero std. dev. values, which below would cause a zero-
	   divide. */
	stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
      }

    /* Center and reduce the column vectors. */
#pragma omp for schedule(static, 1)
    for (i = 0; i < _PB_N; i++)
      for (j = 0; j < _PB_M; j++)
	{
	  data[i][j] -= mean[j];
	  data[i][j] /= sqrt(float_n) * stddev[j];
	}

    /* Calculate the m * m correlation matrix.  Each thread owns a set of
       j1 rows; the symmetric element symmat[j2][j1] (j2 > j1) is written
       only by the thread owning j1, so there is no write conflict. */
#pragma omp for schedule(static, 1)
    for (j1 = 0; j1 < _PB_M-1; j1++)
      {
	symmat[j1][j1] = 1.0;
	for (j2 = j1+1; j2 < _PB_M; j2++)
	  {
	    symmat[j1][j2] = 0.0;
	    for (i = 0; i < _PB_N; i++)
	      symmat[j1][j2] += (data[i][j1] * data[i][j2]);
	    symmat[j2][j1] = symmat[j1][j2];
	  }
      }
  }
#pragma endscop
  /* last diagonal element, not covered by the j1 loop above */
  symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Driver: allocate the PolyBench arrays, initialize them, time the
   correlation kernel, and print the live-out array so dead-code
   elimination cannot remove the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;

  /* Variable declaration/allocation. */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);

  /* Initialize array(s). */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_correlation (m, n, float_n,
		      POLYBENCH_ARRAY(data),
		      POLYBENCH_ARRAY(symmat),
		      POLYBENCH_ARRAY(mean),
		      POLYBENCH_ARRAY(stddev));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(data);
  POLYBENCH_FREE_ARRAY(symmat);
  POLYBENCH_FREE_ARRAY(mean);
  POLYBENCH_FREE_ARRAY(stddev);

  return 0;
}
|
omp_prod_vet.c | #include <stdio.h>
#include <omp.h>
#define tamanho 100
/* Dot product of two float vectors computed with an OpenMP
 * parallel-for reduction.  Demonstrates schedule(static,chunk)
 * combined with reduction(+:result). */
int main ()
{
  int i, chunk;
  float a[tamanho], b[tamanho], result;

  /* Some initializations */
  chunk = 10;              /* iterations handed out per scheduling round */
  result = 0.0;
  for (i = 0; i < tamanho; i++)
  {
    a[i] = i * 1.0;
    b[i] = i * 2.0;
  }

  /* Each thread accumulates a private partial sum; the reduction
   * clause combines the partial sums into `result` at loop exit. */
#pragma omp parallel for \
default(shared) private(i) \
schedule(static,chunk) \
reduction(+:result)
  for (i = 0; i < tamanho; i++)
    result = result + (a[i] * b[i]);

  printf("Final result= %f\n", result);
  return 0;   /* explicit exit status (required before C99) */
}
|
3431.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Fill the ATAX operands: x[i] = i*pi and A[i][j] = i*(j+1)/nx. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int row, col;

  for (col = 0; col < ny; col++)
    x[col] = col * M_PI;

  for (row = 0; row < nx; row++)
    for (col = 0; col < ny; col++)
      A[row][col] = ((DATA_TYPE) row * (col + 1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the live-out vector y to stderr (prevents dead-code elimination
and doubles as a correctness check).  A newline is emitted before every
20th element, matching the standard PolyBench output format. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int idx;

  for (idx = 0; idx < nx; idx++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, y[idx]);
    if ((idx % 20) == 0)
      fprintf (stderr, "\n");
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: y = A^T (A x).  The whole function is
timed, including the call and return.
Bug fix: the original parallelized only the i-loop and accumulated
`y[j] += A[i][j] * tmp[i]` from every thread concurrently -- an
unsynchronized data race on the shared vector y.  The accumulation is
now a separate loop parallelized over j, so each thread writes a
disjoint set of y entries; the implicit barrier after the first
worksharing loop guarantees tmp[] is complete before y is built. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;
#pragma scop
#pragma omp parallel num_threads(2)
  {
    /* tmp = A * x : each thread owns distinct rows i, no sharing. */
#pragma omp for private (j) schedule(static, 1)
    for (i = 0; i < _PB_NX; i++)
    {
      tmp[i] = 0;
      for (j = 0; j < _PB_NY; j++)
        tmp[i] = tmp[i] + A[i][j] * x[j];
    }
    /* y = A^T * tmp : parallel over j so y[j] is thread-private. */
#pragma omp for private (i) schedule(static, 1)
    for (j = 0; j < _PB_NY; j++)
    {
      y[j] = 0;
      for (i = 0; i < _PB_NX; i++)
        y[j] = y[j] + A[i][j] * tmp[i];
    }
  }
#pragma endscop
}
/* Driver for the PolyBench ATAX benchmark: allocates A, x, y, tmp,
initializes the inputs, times the kernel, and prints the live-out
vector y to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
zero_omp.c | /*
* File: zero_omp.c
* Author: Philip Mucci
* mucci@cs.utk.edu
* Mods: Nils Smeds
* smeds@pdc.kth.se
* Anders Nilsson
* anni@pdc.kth.se
*/
/* This file performs the following test: start, stop and timer
functionality for 2 slave OMP threads
- It attempts to use the following two counters. It may use less
depending on hardware counter resource limitations. These are counted
in the default counting domain and default granularity, depending on
the platform. Usually this is the user domain (PAPI_DOM_USER) and
thread context (PAPI_GRN_THR).
+ PAPI_FP_INS
+ PAPI_TOT_CYC
Each thread inside the Thread routine:
- Get cyc.
- Get us.
- Start counters
- Do flops
- Stop and read counters
- Get us.
- Get cyc.
Master serial thread:
- Get us.
- Get cyc.
- Run parallel for loop
- Get us.
- Get cyc.
*/
#include "papi_test.h"
#ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif
const PAPI_hw_info_t *hw_info = NULL;
/* Per-thread measurement body.  Builds a two-event PAPI event set
(PAPI_TOT_CYC plus one FP/instruction event chosen by
add_two_events), counts a do_flops(n) workload, and prints the
counter values together with wall-clock time for this OMP thread. */
void
Thread( int n )
{
int retval, num_tests = 1;
int EventSet1 = PAPI_NULL;
int PAPI_event, mask1;
int num_events1;
long long **values;
long long elapsed_us, elapsed_cyc;
char event_name[PAPI_MAX_STR_LEN];
printf( "Thread %#x started\n", omp_get_thread_num( ) );
num_events1 = 2;
/* add PAPI_TOT_CYC and one of the events in
PAPI_FP_INS, PAPI_FP_OPS or PAPI_TOT_INS,
depending on the availability of the event
on the platform */
EventSet1 = add_two_events( &num_events1, &PAPI_event, &mask1 );
retval = PAPI_event_code_to_name( PAPI_event, event_name );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval );
values = allocate_test_space( num_tests, num_events1 );
/* Bracket the counted region with wall-clock readings so the
counter results can be sanity-checked against elapsed time. */
elapsed_us = PAPI_get_real_usec( );
elapsed_cyc = PAPI_get_real_cyc( );
retval = PAPI_start( EventSet1 );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_start", retval );
do_flops( n );
retval = PAPI_stop( EventSet1, values[0] );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_stop", retval );
elapsed_us = PAPI_get_real_usec( ) - elapsed_us;
elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc;
remove_test_events( &EventSet1, mask1 );
if ( !TESTS_QUIET ) {
printf( "Thread %#x %-12s : \t%lld\n", omp_get_thread_num( ),
event_name, values[0][1] );
printf( "Thread %#x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num( ),
values[0][0] );
printf( "Thread %#x Real usec : \t%lld\n", omp_get_thread_num( ),
elapsed_us );
printf( "Thread %#x Real cycles : \t%lld\n", omp_get_thread_num( ),
elapsed_cyc );
}
/* It is illegal for the threads to exit in OpenMP */
/* test_pass(__FILE__,0,0); */
free_test_space( values, num_tests );
PAPI_unregister_thread( );
printf( "Thread %#x finished\n", omp_get_thread_num( ) );
}
/* Test driver: initializes PAPI and its OpenMP thread support, then
exercises Thread() in three phases (full parallel region, a single
serial call, and a second full parallel region) while measuring total
wall-clock time on the master thread. */
int
main( int argc, char **argv )
{
int retval;
long long elapsed_us, elapsed_cyc;
tests_quiet( argc, argv ); /* Set TESTS_QUIET variable */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if ( retval != PAPI_VER_CURRENT )
test_fail( __FILE__, __LINE__, "PAPI_library_init", retval );
hw_info = PAPI_get_hardware_info( );
if ( hw_info == NULL )
test_fail( __FILE__, __LINE__, "PAPI_get_hardware_info", 2 );
elapsed_us = PAPI_get_real_usec( );
elapsed_cyc = PAPI_get_real_cyc( );
/* Register omp_get_thread_num as the thread-id callback so PAPI can
distinguish the OMP threads. */
retval =
PAPI_thread_init( ( unsigned
long ( * )( void ) ) ( omp_get_thread_num ) );
if ( retval != PAPI_OK ) {
if ( retval == PAPI_ECMP )
test_skip( __FILE__, __LINE__, "PAPI_thread_init", retval );
else
test_fail( __FILE__, __LINE__, "PAPI_thread_init", retval );
}
#pragma omp parallel
{
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
}
/* Repeat with a single thread, then restore the full thread count. */
omp_set_num_threads( 1 );
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
omp_set_num_threads( omp_get_max_threads( ) );
#pragma omp parallel
{
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
}
elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc;
elapsed_us = PAPI_get_real_usec( ) - elapsed_us;
if ( !TESTS_QUIET ) {
printf( "Master real usec : \t%lld\n", elapsed_us );
printf( "Master real cycles : \t%lld\n", elapsed_cyc );
}
test_pass( __FILE__, NULL, 0 );
exit( 0 );
}
|
Tensor_Implementation.h | #pragma once
#include "Tensor.h"
#include "TensorShape.h"
#include "stdafx.h"
//#include <omp.h> //TODO: have this here by default?
// Construct from a brace-list of dimensions; delegates to the
// TensorShape constructor below.
template<typename T>
Tensor<T>::Tensor(const initializer_list<size_t>& dims, bool InitZero)
:Tensor(TensorShape(dims), InitZero) {}
// Wrap an existing coefficient buffer.  `ownership` decides whether the
// destructor will free `ptr`; the caller must guarantee the buffer holds
// at least dim.totalDimension() elements.
template<typename T>
Tensor<T>::Tensor(const TensorShape& dim, T *ptr, bool ownership, bool InitZero)
:shape_(dim), coeffs_(ptr), ownership_(ownership) {
if (InitZero) { zero(); }
}
// Construct from external tensor that holds the memory.
// Views A's buffer under the shape `dim`; if `ownership` is true the
// ownership flag is transferred from A to this tensor.  Exits with an
// error if A's buffer is too small or A does not own its memory.
template<typename T>
Tensor<T>::Tensor(const TensorShape& dim, Tensor<T>& A, bool ownership, bool InitZero)
: Tensor<T>(dim, &A[0], ownership, false){
if (dim.totalDimension() > A.shape().totalDimension()) {
cerr << "Error: memory too small for tensor.\n";
ownership_ = false;
exit(1);
}
// Was `ownership &! A.ownership_` (bitwise AND with a negation) --
// equivalent for bools but error-prone; spelled out as logical &&.
if (ownership && !A.ownership_) {
cerr << "Error: cannot transfer ownership, since original tensor was not owning memory.\n";
ownership_ = false;
exit(1);
}
if (ownership) {
A.ownership_ = false;
}
if (InitZero) { zero(); }
}
// Allocating constructor: owns a freshly allocated coefficient buffer.
// Uses new[] so the allocation matches the delete[] in the destructor
// and move assignment -- the previous malloc() paired with delete[] is
// undefined behavior and never runs T's constructors.
template<typename T>
Tensor<T>::Tensor(const TensorShape& dim, const bool InitZero)
:shape_(dim), coeffs_(new T[dim.totalDimension()]), ownership_(true) {
if (InitZero) { zero(); }
}
// Deserialize a tensor from an open binary stream (see read()).
template<typename T>
Tensor<T>::Tensor(istream& is)
:Tensor() {
read(is);
}
// Deserialize a tensor from a binary file on disk.
template<typename T>
Tensor<T>::Tensor(const string& filename)
: Tensor() {
ifstream is(filename);
read(is);
}
// Copy constructor
// NOTE(review): memcpy assumes T is trivially copyable (true for the
// double / complex<double> instantiations used here) -- confirm before
// instantiating with other element types.
template<typename T>
Tensor<T>::Tensor(const Tensor& old)
:Tensor(old.shape_, false) {
memcpy(coeffs_, old.coeffs_, shape().totalDimension() * sizeof(T));
// for (size_t i = 0; i < shape_.totalDimension(); i++) {
// coeffs_[i] = old.coeffs_[i];
// }
}
// Copy-multiply constructor: builds a copy of `old` with every
// coefficient scaled by `factor`.
template<typename T>
Tensor<T>::Tensor(const Tensor& old, T factor)
:Tensor(old.shape_, false) {
for (size_t i = 0; i < shape_.totalDimension(); i++) {
coeffs_[i] = old.coeffs_[i] * factor;
}
}
// Move constructor: steals the buffer and leaves `old` empty and
// non-owning so its destructor is a no-op on the coefficients.
template<typename T>
Tensor<T>::Tensor(Tensor&& old) noexcept
:shape_(old.shape_), coeffs_(old.coeffs_), ownership_(old.ownership_) {
old.coeffs_ = nullptr;
old.ownership_ = false;
}
// Copy Assignment Operator
// Fast path: when the shapes already match, copy coefficients in place
// (memcpy -- assumes trivially copyable T, as in the copy constructor).
// Otherwise build a temporary copy and move-assign it (copy-and-swap).
template<typename T>
Tensor<T>& Tensor<T>::operator=(const Tensor& old) {
if (this == &old) {
return *this;
} else if (old.shape() == this->shape()) {
memcpy(coeffs_, old.coeffs_, shape().totalDimension() * sizeof(T));
} else {
Tensor tmp(old);
*this = move(tmp);
}
return *this;
}
// Move Assignment Operator
// Steals old's buffer.  The current buffer is freed only when this
// tensor actually owns it -- the previous version delete[]d
// unconditionally, which freed memory owned elsewhere whenever a
// non-owning view was move-assigned (the destructor below shows the
// invariant: only owners free).  Also guards against self-move.
template<typename T>
Tensor<T>& Tensor<T>::operator=(Tensor&& old) noexcept {
if (this == &old) { return *this; }
if (ownership_) { delete[] coeffs_; }
shape_ = old.shape_;
coeffs_ = old.coeffs_;
ownership_ = old.ownership_;
old.coeffs_ = nullptr;
old.ownership_ = false;
return *this;
}
// Destructor: frees the coefficient buffer only if this tensor owns it
// (views constructed with ownership == false leave the memory alone).
template<typename T>
Tensor<T>::~Tensor() {
if (ownership_) { delete[] coeffs_; }
}
//////////////////////////////////////////////////////////
// Operators
//////////////////////////////////////////////////////////
// Linear element access, const overload.
// NOTE(review): returns a mutable T& from a const member function, so a
// const Tensor can be modified through it; consider const T& -- changing
// it would alter the public interface, so only flagged here.
template<typename T>
inline T& Tensor<T>::operator()(const size_t i) const {
size_t dimtot = shape_.totalDimension();
assert(i < dimtot);
return coeffs_[i];
}
// Linear element access (mutable).
template<typename T>
inline T& Tensor<T>::operator()(const size_t i) {
size_t dimtot = shape_.totalDimension();
assert(i < dimtot);
return coeffs_[i];
}
//////////////////////////////////////////////////////////
// Bracket Operators
//////////////////////////////////////////////////////////
// 2D access: element i of slice n, where a "slice" spans all dimensions
// before the last one (layout: coeffs_[n * lastBefore + i]).
template<typename T>
inline const T& Tensor<T>::operator()(const size_t i, const size_t n) const {
size_t dimpart = shape_.lastBefore();
assert(i < dimpart);
assert(n < shape_.lastDimension());
return coeffs_[n * dimpart + i];
}
template<typename T>
inline T& Tensor<T>::operator()(const size_t i, const size_t n) {
size_t dimpart = shape_.lastBefore();
assert(i < dimpart);
assert(n < shape_.lastDimension());
return coeffs_[n * dimpart + i];
}
// Mode-resolved access: (bef, i, aft) addresses index i of dimension
// `leaf`, with bef/aft ranging over the flattened dimensions before and
// after it.
template<typename T>
inline T& Tensor<T>::operator()(size_t bef, size_t i, size_t aft, size_t leaf) {
assert(leaf < shape_.order());
assert(bef < shape_.before(leaf));
assert(i < shape_[leaf]);
assert(aft < shape_.after(leaf));
size_t before = shape_.before(leaf);
size_t dim = shape_[leaf];
size_t idx = aft * before * dim + i * before + bef;
// @TODO: remove when tested
assert(idx < shape_.totalDimension());
return coeffs_[idx];
}
template<typename T>
inline const T& Tensor<T>::operator()(size_t bef, size_t i, size_t aft, size_t leaf) const {
assert(leaf < shape_.order());
assert(bef < shape_.before(leaf));
assert(i < shape_[leaf]);
assert(aft < shape_.after(leaf));
size_t before = shape_.before(leaf);
size_t dim = shape_[leaf];
size_t idx = aft * before * dim + i * before + bef;
// @TODO: remove when tested
assert(idx < shape_.totalDimension());
return coeffs_[idx];
}
// Full multi-index access: maps a per-dimension index vector to the
// linear index via indexMapping.
template<typename T>
T& Tensor<T>::operator()(const vector<size_t>& dims) {
return operator()(indexMapping(dims, shape_));
}
template<typename T>
const T& Tensor<T>::operator()(const vector<size_t>& dims) const {
return operator()(indexMapping(dims, shape_));
}
//////////////////////////////////////////////////////////
// File handling
//////////////////////////////////////////////////////////
// Human-readable dump: one line per last-dimension slice.
template<typename T>
void Tensor<T>::print(ostream& os) const {
for (size_t n = 0; n < shape_.lastDimension(); n++) {
for (size_t i = 0; i < shape_.lastBefore(); i++)
os << (*this)(i, n) << " ";
os << endl;
}
os << endl;
}
// Binary serialization: "TENS" magic, the shape, sizeof(T), then the
// raw coefficients.  Must stay in sync with read() below.
template<typename T>
void Tensor<T>::write(ostream& os) const {
// Verification
os.write("TENS", 4);
// Write the TensorDim
shape_.write(os);
// Write the size
int32_t size = sizeof(T);
os.write((char *) &size, sizeof(size));
// Write the Coefficients
for (size_t i = 0; i < shape_.totalDimension(); i++) {
T Coeff_now = operator()(i);
os.write((char *) &Coeff_now, size);
}
os.flush();
}
// Convenience overload: serialize to a file path.
template<typename T>
void Tensor<T>::write(const string& file) const {
ofstream os(file);
write(os);
}
// Binary deserialization: inverse of write().  Validates the "TENS"
// magic and element size with asserts, resizes *this to the stored
// shape, then reads the coefficients.
template<typename T>
void Tensor<T>::read(istream& is) {
// Check if binary string contains a Tensor
char check[5];
is.read(check, 4);
string s_check(check, 4);
string s_key("TENS");
assert(s_key == s_check);
// Read the TensorDim
TensorShape newtdim;
newtdim.readDim(is);
// Resize the Tensor
(*this) = Tensor<T>(newtdim, false);
// Read the size
int32_t size;
is.read((char *) &size, sizeof(size));
assert(size == sizeof(T));
// Read the coefficients
for (size_t i = 0; i < shape_.totalDimension(); i++) {
T Coeff_now;
is.read((char *) &Coeff_now, size);
operator()(i) = Coeff_now;
}
}
// Convenience overload: deserialize from a file path.
template<typename T>
void Tensor<T>::read(const string& filename) {
ifstream is(filename);
read(is);
}
//////////////////////////////////////////////////////////
// Math Operators
//////////////////////////////////////////////////////////
// Element-wise += ; requires equal total sizes (shapes may differ).
template<typename T>
Tensor<T>& Tensor<T>::operator+=(const Tensor& A) {
assert(A.shape().totalDimension() == shape().totalDimension());
T const *Ax = A.coeffs_;
for (size_t i = 0; i < A.shape().totalDimension(); i++) {
coeffs_[i] += Ax[i];
}
return *this;
}
// Element-wise -= ; requires equal total sizes.
template<typename T>
Tensor<T>& Tensor<T>::operator-=(const Tensor& A) {
assert(A.shape().totalDimension() == shape().totalDimension());
for (size_t i = 0; i < A.shape().totalDimension(); i++) {
(*this)(i) -= A(i);
}
return *this;
}
// Scale every coefficient by a.
template<typename T>
Tensor<T>& Tensor<T>::operator*=(T a) {
for (size_t i = 0; i < shape().totalDimension(); i++) {
operator()(i) = a * operator()(i);
}
return *this;
}
// Divide every coefficient by a (no zero check; caller's contract).
template<typename T>
Tensor<T>& Tensor<T>::operator/=(T a) {
for (size_t i = 0; i < shape().totalDimension(); i++) {
operator()(i) = operator()(i) / a;
}
return *this;
}
// Hadamard (element-wise) product of two tensors of equal total size;
// the result takes A's shape.
template<typename T>
Tensor<T> productElementwise(const Tensor<T>& A, const Tensor<T>& B) {
assert(A.shape().totalDimension() == B.shape().totalDimension());
Tensor<T> C(A.shape());
for (size_t i = 0; i < A.shape().totalDimension(); i++) {
C(i) = A(i) * B(i);
}
return C;
}
//////////////////////////////////////////////////////////
// Adjust Dimensions
//////////////////////////////////////////////////////////
// Resize every mode of the tensor to match newTDim (same order
// required), one mode at a time, then adjust the last (state) dimension.
template<typename T>
Tensor<T> Tensor<T>::adjustDimensions(const TensorShape& newTDim) const {
// Increase the dimensions of the Tensor from old TensorDim
// to new TensorDim
assert(newTDim.order() == shape_.order());
// Increase the active_ modes
Tensor<T> Acoeff(*this);
for (size_t k = 0; k < shape_.order(); k++) {
size_t act = newTDim[k];
Acoeff = Acoeff.adjustActiveDim(act, k);
}
// Increase the number of Tensors
size_t ntens = newTDim.lastDimension();
Acoeff = Acoeff.adjustStateDim(ntens);
return Acoeff;
}
// Return a copy with dimension `mode` resized to `active`, copying the
// overlapping coefficients (shrinking truncates and does not preserve
// norm).  Offsets place kept entries at the high indices so new/removed
// slots sit at the low indices (low-to-high occupancy convention).
template<typename T>
Tensor<T> Tensor<T>::adjustActiveDim(size_t active, size_t mode) const {
// Adjust the active_ dimension in the coordinate "mode".
// If the new active_ is smaller, the norm of the tensors is
// not conserved.
assert(mode < shape_.order());
// Create a new Tensor with the adjusted dim_
vector<size_t> dimlist = shape_.dimensions();
dimlist[mode] = active;
TensorShape newTDim(dimlist);
Tensor<T> newT(newTDim);
// Copy the coefficients
size_t before = shape_.before(mode);
size_t after = shape_.after(mode);
size_t minactive = min(active, shape_[mode]);
/// Offsets are used to add new & delete functions at first indices.
/// This ensures low-to-high occupancy convention.
size_t offset_old = shape_[mode] - minactive;
size_t offset_new = active - minactive;
for (size_t l = 0; l < after; l++) {
for (size_t j = 0; j < minactive; j++) {
for (size_t i = 0; i < before; i++) {
newT(i, j + offset_new, l, mode) = operator()(i, j + offset_old, l, mode);
}
}
}
return newT;
}
// Adjust the size of Tensor
// (resize the last dimension, i.e. the number of stacked tensors).
template<typename T>
Tensor<T> Tensor<T>::adjustStateDim(size_t n) const {
return adjustActiveDim(n, shape().lastIdx());
}
// Reinterpret the coefficients under a new shape of equal total size.
template<typename T>
void Tensor<T>::reshape(const TensorShape& new_dim) {
/// Check that total size is the same
assert(shape_.totalDimension() == new_dim.totalDimension());
shape_ = new_dim;
}
//////////////////////////////////////////////////////////
// Operations on Tensors
//////////////////////////////////////////////////////////
// Overlap matrix S(m,n) = <this_m | A_n>, contracting over everything
// except the last (state) dimension.
template<typename T>
Matrix<T> Tensor<T>::dotProduct(const Tensor<T>& A) const {
TensorShape tdima(A.shape());
size_t nmax = tdima.lastDimension();
size_t mmax = shape_.lastDimension();
Matrix<T> S(mmax, nmax);
contraction(S, *this, A, shape_.lastIdx());
return S;
}
// Zero-fill the coefficient buffer (memset -- assumes all-zero bytes
// represent T{0}, true for the arithmetic types used here).
template<typename T>
void Tensor<T>::zero() {
memset(coeffs_, 0, shape_.totalDimension() * sizeof(T));
}
// Convert a Matrix into an order-2 tensor with shape (dim1, dim2).
template<typename T>
Tensor<T>::Tensor(const Matrix<T>& mat)
: Tensor<T>({mat.dim1(), mat.dim2()}) {
for (size_t i = 0; i < mat.dim2(); ++i) {
for (size_t k = 0; k < mat.dim1(); ++k) {
this->operator[](indexMapping({k, i}, shape_)) = mat(k, i);
}
}
}
//////////////////////////////////////////////////////////
/// Non-member functions
//////////////////////////////////////////////////////////
// Scalar product <A_n | B_m> of single state vectors: sum over the
// flattened leading dimensions of conj(A(i,n)) * B(i,m).  Both tensors
// must share the same lastBefore(); the OpenMP reduction parallelizes
// the sum over i.
template<typename T>
T singleDotProd(const Tensor<T>& A, const Tensor<T>& B, size_t n, size_t m) {
TensorShape tdima(A.shape());
TensorShape tdimb(B.shape());
size_t nmax = tdima.lastDimension();
size_t mmax = tdimb.lastDimension();
size_t npart = tdima.lastBefore();
// Every tensor can have different amount of states but same dimpart
assert(npart == tdimb.lastBefore());
assert(n < nmax);
assert(m < mmax);
T result = 0;
#pragma omp parallel for reduction(+:result)
for (size_t i = 0; i < npart; i++) {
result += conj(A(i, n)) * B(i, m);
}
return result;
}
// Fortran backend routines (fast paths for matrix-vector and density
// matrix contractions); a/b/c are the active/before/after extents.
extern "C" {
// subroutine matvec (mulpsi, psi, matrix, a, b, c, add)
// subroutine rhomat (bra,ket,matrix,a,b,c)
void matvec_(double *C, double *B, double *mat,
int *a, int *b, int *c, int *add);
void ctmatvec_(double *C, double *B, double *mat,
int *a, int *b, int *c, int *add);
void rmatvec_(double *C, double *B, double *mat,
int *a, int *b, int *c, int *add);
void rhomat_(double *Bra, double *Ket, double *M,
int *a, int *b, int *c);
}
// Reference contraction h(b,b2) += sum_{a,c} conj(bra(a,b,c)) * ket(a,b2,c)
// over the mode with extents B (bra) / B2 (ket); A and C are the
// flattened before/after extents.  Generic fallback for contraction().
template<typename T, typename U>
void contraction1(Matrix<U>& h, const Tensor<T>& bra, const Tensor<T>& ket,
size_t A, size_t B, size_t B2, size_t C, bool zero) {
if (zero) { h.zero(); }
for (size_t a = 0; a < A; ++a) {
for (size_t b = 0; b < B; ++b) {
for (size_t b2 = 0; b2 < B2; ++b2) {
for (size_t c = 0; c < C; ++c) {
// h(b, b2) += conj(bra(a, b, c)) * ket(a, b2, c);
h(b, b2) += conj(bra[a + b * A + c * A * B]) * ket(a + b2 * A + c * A * B2);
}
}
}
}
}
// Dispatching contraction: for complex tensors with equal active
// dimensions, call the Fortran rhomat_ fast path; otherwise fall back
// to the generic contraction1 (zeroing S first).
template<typename T>
void contraction(Matrix<T>& S, const Tensor<T>& A, const Tensor<T>& B,
size_t before, size_t active1, size_t active2, size_t behind) {
typedef complex<double> cd;
typedef double d;
if constexpr(is_same<T, cd>::value) {
if (active1 == active2) {
int a = active1;
int b = before;
int c = behind;
rhomat_((double *) &A[0], (double *) &B[0], (double *) &S[0],
&a, &b, &c);
return;
}
}
contraction1(S, A, B, before, active1, active2, behind, true);
}
// Convenience overload: contract A and B over mode k and return the
// resulting (active1 x active2) matrix.
template<typename T>
Matrix<T> contraction(const Tensor<T>& A, const Tensor<T>& B, size_t k) {
const TensorShape& tdim_a(A.shape());
const TensorShape& tdim_b(B.shape());
assert(k < tdim_a.order());
assert(k < tdim_b.order());
size_t active1 = tdim_a[k];
size_t active2 = tdim_b[k];
Matrix<T> S(active1, active2);
contraction(S, A, B, k);
return S;
}
// Mode-k contraction into a caller-provided matrix; validates that all
// non-contracted extents of A and B agree before dispatching.
template<typename T>
void contraction(Matrix<T>& S, const Tensor<T>& A, const Tensor<T>& B, size_t k, bool zero) {
const TensorShape& tdim_a(A.shape());
const TensorShape& tdim_b(B.shape());
assert(k < tdim_a.order());
assert(k < tdim_b.order());
size_t before = tdim_a.before(k);
size_t after = tdim_a.after(k);
assert(tdim_b.before(k) == before);
assert(tdim_b.after(k) == after);
size_t active1 = tdim_a[k];
size_t active2 = tdim_b[k];
assert(tdim_a.totalDimension() / active1 == tdim_b.totalDimension() / active2);
if (zero) { S.zero(); }
contraction(S, A, B, before, active1, active2, after);
}
// Reference matrix-tensor product over one mode:
// C(bef, actC, aft) += h(actC, act) * B(bef, act, aft)
// with h indexed as h[act * activeC + actC].  Generic fallback used by
// matrixTensor() below.
template<typename T, typename U>
void matrixTensor1(Tensor<T>& C, const Matrix<U>& h, const Tensor<T>& B,
size_t before, size_t active, size_t activeC, size_t after, bool zero) {
if (zero) { C.zero(); }
size_t dimafter = active * before;
size_t dimafterC = activeC * before;
for (size_t aft = 0; aft < after; ++aft) {
for (size_t act = 0; act < active; ++act) {
for (size_t actC = 0; actC < activeC; ++actC) {
for (size_t bef = 0; bef < before; ++bef) {
C[dimafterC * aft + actC * before + bef] +=
h[act * activeC + actC] * B[dimafter * aft + act * before + bef];
}
}
}
}
}
// Dispatching matrix-tensor product: square (activeB == activeC) cases
// with matching scalar types go to the Fortran matvec_/rmatvec_ fast
// paths; everything else uses matrixTensor1.
// NOTE(review): matrixTensor1's parameters are (before, active, activeC,
// after) while this call site passes (before, activeC, activeB, ...) --
// the naming swap looks intentional for the transposed h indexing, but
// verify against the Fortran convention.
template<typename T, typename U>
void matrixTensor(Tensor<T>& C, const Matrix<U>& A, const Tensor<T>& B,
size_t before, size_t activeC, size_t activeB, size_t after, bool zero) {
// Null the result tensor if flag is set to "true"
int add = !zero;
int a = activeB;
int b = before;
int c = after;
typedef complex<double> cd;
typedef double d;
if (activeB == activeC) {
if constexpr(is_same<U, cd>::value && is_same<T, cd>::value) {
matvec_((double *) &C[0], (double *) &B[0], (double *) &A[0],
&a, &b, &c, &add);
return;
} else if constexpr(is_same<U, d>::value && is_same<T, d>::value) {
rmatvec_((double *) &C[0], (double *) &B[0], (double *) &A[0],
&a, &b, &c, &add);
return;
}
}
matrixTensor1(C, A, B, before, activeC, activeB, after, zero);
}
// Adjoint-matrix-tensor product over one mode:
// C(bef, j, aft) += conj(A(l, j)) * B(bef, l, aft).
// Complex/complex instantiations go to the Fortran ctmatvec_ fast path;
// the generic branch hand-rolls the index arithmetic with a special
// case for before == 1 (contiguous inner stride).
template<typename T, typename U>
void tMatrixTensor(Tensor<T>& C, const Matrix<U>& A, const Tensor<T>& B,
size_t before, size_t activeC, size_t activeB, size_t after, bool zero) {
// Null the result tensor if flag is set to "true"
int add = !zero;
int a = activeB;
int b = before;
int c = after;
typedef complex<double> cd;
typedef double d;
if constexpr(is_same<U, cd>::value && is_same<T, cd>::value) {
ctmatvec_((double *) &C[0], (double *) &B[0], (double *) &A[0],
&a, &b, &c, &add);
// } else if constexpr(is_same<U, d>::value && is_same<T, d>::value) {
// rmatvec_((double*)&C[0], (double*)&B[0], (double*)&A[0],
// &a, &b, &c, &add);
} else {
if (zero) { C.zero(); }
size_t actbefB = activeB * before;
size_t actbefC = activeC * before;
size_t Cidx = 0;
size_t Bidx = 0;
size_t Aidx = 0;
size_t kpreidxB = 0;
size_t kpreidxC = 0;
size_t Bpreidx = 0;
size_t Cpreidx = 0;
if (before == 1) {
// Fast path: mode is the innermost dimension, indices are direct.
//#pragma omp parallel for
for (size_t k = 0; k < after; ++k) {
kpreidxB = k * actbefB;
kpreidxC = k * actbefC;
for (size_t l = 0; l < activeB; ++l) {
Bidx = l + kpreidxB;
for (size_t j = 0; j < activeC; ++j) {
Cidx = j + kpreidxC;
Aidx = j * activeB + l;
C[Cidx] += conj(A[Aidx]) * B[Bidx];
// C[Cidx] += conj(A[l, j]) * B[Bidx];
}
}
}
} else {
// General path: stride `before` between consecutive mode indices.
//#pragma omp parallel for
for (size_t k = 0; k < after; ++k) {
kpreidxB = k * actbefB;
kpreidxC = k * actbefC;
for (size_t l = 0; l < activeB; ++l) {
Bpreidx = l * before + kpreidxB;
for (size_t j = 0; j < activeC; ++j) {
Aidx = j * activeB + l;
Cpreidx = j * before + kpreidxC;
for (size_t i = 0; i < before; ++i) {
Cidx = Cpreidx + i;
Bidx = Bpreidx + i;
C[Cidx] += conj(A[Aidx]) * B[Bidx];
// C[Cidx] += conj(A[l, j]) * B[Bidx];
}
}
}
}
}
}
}
// Mode-resolved matrix-tensor product into a caller-provided result:
// applies A to dimension `mode` of B (A.dim2 must match B's mode size,
// A.dim1 must match C's).
template<typename T, typename U>
void matrixTensor(Tensor<T>& C, const Matrix<U>& A, const Tensor<T>& B, size_t mode, bool zero) {
TensorShape tdim(B.shape());
TensorShape tdimC(C.shape());
size_t after = tdim.after(mode);
size_t before = tdim.before(mode);
size_t active1 = A.dim1();
size_t active2 = A.dim2();
assert(mode < tdim.order());
assert(A.dim2() == tdim[mode]);
assert(A.dim1() == tdimC[mode]);
matrixTensor(C, A, B, before, active1, active2, after, zero);
}
// Returning overload: square A keeps B's shape; rectangular A replaces
// the mode dimension with A.dim1() (rarely exercised -- see the warning
// printed below).
template<typename T, typename U>
Tensor<T> matrixTensor(const Matrix<U>& A, const Tensor<T>& B, size_t mode) {
const TensorShape& tdim(B.shape());
assert(mode < tdim.order());
if (A.dim1() == A.dim2()) {
Tensor<T> C(tdim);
size_t after = tdim.after(mode);
size_t active = tdim[mode];
size_t before = tdim.before(mode);
matrixTensor(C, A, B, before, active, active, after, false);
return C;
} else {
TensorShape tdim(B.shape());
size_t active1 = A.dim1();
size_t active2 = A.dim2();
tdim = replaceDimension(tdim, mode, active1);
Tensor<T> C(tdim);
size_t after = tdim.after(mode);
size_t before = tdim.before(mode);
assert(active1 == C.shape()[mode]);
assert(active2 == B.shape()[mode]);
cout << "non-quadratic mattensor implemented but tested only once so far.\n";
matrixTensor(C, A, B, before, active1, active2, after, false);
cout << "done.\n";
return C;
}
}
// Tensor-matrix product: equivalent to applying A^T from the left.
template<typename T, typename U>
void tensorMatrix(Tensor<T>& C, const Tensor<T>& B, const Matrix<U>& A, size_t mode, bool zero) {
tensorMatrix(C, B, A.transpose(), mode, zero);
}
template<typename T, typename U>
Tensor<T> tensorMatrix(const Tensor<T>& B, const Matrix<U>& A, size_t mode) {
return matrixTensor(A.transpose(), B, mode);
}
// Mode-resolved adjoint product returning a new tensor; square A keeps
// B's shape, rectangular A resizes the mode to A.dim2().
// NOTE(review): the rectangular branch calls getchar(), blocking on
// stdin -- presumably a leftover debugging pause; confirm before use.
template<typename T, typename U>
Tensor<T> tMatrixTensor(const Matrix<U>& A, const Tensor<T>& B, size_t mode) {
/// @TODO: remove this function; replace by tensorMatrix
const TensorShape& tdim(B.shape());
assert(mode < tdim.order());
assert(mode >= 0);
assert(A.dim1() == B.shape()[mode]);
if (A.dim1() == A.dim2()) {
Tensor<T> C(tdim);
size_t after = tdim.after(mode);
size_t active = tdim[mode];
size_t before = tdim.before(mode);
tMatrixTensor(C, A, B, before, active, active, after, false);
return C;
} else {
size_t activeC = A.dim2();
size_t activeB = A.dim1();
TensorShape tdim(B.shape());
tdim = replaceDimension(tdim, mode, A.dim2());
size_t after = tdim.after(mode);
size_t before = tdim.before(mode);
Tensor<T> C(tdim);
cout << "non-quadratic mattensor implemented but not tested, yet.\n";
tMatrixTensor(C, A, B, before, activeC, activeB, after, false);
getchar();
return C;
}
}
// Apply matrix A to the last (state) dimension of B, writing into C:
// C(., m) += sum_n A(m, n) B(., n).
template<typename T, typename U>
void multStateAB(Tensor<T>& C, const Matrix<U>& A, const Tensor<T>& B, bool zero) {
const TensorShape& tdimB(B.shape());
const TensorShape& tdimC(C.shape());
const size_t before = tdimB.lastBefore();
const size_t active1 = tdimB.lastDimension();
const size_t active2 = tdimC.lastDimension();
const size_t after = 1;
assert(A.dim2() == active1);
assert(A.dim1() == active2);
assert(before == tdimC.lastBefore());
matrixTensor(C, A, B, before, active1, active2, after, zero);
}
// Returning overload: result has A.dim1() states, all other dimensions
// taken from B.
template<typename T, typename U>
Tensor<T> multStateAB(const Matrix<U>& A, const Tensor<T>& B) {
const TensorShape& tdim_b(B.shape());
size_t ntensor = tdim_b.lastDimension();
assert(A.dim2() == ntensor);
TensorShape tdim_c(tdim_b);
tdim_c.setDimension(A.dim1(), tdim_c.lastIdx());
Tensor<T> C(tdim_c);
multStateAB(C, A, B);
return C;
}
// Apply A transposed to the state dimension: C(i, m) += A(n, m) B(i, n).
// C must be pre-zeroed by the caller (this accumulates).
template<typename T, typename U>
void multStateArTB(Tensor<T>& C, const Matrix<U>& A, const Tensor<T>& B) {
const TensorShape& tdim(B.shape());
size_t dimpart = tdim.lastBefore();
size_t ntensor = tdim.lastDimension();
for (size_t n = 0; n < ntensor; n++) {
size_t B_idx = n * dimpart;
for (size_t m = 0; m < ntensor; m++) {
size_t C_idx = m * dimpart;
size_t A_idx = m * ntensor;
for (size_t i = 0; i < dimpart; i++) {
/// C(i, m) += A(n, m) * B(i, n);
C[C_idx + i] += A[A_idx + n] * B[B_idx + i];
}
}
}
}
// Returning overload; requires a square A matching B's state count.
template<typename T, typename U>
Tensor<T> multStateArTB(const Matrix<U>& A, const Tensor<T>& B) {
const TensorShape& tdim(B.shape());
size_t dimpart = tdim.lastBefore();
size_t ntensor = tdim.lastDimension();
assert(A.dim1() == A.dim2());
assert(A.dim2() == ntensor);
Tensor<T> C(tdim);
multStateArTB(C, A, B);
return C;
}
// A += coeff * B (axpy).  Both tensors must hold the same total number
// of coefficients.
template<typename T, typename U>
void multAdd(Tensor<T>& A, const Tensor<T>& B, U coeff) {
const TensorShape& tdim = A.shape();
// Bug fix: the size check previously compared A.shape() against
// itself (tdim_2 = A.shape()), so a mismatched B was never caught.
const TensorShape& tdim_2 = B.shape();
size_t dimtot = tdim.totalDimension();
assert(dimtot == tdim_2.totalDimension());
for (size_t i = 0; i < dimtot; ++i) {
A(i) += coeff * B(i);
}
}
// In-place iterated Gram-Schmidt orthonormalization of the state
// vectors of A (columns in the (lastBefore x lastDimension) view).
// Each vector is repeatedly orthogonalized against its predecessors
// until the accumulated overlap drops below `conver` (at most `maxiter`
// sweeps), then normalized; aborts via assert if the residual overlap
// still exceeds `errorconver`.
template<typename T>
void gramSchmidt(Tensor<T>& A) {
// @TODO: Fill in auto-refill
// control parameters
size_t maxiter = 15;
double conver = 1e-12;
double errorconver = 1e-9;
TensorShape tdim(A.shape());
size_t ntensor = tdim.lastDimension();
size_t dimpart = tdim.lastBefore();
for (size_t n = 0; n < ntensor; n++) {
size_t iter = 0;
double accumoverlap = 1.;
// orthogonalize on all previous ones and then normalize
while ((accumoverlap > conver) && (iter < maxiter)) {
iter++;
accumoverlap = 0;
for (size_t m = 0; m < n; m++) {
// orthogonalize
T overlap = singleDotProd(A, A, m, n);
accumoverlap += abs(overlap);
for (size_t i = 0; i < dimpart; i++) {
A(i, n) -= overlap * A(i, m);
}
}
// renormalize
T norm = singleDotProd(A, A, n, n);
if (abs(norm) != 0) {
norm = sqrt(real(norm));
for (size_t i = 0; i < dimpart; i++) {
A(i, n) /= norm;
}
}
}
// Error message
if (accumoverlap >= errorconver) {
cout << "Error: No orthogonality in Gram-Schmidt" << endl;
cout << "Error measurement: " << conver << endl;
cout << "Present error: " << accumoverlap << endl;
cout << "Error acceptance: " << errorconver << endl;
assert(0);
}
}
}
// QR-orthogonalize the state vectors: flatten to a matrix, take Q from
// the matrix QR, and reshape back to A's original shape.
template<typename T>
Tensor<T> qr(const Tensor<T>& A) {
auto Amat = toMatrix(A);
auto Qmat = qr(Amat);
auto Q = toTensor(Qmat);
Q.reshape(A.shape());
return Q;
}
// Mode-resolved QR: orthogonalize with respect to dimension `mode`.
template<typename T>
Tensor<T> qr(const Tensor<T>& A, size_t mode) {
auto Amat = toMatrix(A, mode);
auto Qmat = qr(Amat);
auto Q = toTensor(Qmat, A.shape(), mode);
return Q;
}
//Projects B on A
// Returns P_A B = A (A^H A)^{-1} (A^H B) expressed in A's state basis,
// using the (pseudo-)inverse of A's overlap matrix to compensate for a
// non-orthonormal A.
template<typename T>
Tensor<T> project(const Tensor<T>& A,
const Tensor<T>& B) {
//calculates the overlap of A with it self
Tensor<T> Aperp(A);
// GramSchmidt(Aperp);
const Matrix<T> overlap = Aperp.dotProduct(Aperp);
//invert the overlap
const Matrix<T> inverse_operlap = overlap.cInv();
//calculate the scalar product of A and B
const Matrix<T> dotproduct = Aperp.dotProduct(B);
//multiply the scalar product and the inverse_operlap
const Matrix<T> product = inverse_operlap * dotproduct;
return multStateArTB(product, Aperp);
// return multStateArTB(dotproduct, Aperp);
}
/*! \brief Project B out of A, i.e. Anew = (1-P_B)*A
 *
 * This routine takes a Tensor A and makes it orthogonal to B.
 * It can be written as A_new = (1 - P_B) A, where P_B is the
 * projector onto B.
 */
template<typename T>
Tensor<T> projectOut(const Tensor<T>& A,
const Tensor<T>& B) {
Tensor<T> projector = project(B, A);
Tensor<T> perp_A(A);
const TensorShape& tdim = A.shape();
for (size_t i = 0; i < tdim.totalDimension(); ++i) {
perp_A(i) -= projector(i);
}
return perp_A;
}
//Projects B on A
// Variant of project() that assumes A is already orthonormal, so the
// overlap inverse is skipped.
template<typename T>
Tensor<complex<double> > projectOrthogonal(const Tensor<complex<double> >& A,
const Tensor<T>& B) {
// calculate the scalar product of A and B
const Matrix<complex<double> > dotproduct = A.dotProduct(B);
return multStateArTB(dotproduct, A);
}
// Element-wise complex conjugate (A passed by value, returned modified).
template<typename T>
Tensor<T> conj(Tensor<T> A) {
for (size_t i = 0; i < A.shape().totalDimension(); ++i) {
A[i] = conj(A[i]);
}
return A;
}
// Frobenius norm of (A - B), computed via the overlap of the difference
// with itself.
template<typename T>
double residual(Tensor<T> A, const Tensor<T>& B) {
A -= B;
auto S = A.dotProduct(A);
return S.frobeniusNorm();
}
// Flatten to a (lastBefore x lastDimension) matrix by copying.
template<typename T>
Matrix<T> toMatrix(const Tensor<T>& A) {
const TensorShape& shape = A.shape();
size_t diml = shape.lastBefore();
size_t dimr = shape.lastDimension();
Matrix<T> B(diml, dimr);
for (size_t j = 0; j < dimr; ++j) {
for (size_t i = 0; i < diml; ++i) {
B(i, j) = A(i, j);
}
}
return B;
}
// Zero-copy flatten: the returned matrix adopts A's buffer and A gives
// up ownership (A must not free or reuse the coefficients afterwards).
template<typename T>
Matrix<T> moveToMatrix(Tensor<T>& A) {
const TensorShape& shape = A.shape();
size_t diml = shape.lastBefore();
size_t dimr = shape.lastDimension();
A.ownership_ = false;
Matrix<T> B(diml, dimr, A.coeffs_, true, false);
return B;
}
// Mode-resolved matricization: dimension `mode` becomes the column
// index, all remaining dimensions are flattened into the row index.
template<typename T>
Matrix<T> toMatrix(const Tensor<T>& A, size_t mode) {
const TensorShape& shape = A.shape();
size_t dimbef = shape.before(mode);
size_t dimaft = shape.after(mode);
size_t diml = dimbef * dimaft;
size_t dimr = shape[mode];
Matrix<T> B(diml, dimr);
TensorShape tmp({dimbef, dimr, dimaft});
for (size_t bef = 0; bef < dimbef; ++bef) {
for (size_t a = 0; a < dimr; ++a) {
for (size_t aft = 0; aft < dimaft; ++aft) {
size_t idxl = bef + aft * dimbef;
size_t I = indexMapping({bef, a, aft}, tmp);
B(idxl, a) = A(I);
}
}
}
return B;
}
// Inverse of toMatrix(A, mode): scatter the matrix entries back into a
// tensor of the given shape, with B's columns indexing the chosen mode.
template<typename T>
Tensor<T> toTensor(const Matrix<T>& B, const TensorShape& shape, size_t mode) {
    Tensor<T> A(shape);
    size_t dimbef = shape.before(mode);
    size_t dimaft = shape.after(mode);
    size_t dimr = shape[mode];
    // Same (before, mode, after) linearization as in toMatrix(A, mode).
    TensorShape tmp({dimbef, dimr, dimaft});
    for (size_t bef = 0; bef < dimbef; ++bef) {
        for (size_t a = 0; a < dimr; ++a) {
            for (size_t aft = 0; aft < dimaft; ++aft) {
                size_t idxl = bef + aft * dimbef;
                size_t I = indexMapping({bef, a, aft}, tmp);
                A(I) = B(idxl, a);
            }
        }
    }
    return A;
}
// Copy a matrix into an equivalent rank-2 tensor of shape (dim1, dim2).
template<typename T>
Tensor<T> toTensor(const Matrix<T>& B) {
    TensorShape shape({B.dim1(), B.dim2()});
    Tensor<T> result(shape);
    const size_t nrow = B.dim1();
    const size_t ncol = B.dim2();
    for (size_t col = 0; col < ncol; ++col) {
        for (size_t row = 0; row < nrow; ++row) {
            result(row, col) = B(row, col);
        }
    }
    return result;
}
// Reinterpret a matrix as a rank-2 tensor WITHOUT copying: the tensor adopts
// the matrix's buffer. NOTE(review): unlike moveToMatrix, B is not flagged
// non-owning here — confirm the Tensor ctor arguments transfer ownership.
template<typename T>
Tensor<T> moveToTensor(Matrix<T>& B) {
    TensorShape shape({B.dim1(), B.dim2()});
    Tensor<T> A(shape, &B[0], true, false);
    return A;
}
// Stream output: delegates to Tensor::write.
template<typename T>
ostream& operator<<(ostream& os, const Tensor<T>& A) {
    A.write(os);
    return os;
}

// Stream input: delegates to Tensor::read.
template<typename T>
istream& operator>>(istream& is, Tensor<T>& A) {
    A.read(is);
    return is;
}
// Exact element-wise equality; shapes must match as well.
template<typename T>
bool operator==(const Tensor<T>& A, const Tensor<T>& B) {
    if (A.shape() != B.shape()) { return false; }
    const size_t n = A.shape().totalDimension();
    size_t k = 0;
    while (k < n) {
        if (A[k] != B[k]) { return false; }
        ++k;
    }
    return true;
}
// Apply f to every entry of A, writing the results into res.
// Consistency fix: the rest of this file addresses tensors through
// shape().totalDimension(); the previous Dim1()/Dim2() accessors appear
// nowhere else here, so use the shape-based API uniformly.
template<typename T>
void elementwise(Tensor<T>& res, const Tensor<T>& A, const function<T(T)>& f) {
    // res must be able to hold one output per input entry.
    assert(A.shape().totalDimension() == res.shape().totalDimension());
    for (size_t i = 0; i < A.shape().totalDimension(); ++i) {
        res[i] = f(A[i]);
    }
}

// Convenience overload: returns a new tensor with the same shape as A
// whose entries are f applied to the entries of A.
template<typename T>
Tensor<T> elementwise(const Tensor<T>& A, const function<T(T)>& f) {
    Tensor<T> res(A.shape());
    elementwise(res, A, f);
    return res;
}
|
nr_sgx_direct.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "nr_direct.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define MIN(I,J) ((I) < (J) ? (I) : (J))
/* Per-thread accumulation buffer for one SGX J/K contraction.
 * v_dims = {nao_i, nao_j, ngrids}; the size of data depends on the
 * contraction type (see ALLOCATE below). */
typedef struct {
        int ncomp;
        int v_dims[3];
        double *data;
} SGXJKArray;

/* Virtual table describing one contraction type: how to allocate the
 * per-thread buffer, contract a (ij|g) integral block with a density
 * matrix, and merge/clean up the result. */
typedef struct {
        SGXJKArray *(*allocate)(int *shls_slice, int *ao_loc, int ncomp, int ngrids);
        //void (*contract)(double *eri, double *dm, SGXJKArray *vjk,
        //                 int i0, int i1, int j0, int j1);
        void (*contract)(double *eri, double *dm, SGXJKArray *vjk,
                         int i0, int i1, int j0, int j1,
                         int* inds, int ngrids);
        void (*set0)(SGXJKArray *, int);
        void (*send)(SGXJKArray *, int, double *);
        void (*finalize)(SGXJKArray *, double *);
        void (*sanity_check)(int *shls_slice);
} SGXJKOperator;

int GTOmax_shell_dim(const int *ao_loc, const int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);

/* Grid batch size used when probing integral cache requirements. */
#define BLKSIZE 312
// for grids integrals only
/* Probe the libcint cache requirement for every shell in the slices,
 * with one BLKSIZE grid batch, and return the maximum. Calling the
 * integral function with a NULL output buffer is libcint's convention
 * for querying the cache size instead of computing integrals. */
int _max_cache_size_sgx(int (*intor)(), int *shls_slice, int ncenter,
                        int *atm, int natm, int *bas, int nbas, double *env)
{
        int i, n;
        int i0 = shls_slice[0];
        int i1 = shls_slice[1];
        /* Widen [i0, i1) to cover every center's shell range. */
        for (i = 1; i < ncenter; i++) {
                i0 = MIN(i0, shls_slice[i*2  ]);
                i1 = MAX(i1, shls_slice[i*2+1]);
        }
        int shls[4];
        int cache_size = 0;
        for (i = i0; i < i1; i++) {
                shls[0] = i;
                shls[1] = i;
                shls[2] = 0;
                shls[3] = BLKSIZE;
                n = (*intor)(NULL, NULL, shls, atm, natm, bas, nbas, env, NULL, NULL);
                cache_size = MAX(cache_size, n);
        }
        return cache_size;
}
/* Unpack the IntorEnvs struct (envs) and the current shell pair (shls)
 * into locals. Used at the top of the dot kernels; expects `envs` and
 * `shls` to be in scope at the expansion site. */
#define DECLARE_ALL \
        const int *atm = envs->atm; \
        const int *bas = envs->bas; \
        double *env = envs->env; \
        const int natm = envs->natm; \
        const int nbas = envs->nbas; \
        const int *ao_loc = envs->ao_loc; \
        const int *shls_slice = envs->shls_slice; \
        const CINTOpt *cintopt = envs->cintopt; \
        const int ioff = ao_loc[shls_slice[0]]; \
        const int joff = ao_loc[shls_slice[2]]; \
        int i0, j0, i1, j1, ish, jsh, idm; \
        ish = shls[0]; \
        jsh = shls[1];
/* Schwarz-style screening of one (shell_i, shell_j | grid_k) block:
 * keep the block when q_cond(i,j) * max(|dm_cond(j,k)|, |dm_cond(i,k)|)
 * exceeds the direct-SCF cutoff. Always keeps the block when no
 * optimizer is supplied. */
int SGXnr_pj_prescreen(int *shls, CVHFOpt *opt,
                       int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1;
        }
        const int ish = shls[0];
        const int jsh = shls[1];
        const int kgrid = shls[2];
        const int nsh = opt->nbas;
        const int ngrids = opt->ngrids;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(ish < nsh);
        assert(jsh < nsh);
        assert(kgrid < ngrids);
        /* Largest density-matrix bound reaching this grid point. */
        double dmax = fabs(opt->dm_cond[jsh*ngrids+kgrid]);
        const double dmax_i = fabs(opt->dm_cond[ish*ngrids+kgrid]);
        if (dmax_i > dmax) {
                dmax = dmax_i;
        }
        return opt->q_cond[ish*nsh+jsh] * dmax > opt->direct_scf_cutoff;
}
/* Compute the (ij|g) integral block for one shell pair over all surviving
 * grid points and contract it with each density matrix.
 *
 * Grid points passing the prescreen are compacted into env's grid buffer;
 * inds[] maps each compacted index back to the original grid index so the
 * contraction kernels can scatter into the full-size output.
 * NOTE(review): env here is the thread-local copy made by the driver, so
 * mutating env[NGRIDS] and the grid coordinates is thread-safe. */
void SGXdot_nrk(int (*intor)(), SGXJKOperator **jkop, SGXJKArray **vjk,
                double **dms, double *buf, double *cache, int n_dm, int* shls,
                CVHFOpt *vhfopt, IntorEnvs *envs,
                double* all_grids, int tot_grids)
{
        DECLARE_ALL;

        /* AO index ranges of this shell pair, relative to the slice. */
        i0 = ao_loc[ish ] - ioff;
        j0 = ao_loc[jsh ] - joff;
        i1 = ao_loc[ish+1] - ioff;
        j1 = ao_loc[jsh+1] - joff;

        int tmp_ngrids = 0;
        int k;
        int* inds = (int*) malloc(tot_grids*sizeof(int));
        double *grids = env + (size_t) env[PTR_GRIDS];
        if (vhfopt != NULL && vhfopt->dm_cond != NULL) {
                /* Keep only grid points surviving the pj prescreen. */
                for (k = 0; k < tot_grids; k++) {
                        shls[2] = k;
                        if (SGXnr_pj_prescreen(shls, vhfopt, atm, bas, env)) {
                                grids[3*tmp_ngrids+0] = all_grids[3*k+0];
                                grids[3*tmp_ngrids+1] = all_grids[3*k+1];
                                grids[3*tmp_ngrids+2] = all_grids[3*k+2];
                                inds[tmp_ngrids] = k;
                                tmp_ngrids++;
                        }
                }
                env[NGRIDS] = tmp_ngrids;
        } else {
                /* No screening data: keep every grid point. */
                for (k = 0; k < tot_grids; k++) {
                        shls[2] = k;
                        grids[3*tmp_ngrids+0] = all_grids[3*k+0];
                        grids[3*tmp_ngrids+1] = all_grids[3*k+1];
                        grids[3*tmp_ngrids+2] = all_grids[3*k+2];
                        inds[tmp_ngrids] = k;
                        tmp_ngrids++;
                }
                env[NGRIDS] = tmp_ngrids;
        }

        /* Evaluate the integrals in BLKSIZE-sized grid batches. */
        int grid0, grid1;
        const int dims[] = {ao_loc[ish+1]-ao_loc[ish], ao_loc[jsh+1]-ao_loc[jsh], tmp_ngrids};
        for (grid0 = 0; grid0 < tmp_ngrids; grid0 += BLKSIZE) {
                grid1 = MIN(grid0 + BLKSIZE, tmp_ngrids);
                shls[2] = grid0;
                shls[3] = grid1;
                (*intor)(buf+grid0, dims, shls, atm, natm, bas, nbas, env, cintopt, cache);
        }
        //(*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache);

        /* Accumulate into each density matrix's private result buffer. */
        for (idm = 0; idm < n_dm; idm++) {
                jkop[idm]->contract(buf, dms[idm], vjk[idm],
                                    i0, i1, j0, j1, inds, tmp_ngrids);
        }
        free(inds);
}
/* Driver for the semi-numerical exchange (SGX) contraction: loops over
 * shell pairs in parallel, calling fdot (e.g. SGXdot_nrk) on pairs that
 * survive the prescreen, and merges the per-thread results into vjk.
 *
 * Each thread works on a private copy of env because the dot kernel
 * overwrites env[NGRIDS] and the grid coordinates during screening.
 * aosym == 2 means i>=j triangular pair loop; otherwise full square. */
void SGXnr_direct_drv(int (*intor)(), void (*fdot)(), SGXJKOperator **jkop,
                      double **dms, double **vjk, int n_dm, int ncomp,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, CVHFOpt *vhfopt,
                      int *atm, int natm, int *bas, int nbas, double *env,
                      int env_size, int aosym)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        int nish = ish1 - ish0;
        int di = GTOmax_shell_dim(ao_loc, shls_slice, 2);
        int cache_size = _max_cache_size_sgx(intor, shls_slice, 2,
                                             atm, natm, bas, nbas, env);
        int npair;
        if (aosym == 2) {
                npair = nish * (nish+1) / 2;
        } else {
                npair = nish * nish;
        }
        int (*fprescreen)();
        if (vhfopt) {
                fprescreen = vhfopt->fprescreen;
        } else {
                fprescreen = CVHFnoscreen;
        }
        int ngrids = (int) env[NGRIDS];
        double* all_grids = env+(size_t)env[PTR_GRIDS];
#pragma omp parallel default(none) firstprivate(ish0, jsh0) \
        shared(intor, fdot, jkop, ao_loc, shls_slice, \
               dms, vjk, n_dm, ncomp, nbas, vhfopt, \
               atm, bas, env, natm, \
               nish, di, cache_size, fprescreen, \
               aosym, npair, cintopt, env_size, \
               ngrids, all_grids)
{
        int i, ij, ish, jsh;
        int shls[4];
        /* Thread-private env copy: the dot kernel mutates grid entries. */
        double* tmp_env = (double*) malloc(env_size * sizeof(double));
        for (i = 0; i < env_size; i++) {
                tmp_env[i] = env[i];
        }
        IntorEnvs envs = {natm, nbas, atm, bas, tmp_env, shls_slice, ao_loc, NULL,
                          cintopt, ncomp};
        /* Per-thread accumulation buffers, one per density matrix. */
        SGXJKArray *v_priv[n_dm];
        for (i = 0; i < n_dm; i++) {
                v_priv[i] = jkop[i]->allocate(shls_slice, ao_loc, ncomp, ngrids);
        }
        double *buf = malloc(sizeof(double) * ngrids*di*di*ncomp);
        double *cache = malloc(sizeof(double) * cache_size);
#pragma omp for nowait schedule(dynamic, 1)
        for (ij = 0; ij < npair; ij++) {
                if (aosym == 2) {
                        /* Decode triangular pair index: ij = ish*(ish+1)/2 + jsh. */
                        ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                        jsh = ij - ish*(ish+1)/2;
                } else {
                        ish = ij / nish;
                        jsh = ij % nish;
                }
                shls[0] = ish + ish0;
                shls[1] = jsh + jsh0;
                if ((*fprescreen)(shls, vhfopt, atm, bas, env))
                {
                        (*fdot)(intor, jkop, v_priv, dms, buf, cache, n_dm, shls,
                                vhfopt, &envs, all_grids, ngrids);
                }
        }
#pragma omp critical
{
        /* Serialized merge of thread-private results into the output. */
        for (i = 0; i < n_dm; i++) {
                jkop[i]->finalize(v_priv[i], vjk[i]);
        }
}
        free(buf);
        free(cache);
        free(tmp_env);
}
}
/* Build the shell-pair screening matrix q_cond (size nbas x nbas):
 * q_cond[i,j] is the largest |integral| over the (i,j) shell block.
 * Shell pairs on the same atom are forced to 1 so they are never
 * screened out (see the comment in the loop). */
void SGXsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                         int *ao_loc, int *atm, int natm,
                         int *bas, int nbas, double *env)
{
        /* Replace any previously computed screening table. */
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        nbas = opt->nbas;
        double *q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        opt->q_cond = q_cond;

        int shls_slice[] = {0, nbas};
        int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                           atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(intor, q_cond, ao_loc, atm, natm, bas, nbas, env, cache_size)
{
        double qtmp, tmp;
        int ij, i, j, di, dj, ish, jsh;
        int shls[2];
        /* di = largest shell dimension, used to size the scratch buffer. */
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = ao_loc[ish+1] - ao_loc[ish];
                di = MAX(di, dj);
        }
        double *cache = malloc(sizeof(double) * (di*di + cache_size));
        double *buf = cache + cache_size;
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* Decode triangular pair index into (ish, jsh), ish >= jsh. */
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                if (bas(ATOM_OF,ish) == bas(ATOM_OF,jsh)) {
                        // If two shells are on the same center, their
                        // overlap integrals may be zero due to symmetry.
                        // But their contributions to sgX integrals should
                        // be recognized.
                        q_cond[ish*nbas+jsh] = 1;
                        q_cond[jsh*nbas+ish] = 1;
                        continue;
                }
                shls[0] = ish;
                shls[1] = jsh;
                /* Tiny floor so the bound is never exactly zero. */
                qtmp = 1e-100;
                if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                  NULL, cache)) {
                        di = ao_loc[ish+1] - ao_loc[ish];
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                tmp = fabs(buf[i+di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                }
                q_cond[ish*nbas+jsh] = qtmp;
                q_cond[jsh*nbas+ish] = qtmp;
        }
        free(cache);
}
}
/* Build the density screening table dm_cond (size nbas x ngrids):
 * dm_cond[jsh, g] is the largest |dm(g, ao)| over the AOs of shell jsh
 * and over all nset density matrices. Used by SGXnr_pj_prescreen. */
void SGXsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                            int *atm, int natm, int *bas, int nbas, double *env,
                            int ngrids)
{
        nbas = opt->nbas;
        if (opt->dm_cond) {
                free(opt->dm_cond);
        }
        opt->dm_cond = (double *)malloc(sizeof(double) * nbas*ngrids);
        // nbas in the input arguments may different to opt->nbas.
        // Use opt->nbas because it is used in the prescreen function
        memset(opt->dm_cond, 0, sizeof(double)*nbas*ngrids);
        opt->ngrids = ngrids;

        const size_t nao = ao_loc[nbas] - ao_loc[0];
        double dmax;
        size_t i, j, jsh, iset;
        double *pdm;
        for (i = 0; i < ngrids; i++) {
        for (jsh = 0; jsh < nbas; jsh++) {
                dmax = 0;
                for (iset = 0; iset < nset; iset++) {
                        /* dm layout: nset blocks of (ngrids x nao). */
                        pdm = dm + nao*ngrids*iset;
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                dmax = MAX(dmax, fabs(pdm[i*nao+j]));
                        }
                }
                opt->dm_cond[jsh*ngrids+i] = dmax;
        } }
}
/* Overlap-only screening for a shell pair: keep the pair when its
 * q_cond bound exceeds the direct-SCF cutoff. Always keeps the pair
 * when no optimizer is supplied. */
int SGXnr_ovlp_prescreen(int *shls, CVHFOpt *opt,
                         int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1;
        }
        const int ish = shls[0];
        const int jsh = shls[1];
        const int nsh = opt->nbas;
        assert(opt->q_cond);
        assert(ish < nsh);
        assert(jsh < nsh);
        return opt->q_cond[ish*nsh+jsh] > opt->direct_scf_cutoff ? 1 : 0;
}
/* Contraction task tags: J-type accumulating per-grid values (JTYPE1),
 * J-type accumulating an (i,j) matrix (JTYPE2), and K-type accumulating
 * an (i, grid) block (KTYPE1). */
#define JTYPE1  1
#define JTYPE2  2
#define KTYPE1  3

/* Generate the allocate/set0/send/finalize quartet for one contraction
 * `label`. The buffer size and the finalize reduction both depend on the
 * compile-time `task` tag above. set0/send are intentionally no-ops. */
#define ALLOCATE(label, task) \
static SGXJKArray *SGXJKOperator_allocate_##label(int *shls_slice, int *ao_loc, \
                                                  int ncomp, int ngrids) \
{ \
        SGXJKArray *jkarray = malloc(sizeof(SGXJKArray)); \
        jkarray->v_dims[0]  = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]; \
        jkarray->v_dims[1]  = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]; \
        jkarray->v_dims[2]  = ngrids; \
        if (task == JTYPE1) { \
                jkarray->data = calloc(ncomp * jkarray->v_dims[2], sizeof(double)); \
        } else if (task == JTYPE2) { \
                jkarray->data = calloc(ncomp * jkarray->v_dims[0] \
                                       * jkarray->v_dims[1], sizeof(double)); \
        } else { \
                jkarray->data = calloc(ncomp * jkarray->v_dims[0] \
                                       * jkarray->v_dims[2], sizeof(double)); \
        } \
        jkarray->ncomp = ncomp; \
        return jkarray; \
} \
static void SGXJKOperator_set0_##label(SGXJKArray *jkarray, int k) \
{ } \
static void SGXJKOperator_send_##label(SGXJKArray *jkarray, int k, double *out) \
{ } \
static void SGXJKOperator_final_##label(SGXJKArray *jkarray, double *out) \
{ \
        int i, k, icomp; \
        int ni = jkarray->v_dims[0]; \
        double *data = jkarray->data; \
        int ngrids = jkarray->v_dims[2]; \
        if (task == JTYPE1) { \
                for (i = 0; i < jkarray->ncomp; i++) { \
                for (k = 0; k < ngrids; k++) { \
                        out[i*ngrids+k] += data[i*ngrids+k]; \
                } } \
        } else if (task == JTYPE2) { \
                for (i = 0; i < jkarray->ncomp * jkarray->v_dims[0] * jkarray->v_dims[1]; i++) { \
                        out[i] += data[i]; \
                } \
        } else { \
                for (icomp = 0; icomp < jkarray->ncomp; icomp++) { \
                        for (i = 0; i < ni; i++) { \
                        for (k = 0; k < ngrids; k++) { \
                                out[i*ngrids+k] += data[i*ngrids+k]; \
                        } } \
                        out += ngrids * ni; \
                        data += ngrids * ni; \
                } \
        } \
        SGXJKOperator_deallocate(jkarray); \
}

/* Instantiate the quartet for `fname` and bundle it with the contraction
 * function and the s1/s2 sanity check into an SGXJKOperator vtable. */
#define ADD_OP(fname, task, type) \
        ALLOCATE(fname, task) \
        SGXJKOperator SGX##fname = {SGXJKOperator_allocate_##fname, fname, \
                SGXJKOperator_set0_##fname, SGXJKOperator_send_##fname, \
                SGXJKOperator_final_##fname, \
                SGXJKOperator_sanity_check_##type}
/* Release a per-thread accumulation buffer and its descriptor. */
static void SGXJKOperator_deallocate(SGXJKArray *jkarray)
{
        free(jkarray->data);
        free(jkarray);
}

/* s1 symmetry imposes no constraint on the shell slices. */
static void SGXJKOperator_sanity_check_s1(int *shls_slice)
{
}

/* s2 symmetry requires identical bra and ket shell slices; abort otherwise. */
static void SGXJKOperator_sanity_check_s2(int *shls_slice)
{
        if (!((shls_slice[0] == shls_slice[2]) &&
              (shls_slice[1] == shls_slice[3]))) {
                fprintf(stderr, "Fail at s2\n");
                exit(1);
        };
}
/* J-type contraction (s1): out[g] += sum_ij eri(ij|g) * dm[j,i] for the
 * current shell block. inds maps the compacted grid index k back to the
 * original grid index. */
static void nrs1_ijg_ji_g(double *eri, double *dm, SGXJKArray *out,
                          int i0, int i1, int j0, int j1,
                          int* inds, int pngrids)
{
        const int ncol = out->v_dims[0];
        int i, j, k, icomp;
        double *data = out->data;
        int ij = 0;
        for (icomp = 0; icomp < out->ncomp; icomp++) {
                for (j = j0; j < j1; j++) {
                for (i = i0; i < i1; i++, ij++) {
                        for (k = 0; k < pngrids; k++) {
                                data[inds[k]] += eri[ij*pngrids+k] * dm[j*ncol+i];
                        } } }
                /* Advance to the next component's ngrids-sized slab. */
                data += out->v_dims[2];
        }
}
ADD_OP(nrs1_ijg_ji_g, JTYPE1, s1);
/* J-type contraction (s2): like nrs1_ijg_ji_g but folds in the symmetric
 * partner dm[i,j] for off-diagonal shell pairs; diagonal pairs fall back
 * to the s1 kernel to avoid double counting. */
static void nrs2_ijg_ji_g(double *eri, double *dm, SGXJKArray *out,
                          int i0, int i1, int j0, int j1,
                          int* inds, int pngrids)
{
        if (i0 == j0) {
                return nrs1_ijg_ji_g(eri, dm, out, i0, i1, j0, j1, inds, pngrids);
        }

        const int ncol = out->v_dims[0];
        int i, j, k, icomp;
        double *data = out->data;
        int ij = 0;
        for (icomp = 0; icomp < out->ncomp; icomp++) {
                for (j = j0; j < j1; j++) {
                for (i = i0; i < i1; i++, ij++) {
                        for (k = 0; k < pngrids; k++) {
                                data[inds[k]] += eri[ij*pngrids+k] * (dm[j*ncol+i] + dm[i*ncol+j]);
                        } } }
                data += out->v_dims[2];
        }
}
ADD_OP(nrs2_ijg_ji_g, JTYPE1, s2);
/* J-type contraction into an (i,j) matrix: out[i,j] += sum_g eri(ij|g) * dm[g]
 * for the current shell block, with dm indexed through inds. */
static void nrs1_ijg_g_ij(double *eri, double *dm, SGXJKArray *out,
                          int i0, int i1, int j0, int j1,
                          int* inds, int pngrids)
{
        int ni = out->v_dims[0];
        int nj = out->v_dims[1];
        int i, j, k, icomp;
        double *data = out->data;
        int ij = 0;
        for (icomp = 0; icomp < out->ncomp; icomp++) {
                for (j = j0; j < j1; j++) {
                for (i = i0; i < i1; i++, ij++) {
                        for (k = 0; k < pngrids; k++) {
                                data[i*nj+j] += eri[ij*pngrids+k] * dm[inds[k]];
                        } } }
                data += ni * nj;
        }
}
ADD_OP(nrs1_ijg_g_ij, JTYPE2, s1);

/* s2 variant reuses the s1 kernel (the driver only visits i>=j pairs);
 * only the sanity check differs. */
SGXJKOperator SGXnrs2_ijg_g_ij = {SGXJKOperator_allocate_nrs1_ijg_g_ij,
        nrs1_ijg_g_ij, SGXJKOperator_set0_nrs1_ijg_g_ij,
        SGXJKOperator_send_nrs1_ijg_g_ij, SGXJKOperator_final_nrs1_ijg_g_ij,
        SGXJKOperator_sanity_check_s2};
/* K-type contraction (s1): out[i,g] += sum_j eri(ij|g) * dm[j,g] for the
 * current shell block, scattering grid indices through inds. */
static void nrs1_ijg_gj_gi(double *eri, double *dm, SGXJKArray *out,
                           int i0, int i1, int j0, int j1,
                           int* inds, int pngrids)
{
        double *data = out->data;
        int i, j, k, icomp;
        const int ngrids = out->v_dims[2];
        int ij = 0;
        for (icomp = 0; icomp < out->ncomp; icomp++) {
                for (j = j0; j < j1; j++) {
                for (i = i0; i < i1; i++, ij++) {
                        for (k = 0; k < pngrids; k++) {
                                data[i*ngrids+inds[k]] += eri[ij*pngrids+k] * dm[j*ngrids+inds[k]];
                        } } }
                data += out->v_dims[0] * out->v_dims[2];
        }
}
ADD_OP(nrs1_ijg_gj_gi, KTYPE1, s1);
/* K-type contraction (s2): like nrs1_ijg_gj_gi but also accumulates the
 * transposed (j,i) contribution for off-diagonal shell pairs; diagonal
 * pairs fall back to the s1 kernel to avoid double counting. */
static void nrs2_ijg_gj_gi(double *eri, double *dm, SGXJKArray *out,
                           int i0, int i1, int j0, int j1,
                           int* inds, int pngrids)
{
        if (i0 == j0) {
                return nrs1_ijg_gj_gi(eri, dm, out, i0, i1, j0, j1, inds, pngrids);
        }

        double *data = out->data;
        const int ngrids = out->v_dims[2];
        int i, j, k, icomp;
        int ij = 0;
        for (icomp = 0; icomp < out->ncomp; icomp++) {
                for (j = j0; j < j1; j++) {
                for (i = i0; i < i1; i++, ij++) {
                        for (k = 0; k < pngrids; k++) {
                                data[i*ngrids+inds[k]] += eri[ij*pngrids+k] * dm[j*ngrids+inds[k]];
                        }
                        for (k = 0; k < pngrids; k++) {
                                data[j*ngrids+inds[k]] += eri[ij*pngrids+k] * dm[i*ngrids+inds[k]];
                        }
                } }
                data += out->v_dims[0] * out->v_dims[2];
        }
}
ADD_OP(nrs2_ijg_gj_gi, KTYPE1, s2);
|
DRB062-matrixvector2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: inner level parallelization.
*/
#define N 1000
double a[N][N],v[N],v_out[N];
/* Fill a, v and v_out with deterministic test data.
 * NOTE(review): this is a DataRaceBench case — the nested parallel loops
 * are intentional and must not be "fixed". After the inner loop, j is the
 * OUTER region's private copy (the inner loop privatized its own j), so
 * its value in "i * j" below is unspecified; this mirrors the upstream
 * benchmark source and is left untouched. */
int init()
{
  int i,j,k;
#pragma omp parallel for private(i, j)
  for (i = 0; i < N; i++) {
#pragma omp parallel for private(j)
    for (j = 0; j < N; j++) {
      a[i][j] = i * j;
    }
    v_out[i] = i * j;
    v[i] = i * j;
  }
  return 0;
}
/* Matrix-vector multiply v_out = a * v with nested (inner-level)
 * parallelization: the outer loop is parallel over rows and the inner
 * dot product uses a reduction on sum. The nested pragma is the point
 * of this benchmark case — do not flatten it. */
void mv()
{
  int i,j;
#pragma omp parallel for private(i, j)
  for (i = 0; i < N; i++)
  {
    float sum = 0.0;
#pragma omp parallel for private(j) reduction(+:sum)
    for (j = 0; j < N; j++)
    {
      sum += a[i][j]*v[j];
    }
    v_out[i] = sum;
  }
}
/* Dump a, v_out and v line by line for result comparison.
 * NOTE(review): printf is used but no <stdio.h> include is visible in this
 * file — confirm the benchmark harness tolerates the implicit declaration. */
int print()
{
  int i,j,k;
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      printf("%lf\n", a[i][j]);
    }
    printf("%lf\n",v_out[i]);
    printf("%lf\n",v[i]);
  }
  return 0;
}
/* Benchmark entry point: initialize, multiply, then print all results. */
int main()
{
  init();
  mv();
  print();
  return 0;
}
|
omp_alloc_hbw.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
/* libomp test: create a high-bandwidth-memory allocator with a 2 MB pool
 * and NULL fallback, then have two threads each request 1 MB. With the
 * allocator present, the pool overhead means exactly one request must
 * fail (NULL); with no HBW space the allocator handle is null and both
 * default allocations must succeed. */
int main() {
  omp_alloctrait_t at[2];
  omp_allocator_handle_t a;
  void *p[2];
  at[0].key = OMP_ATK_POOL_SIZE;
  at[0].value = 2 * 1024 * 1024;
  at[1].key = OMP_ATK_FALLBACK;
  at[1].value = OMP_ATV_NULL_FB;  // return NULL instead of falling back
  a = omp_init_allocator(omp_high_bw_mem_space, 2, at);
  printf("allocator hbw created: %p\n", a);
  #pragma omp parallel num_threads(2)
  {
    int i = omp_get_thread_num();
    p[i] = omp_alloc(1024 * 1024, a);
    #pragma omp barrier
    printf("th %d, ptr %p\n", i, p[i]);
    omp_free(p[i], a);
  }
  if (a != omp_null_allocator) {
    // As an allocator has some small memory overhead
    // exactly one of the two pointers should be NULL
    // because of NULL fallback requested
    if ((p[0] == NULL && p[1] != NULL) || (p[0] != NULL && p[1] == NULL)) {
      printf("passed\n");
      return 0;
    } else {
      printf("failed: pointers %p %p\n", p[0], p[1]);
      return 1;
    }
  } else {
    // NULL allocator should cause default allocations
    if (p[0] != NULL && p[1] != NULL) {
      printf("passed\n");
      return 0;
    } else {
      printf("failed: pointers %p %p\n", p[0], p[1]);
      return 1;
    }
  }
}
|
coord.c | /*---------------------------------------------------------------------------------
COORD.C
-SET GRID POINTS AT CENTER, CORNER AND FACES
-EVALUATE BL R AND TH FROM KS
-COMPUTE TRANSFORMATION MATRIX FOR KS->MKS OR KS->FMKS
-COMPUTE METRIC COEFFICIENTS IN MKS/FMKS
-COMPUTE LIGHT-CROSSING TIME
-INITIALIZE FAILURE FLAGS TO ZERO
---------------------------------------------------------------------------------*/
/*
* -- given the indices i,j and location in the cell, return with
* the values of X1,X2 there;
* -- the locations are defined by :
* -----------------------
* | |
* | |
* |FACE1 CENT |
* | |
* |CORN FACE2 |
* ----------------------
*
*/
#include "decs.h"
double thG_of_X(const double X[NDIM]);
void thJ_of_X(const double X[NDIM], double *y, double* thJ);
double r_of_X(const double X[NDIM]);
double th_of_X(const double X[NDIM]);
// Set coordinate values at grid loc [i,j,LOC]
// Writes X[0..2]; X[1]/X[2] are offset by half a cell depending on whether
// the requested location is a face, the center, or the corner. NG is the
// ghost-zone count. In DEBUG builds an unknown loc aborts; otherwise
// X[1]/X[2] are left unset for an invalid loc.
inline void coord(int i, int j, int loc, double *X)
{
  X[0] = 0; // Make sure all memory passed in is initialized
  if (loc == FACE1)
  {
    // Left x1-face: centered in x2, on the cell edge in x1.
    X[1] = startx[1] + (i - NG) * dx[1];
    X[2] = startx[2] + (j + 0.5 - NG) * dx[2];
  }
  else if (loc == FACE2)
  {
    // Bottom x2-face: centered in x1, on the cell edge in x2.
    X[1] = startx[1] + (i + 0.5 - NG) * dx[1];
    X[2] = startx[2] + (j - NG) * dx[2];
  }
  else if (loc == CENT)
  {
    X[1] = startx[1] + (i + 0.5 - NG) * dx[1];
    X[2] = startx[2] + (j + 0.5 - NG) * dx[2];
  }
  else if (loc == CORN)
  {
    X[1] = startx[1] + (i - NG) * dx[1];
    X[2] = startx[2] + (j - NG) * dx[2];
  }
#if DEBUG
  else
  {
    fprintf(stderr, "Invalid coordinate location!\n");
    exit(-1);
  }
#endif
}
// Computes theta_G from X2: the standard MKS polar angle, concentrating
// resolution toward the midplane as hslope decreases from 1.
inline double thG_of_X(const double X[NDIM])
{
  return M_PI*X[2] + ((1. - hslope)/2.)*sin(2.*M_PI*X[2]);
}

// Computes theta_J from X2 (FMKS): y remaps X2 from [0,1] to [-1,1], and
// thJ applies the poly_alpha/poly_xt polynomial used to derefine the poles.
inline void thJ_of_X(const double X[NDIM], double *y, double* thJ)
{
  *y = 2*X[2] - 1.;
  *thJ = poly_norm*(*y)*(1. + pow((*y)/poly_xt,poly_alpha)/(poly_alpha+1.)) +
         0.5*M_PI;
}
// Computes r from X1: radius is exponential in the first code coordinate.
inline double r_of_X(const double X[NDIM])
{
  return exp(X[1]);
}

// Computes theta from (X1,X2). With DEREFINE_POLES the MKS angle thG is
// blended toward the FMKS angle thJ, with the blend decaying exponentially
// in X1 away from the inner boundary; otherwise plain MKS is used.
inline double th_of_X(const double X[NDIM])
{
  double thG = thG_of_X(X);

#if DEREFINE_POLES
  double y, thJ;
  thJ_of_X(X, &y, &thJ);
  return thG + exp(mks_smooth*(startx[1] - X[1]))*(thJ - thG);
#else
  return thG;
#endif
}
// Boyer-Lindquist coordinate of point X: converts code coordinates to
// (r, th). With COORDSINGFIX, th is nudged off the polar axis by
// SINGSMALL to avoid the coordinate singularity at th = 0 and th = pi.
inline void bl_coord(const double X[NDIM], double *r, double *th)
{
  *r = r_of_X(X);
  *th = th_of_X(X);

  // Avoid singularity at polar axis
#if COORDSINGFIX
  if (fabs(*th) < SINGSMALL) {
    if ((*th) >= 0)
      *th = SINGSMALL;
    if ((*th) < 0)
      *th = -SINGSMALL;
  }
  if (fabs(M_PI - (*th)) < SINGSMALL) {
    if ((*th) >= M_PI)
      *th = M_PI + SINGSMALL;
    if ((*th) < M_PI)
      *th = M_PI - SINGSMALL;
  }
#endif
}
// Computes transformation matrix for KS->MKS and KS->FMKS: fills dxdX with
// the Jacobian d(KS)/d(code X) at X. Minkowski is the identity; MKS has
// diagonal r and theta stretches; FMKS (DEREFINE_POLES) adds the analytic
// derivatives of the polar-derefinement blend, including the off-diagonal
// d(theta)/d(X1) term.
inline void set_dxdX(double X[NDIM], double dxdX[NDIM][NDIM])
{
  memset(dxdX, 0, NDIM*NDIM*sizeof(double));

#if METRIC == MINKOWSKI
  for (int mu = 0; mu < NDIM; mu++)
  {
    dxdX[mu][mu] = 1.;
  }
#elif METRIC == MKS && !DEREFINE_POLES
  dxdX[0][0] = 1.;
  dxdX[1][1] = exp(X[1]);  // dr/dX1 for r = exp(X1)
  dxdX[2][2] = M_PI - (hslope - 1.)*M_PI*cos(2.*M_PI*X[2]);
  dxdX[3][3] = 1.;
#elif METRIC == MKS && DEREFINE_POLES
  dxdX[0][0] = 1.;
  dxdX[1][1] = exp(X[1]);
  // d(theta)/d(X1): the blend factor exp(mks_smooth*(startx1-X1)) decays in X1.
  dxdX[2][1] = -exp(mks_smooth*(startx[1]-X[1]))*mks_smooth*(
    M_PI/2. -
    M_PI*X[2] +
    poly_norm*(2.*X[2]-1.)*(1+(pow((-1.+2*X[2])/poly_xt,poly_alpha))/(1 + poly_alpha)) -
    1./2.*(1. - hslope)*sin(2.*M_PI*X[2])
    );
  // d(theta)/d(X2): MKS part plus the blended FMKS polynomial derivative.
  dxdX[2][2] = M_PI + (1. - hslope)*M_PI*cos(2.*M_PI*X[2]) +
    exp(mks_smooth*(startx[1]-X[1]))*(
      -M_PI +
      2.*poly_norm*(1. + pow((2.*X[2]-1.)/poly_xt,poly_alpha)/(poly_alpha+1.)) +
      (2.*poly_alpha*poly_norm*(2.*X[2]-1.)*pow((2.*X[2]-1.)/poly_xt,poly_alpha-1.))/((1.+poly_alpha)*poly_xt) -
      (1.-hslope)*M_PI*cos(2.*M_PI*X[2])
      );
  dxdX[3][3] = 1.;
#else
#error "Unsupported metric!"
#endif
}
// Computes covariant metric in KS at X, then transforms it to code
// coordinates: gcov_code[mu][nu] = gcov_ks[lam][kap] dxdX[lam][mu] dxdX[kap][nu].
// Minkowski is handled directly as diag(-1,1,1,1).
void gcov_func(double X[NDIM], double gcov[NDIM][NDIM])
{
  memset(gcov, 0, NDIM*NDIM*sizeof(double));

#if METRIC == MINKOWSKI
  gcov[0][0] = -1.;
  for (int j = 1; j < NDIM; j++) {
    gcov[j][j] = 1.;
  }
#else //Everything else is covered in set_dxdX
  double sth, cth, s2, rho2;
  double r, th;

  bl_coord(X, &r, &th);

  cth = cos(th);
  sth = sin(th);

  s2 = sth*sth;
  rho2 = r*r + a*a*cth*cth;  // Kerr rho^2 with spin a

  // Kerr-Schild metric components.
  gcov[0][0] = -1. + 2.*r/rho2;
  gcov[0][1] = 2.*r/rho2;
  gcov[0][3] = -2.*a*r*s2/rho2;

  gcov[1][0] = gcov[0][1];
  gcov[1][1] = 1. + 2.*r/rho2;
  gcov[1][3] = -a*s2*(1. + 2.*r/rho2);

  gcov[2][2] = rho2;

  gcov[3][0] = gcov[0][3];
  gcov[3][1] = gcov[1][3];
  gcov[3][3] = s2*(rho2 + a*a*s2*(1. + 2.*r/rho2));

  // Apply coordinate transformation to code coordinates X
  double dxdX[NDIM][NDIM];
  set_dxdX(X, dxdX);

  double gcov_ks[NDIM][NDIM];
  memcpy(gcov_ks, gcov, NDIM*NDIM*sizeof(double));
  memset(gcov, 0, NDIM*NDIM*sizeof(double));

  for (int mu = 0; mu < NDIM; mu++) {
    for (int nu = 0; nu < NDIM; nu++) {
      for (int lam = 0; lam < NDIM; lam++) {
        for (int kap = 0; kap < NDIM; kap++) {
          gcov[mu][nu] += gcov_ks[lam][kap]*dxdX[lam][mu]*dxdX[kap][nu];
        }
      }
    }
  }
#endif // METRIC
}
// Establish X coordinates: sets startx[] and dx[] for the chosen metric.
// For MKS the inner radius is solved so that 5.5 radial zones sit inside
// the event horizon; for FMKS it also precomputes poly_norm.
void set_points()
{
#if METRIC == MINKOWSKI
  startx[1] = x1Min;
  startx[2] = x2Min;
  dx[1] = (x1Max - x1Min)/N1TOT;
  dx[2] = (x2Max - x2Min)/N2TOT;
#elif METRIC == MKS
  // Set Rin such that we have 5 zones completely inside the event horizon
  // If xeh = log(Rhor), xin = log(Rin), and xout = log(Rout),
  // then we want xeh = xin + 5.5 * (xout - xin) / N1TOT, or solving/replacing:
  Rin = exp((N1TOT * log(Rhor) / 5.5 - log(Rout)) / (-1. + N1TOT / 5.5));

  startx[1] = log(Rin);
  if (startx[1] < 0.0) ERROR("Not enough radial zones! Increase N1!");
  startx[2] = 0.;

  dx[1] = log(Rout/Rin)/N1TOT;
  dx[2] = 1./N2TOT;

#if DEREFINE_POLES
  // Normalization of the FMKS polar polynomial (see thJ_of_X).
  poly_norm = 0.5*M_PI*1./(1. + 1./(poly_alpha + 1.)*
                           1./pow(poly_xt, poly_alpha));
#endif
#endif // METRIC
}
// Sets the grid struct G: establishes the coordinate grid, then fills
// geometry (metric, inverse, lapse) at all four locations of every zone,
// including ghost zones, in parallel over the zone loops.
void set_grid(struct GridGeom *G)
{
  // Set up boundaries, steps in coordinate grid
  set_points();
  dV = dx[1]*dx[2];

#if !INTEL_WORKAROUND
#pragma omp parallel for collapse(2)
#endif
  JSLOOP(-NG, N2 - 1 + NG) {
    ISLOOP(-NG, N1 - 1 + NG) {
      set_grid_loc(G, i, j, CENT);
      set_grid_loc(G, i, j, CORN);
      set_grid_loc(G, i, j, FACE1);
      set_grid_loc(G, i, j, FACE2);

      // Connection only needed at zone center
      conn_func(G, i, j);
    }
  }
}
// Makes necessary function calls to set grid at various LOC: computes the
// covariant metric, its inverse, the metric determinant, and the lapse
// alpha = 1/sqrt(-g^00) at one (i, j, loc) point and stores them in G.
inline void set_grid_loc(struct GridGeom *G, int i, int j, int loc)
{
  double X[NDIM];
  double gcov[NDIM][NDIM], gcon[NDIM][NDIM];

  coord(i, j, loc, X);
  gcov_func(X, gcov);
  // gcon_func inverts gcov and returns sqrt(-det(g)).
  G->gdet[loc][j][i] = gcon_func(gcov, gcon);

  for (int mu = 0; mu < NDIM; mu++) {
    for (int nu = 0; nu < NDIM; nu++) {
      G->gcov[loc][mu][nu][j][i] = gcov[mu][nu];
      G->gcon[loc][mu][nu][j][i] = gcon[mu][nu];
    }
  }

  G->lapse[loc][j][i] = 1./sqrt(-G->gcon[loc][0][0][j][i]);
}
// Initializes flags and fails to zero: clears the per-zone pflag and
// fail_save diagnostic arrays over the whole grid (including ghosts).
void zero_arrays()
{
  ZLOOPALL
  {
    pflag[j][i] = 0;
    fail_save[j][i] = 0;
  }
}
|
main.c | void baz(int M, int *restrict T, int N, int *restrict A) {
#pragma omp parallel default(shared)
{
#pragma omp for
for (int I = 0; I < N; ++I) {
A[I] = I;
for (int J = 0; J < M; ++J)
A[I] = A[I] + T[J];
}
}
}
void bar(int M, int *restrict T, int N, int *restrict A) { baz(M, T, N, A); }
void foo(int N, int *A) {
int TSize = 4;
int T[4];
for (int I = 0; I < TSize; ++I)
T[I] = I;
#pragma spf region
{ bar(TSize, T, N, A); }
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Plain (non-SIMD) 3x3 stride-1 convolution: for each output channel,
// initialize with the bias and accumulate every input channel's 3x3
// contribution. Processes two output rows per iteration (reading four
// input rows) to reuse loaded input values, with a single-row tail loop.
// Assumes top_blob was sized for a valid convolution (outw = w - 2).
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;  // second output row

            const float* img0 = bottom_blob.channel(q);

            // 3x3 kernel for this (output, input) channel pair.
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // Four consecutive input rows feed two output rows.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            int i = 0;

            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    // Output row i: rows r0..r2 against kernel rows k0..k2.
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    // Output row i+1: rows r1..r3 against the same kernel.
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-pixel border and the extra row consumed.
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // Tail: remaining single output row.
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
// Winograd F(2,3) kernel transform: converts each 3x3 kernel into a 4x4
// transformed tile U = G * g * G^T, where G is the 4x3 matrix ktm below.
// kernel_tm is laid out as (outch) channels x (inch) rows x 16 values.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4 * 4, inch, outch);

    // G
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h = G * g  (4x3 intermediate)
            float tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (4x4 transformed tile)
            for (int j = 0; j < 4; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(2,3) 3x3 stride-1 convolution.
// Three phases: (1) transform the padded input into 4x4 tiles (B^T d B),
// (2) element-wise multiply-accumulate each tile with the pre-transformed
// kernels from conv3x3s1_winograd23_transform_kernel_sse over all input
// channels, (3) inverse-transform (A^T W A) each 16-element tile back into
// a 2x2 output patch, adding bias, then crop the padding.
// NOTE(review): working blobs are created with elemsize 4u, i.e. this path
// assumes fp32 storage -- confirm callers never hand it fp16 data.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
// Output dims are rounded up to a multiple of 2 so the image tiles exactly;
// the input needs 2 extra rows/cols since each 4x4 input tile overlaps its
// neighbour by 2 pixels.
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
// one 16-float transformed tile per (tile, input channel)
bottom_blob_tm.create(4 * 4, tiles, inch, 4u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
float* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// four consecutive input rows feeding this row of tiles; tiles step
// by 2 pixels, hence the j * 2 row origin
const float* r0 = img + w * j * 2;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
#if __AVX__
// B^T * d applied to rows, transpose, then B^T again on columns
__m128 _d0, _d1, _d2, _d3;
__m128 _w0, _w1, _w2, _w3;
// load
_d0 = _mm_loadu_ps(r0);
_d1 = _mm_loadu_ps(r1);
_d2 = _mm_loadu_ps(r2);
_d3 = _mm_loadu_ps(r3);
// w = B_t * d
_w0 = _mm_sub_ps(_d0, _d2);
_w1 = _mm_add_ps(_d1, _d2);
_w2 = _mm_sub_ps(_d2, _d1);
_w3 = _mm_sub_ps(_d3, _d1);
// transpose d to d_t
_MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
// d = B_t * d_t
_d0 = _mm_sub_ps(_w0, _w2);
_d1 = _mm_add_ps(_w1, _w2);
_d2 = _mm_sub_ps(_w2, _w1);
_d3 = _mm_sub_ps(_w3, _w1);
// save to out_tm
_mm_storeu_ps(out_tm0, _d0);
_mm_storeu_ps(out_tm0 + 4, _d1);
_mm_storeu_ps(out_tm0 + 8, _d2);
_mm_storeu_ps(out_tm0 + 12, _d3);
#else
// scalar fallback: identical math, spelled out per element
float d0[4], d1[4], d2[4], d3[4];
float w0[4], w1[4], w2[4], w3[4];
float t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// d = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 4] = d1[n];
out_tm0[n + 8] = d2[n];
out_tm0[n + 12] = d3[n];
}
#endif
// advance by one tile (2 pixels) horizontally
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
// release padded input early; only the transformed tiles are needed now
bottom_blob_bordered = Mat();
// BEGIN dot
// For every tile, accumulate the 16-element Hadamard product of input-tile
// and kernel-tile over all input channels. Output channels are processed
// 4 at a time (sharing the input loads), then one at a time.
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
for (int i = 0; i < tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float* output1_tm = out1_tm.row(i);
float* output2_tm = out2_tm.row(i);
float* output3_tm = out3_tm.row(i);
#if __AVX__
// two 8-lane accumulators per output channel cover the 16-element tile
float zero_val = 0.f;
__m256 _sum0 = _mm256_broadcast_ss(&zero_val);
__m256 _sum0n = _mm256_broadcast_ss(&zero_val);
__m256 _sum1 = _mm256_broadcast_ss(&zero_val);
__m256 _sum1n = _mm256_broadcast_ss(&zero_val);
__m256 _sum2 = _mm256_broadcast_ss(&zero_val);
__m256 _sum2n = _mm256_broadcast_ss(&zero_val);
__m256 _sum3 = _mm256_broadcast_ss(&zero_val);
__m256 _sum3n = _mm256_broadcast_ss(&zero_val);
int q = 0;
// main loop: 4 input channels per iteration
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0 + 8);
// k0
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0 + 8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1 + 8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2 + 8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3 + 8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k1
_r0 = _mm256_loadu_ps(r1);
_r0n = _mm256_loadu_ps(r1 + 8);
_k0 = _mm256_loadu_ps(k0 + 16);
_k0n = _mm256_loadu_ps(k0 + 24);
_k1 = _mm256_loadu_ps(k1 + 16);
_k1n = _mm256_loadu_ps(k1 + 24);
_k2 = _mm256_loadu_ps(k2 + 16);
_k2n = _mm256_loadu_ps(k2 + 24);
_k3 = _mm256_loadu_ps(k3 + 16);
_k3n = _mm256_loadu_ps(k3 + 24);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k2
_r0 = _mm256_loadu_ps(r2);
_r0n = _mm256_loadu_ps(r2 + 8);
_k0 = _mm256_loadu_ps(k0 + 32);
_k0n = _mm256_loadu_ps(k0 + 40);
_k1 = _mm256_loadu_ps(k1 + 32);
_k1n = _mm256_loadu_ps(k1 + 40);
_k2 = _mm256_loadu_ps(k2 + 32);
_k2n = _mm256_loadu_ps(k2 + 40);
_k3 = _mm256_loadu_ps(k3 + 32);
_k3n = _mm256_loadu_ps(k3 + 40);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k3
_r0 = _mm256_loadu_ps(r3);
_r0n = _mm256_loadu_ps(r3 + 8);
_k0 = _mm256_loadu_ps(k0 + 48);
_k0n = _mm256_loadu_ps(k0 + 56);
_k1 = _mm256_loadu_ps(k1 + 48);
_k1n = _mm256_loadu_ps(k1 + 56);
_k2 = _mm256_loadu_ps(k2 + 48);
_k2n = _mm256_loadu_ps(k2 + 56);
_k3 = _mm256_loadu_ps(k3 + 48);
_k3n = _mm256_loadu_ps(k3 + 56);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
// remainder: one input channel at a time
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0 + 8);
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0 + 8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1 + 8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2 + 8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3 + 8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm + 8, _sum0n);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output1_tm + 8, _sum1n);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output2_tm + 8, _sum2n);
_mm256_storeu_ps(output3_tm, _sum3);
_mm256_storeu_ps(output3_tm + 8, _sum3n);
#else
// scalar fallback; the k pointer walks rows q..q+3 of each kernel via
// the += 16 / -= 16*3 dance so it ends back at row q each iteration
float sum0[16] = {0.0f};
float sum1[16] = {0.0f};
float sum2[16] = {0.0f};
float sum3[16] = {0.0f};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
k0 += 16;
sum0[n] += r1[n] * k0[n];
k0 += 16;
sum0[n] += r2[n] * k0[n];
k0 += 16;
sum0[n] += r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += r0[n] * k1[n];
k1 += 16;
sum1[n] += r1[n] * k1[n];
k1 += 16;
sum1[n] += r2[n] * k1[n];
k1 += 16;
sum1[n] += r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += r0[n] * k2[n];
k2 += 16;
sum2[n] += r1[n] * k2[n];
k2 += 16;
sum2[n] += r2[n] * k2[n];
k2 += 16;
sum2[n] += r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += r0[n] * k3[n];
k3 += 16;
sum3[n] += r1[n] * k3[n];
k3 += 16;
sum3[n] += r2[n] * k3[n];
k3 += 16;
sum3[n] += r3[n] * k3[n];
k3 -= 16 * 3;
}
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
sum1[n] += r0[n] * k1[n];
sum2[n] += r0[n] * k2[n];
sum3[n] += r0[n] * k3[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif
}
}
// leftover output channels (outch % 4), one at a time, scalar only
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float sum0[16] = {0.0f};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q + 1);
const float* k2 = kernel0_tm.row(q + 2);
const float* k3 = kernel0_tm.row(q + 3);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
sum0[n] += r1[n] * k1[n];
sum0[n] += r2[n] * k2[n];
sum0[n] += r3[n] * k3[n];
}
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
// transformed input no longer needed
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
// Inverse transform: each 4x4 accumulated tile collapses to a 2x2 output
// patch via Y = A^T * W * A, with the per-channel bias added once per pixel.
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
// no rounding padding was added, write straight into the output blob
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
}
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
// each tile row writes two output rows
float* outRow0 = out.row(j * 2);
float* outRow1 = out.row(j * 2 + 1);
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tile = out_tm.row(j * nRowBlocks + i);
float s0[4], s1[4], s2[4], s3[4];
float w0[4], w1[4];
float d0[2], d1[2], d2[2], d3[2];
float o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + bias0;
o1[n] = d1[n] - d2[n] + d3[n] + bias0;
}
// save to top blob tm
outRow0[0] = o0[0];
outRow0[1] = o0[1];
outRow1[0] = o1[0];
outRow1[1] = o1[1];
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
// Drop the rows/cols added by the round-up-to-2 padding (no-op when
// top_blob_bordered aliases top_blob).
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Pre-transform 3x3 kernels into the Winograd F(4,3) domain (6x6 = 36
// coefficients per kernel), then repack them for the SIMD dot phase:
// kernel_tm2 receives 9 Mats (one per 4-coefficient sub-block r of the 36),
// each holding the kernels interleaved in groups of 8 / 4 / 1 output
// channels to match the 8-wide, 4-wide and scalar inner loops of
// conv3x3s1_winograd43_sse.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
Mat kernel_tm(6 * 6, inch, outch);
// G
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
// first pass: tmp = G * g (6x3 intermediate)
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
// second pass: U = tmp * G^T (6x6 result, row-major j * 6 + i)
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// Repack: for each of the 9 sub-blocks r (4 consecutive coefficients of
// the 36), gather all kernels into channel groups of 8, then 4, then 1
// output channels, interleaved 4 floats per channel per input channel.
for (int r = 0; r < 9; r++)
{
// channel count = number of 8-groups + number of 4-groups + leftovers
Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4);
int p = 0;
for (; p + 7 < outch; p += 8)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
const float* kernel4 = (const float*)kernel_tm.channel(p + 4);
const float* kernel5 = (const float*)kernel_tm.channel(p + 5);
const float* kernel6 = (const float*)kernel_tm.channel(p + 6);
const float* kernel7 = (const float*)kernel_tm.channel(p + 7);
float* ktmp = kernel_tm_test.channel(p / 8);
for (int q = 0; q < inch; q++)
{
// 4 coefficients from each of 8 output channels -> 32 floats
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp[16] = kernel4[r * 4 + 0];
ktmp[17] = kernel4[r * 4 + 1];
ktmp[18] = kernel4[r * 4 + 2];
ktmp[19] = kernel4[r * 4 + 3];
ktmp[20] = kernel5[r * 4 + 0];
ktmp[21] = kernel5[r * 4 + 1];
ktmp[22] = kernel5[r * 4 + 2];
ktmp[23] = kernel5[r * 4 + 3];
ktmp[24] = kernel6[r * 4 + 0];
ktmp[25] = kernel6[r * 4 + 1];
ktmp[26] = kernel6[r * 4 + 2];
ktmp[27] = kernel6[r * 4 + 3];
ktmp[28] = kernel7[r * 4 + 0];
ktmp[29] = kernel7[r * 4 + 1];
ktmp[30] = kernel7[r * 4 + 2];
ktmp[31] = kernel7[r * 4 + 3];
ktmp += 32;
// next input channel: each kernel row is 36 floats apart
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}
for (; p + 3 < outch; p += 4)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
// 4-groups start after all the 8-groups
float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp += 16;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
}
}
for (; p < outch; p++)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
// singles start after the 8-groups and 4-groups
float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp += 4;
kernel0 += 36;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, elemsize, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row(q);
float* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row(q);
float* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row(q);
float* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row(q);
float* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row(q);
float* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row(q);
float* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row(q);
float* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row(q);
float* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row(q);
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
#if (defined _WIN32 && !(defined __MINGW32__) && !__clang__)
{
_t0.m256_f32[0] = _w0.m256_f32[0];
_t1.m256_f32[0] = _w0.m256_f32[1];
_t2.m256_f32[0] = _w0.m256_f32[2];
_t3.m256_f32[0] = _w0.m256_f32[3];
_t4.m256_f32[0] = _w0.m256_f32[4];
_t5.m256_f32[0] = _w0.m256_f32[5];
_t0.m256_f32[1] = _w1.m256_f32[0];
_t1.m256_f32[1] = _w1.m256_f32[1];
_t2.m256_f32[1] = _w1.m256_f32[2];
_t3.m256_f32[1] = _w1.m256_f32[3];
_t4.m256_f32[1] = _w1.m256_f32[4];
_t5.m256_f32[1] = _w1.m256_f32[5];
_t0.m256_f32[2] = _w2.m256_f32[0];
_t1.m256_f32[2] = _w2.m256_f32[1];
_t2.m256_f32[2] = _w2.m256_f32[2];
_t3.m256_f32[2] = _w2.m256_f32[3];
_t4.m256_f32[2] = _w2.m256_f32[4];
_t5.m256_f32[2] = _w2.m256_f32[5];
_t0.m256_f32[3] = _w3.m256_f32[0];
_t1.m256_f32[3] = _w3.m256_f32[1];
_t2.m256_f32[3] = _w3.m256_f32[2];
_t3.m256_f32[3] = _w3.m256_f32[3];
_t4.m256_f32[3] = _w3.m256_f32[4];
_t5.m256_f32[3] = _w3.m256_f32[5];
_t0.m256_f32[4] = _w4.m256_f32[0];
_t1.m256_f32[4] = _w4.m256_f32[1];
_t2.m256_f32[4] = _w4.m256_f32[2];
_t3.m256_f32[4] = _w4.m256_f32[3];
_t4.m256_f32[4] = _w4.m256_f32[4];
_t5.m256_f32[4] = _w4.m256_f32[5];
_t0.m256_f32[5] = _w5.m256_f32[0];
_t1.m256_f32[5] = _w5.m256_f32[1];
_t2.m256_f32[5] = _w5.m256_f32[2];
_t3.m256_f32[5] = _w5.m256_f32[3];
_t4.m256_f32[5] = _w5.m256_f32[4];
_t5.m256_f32[5] = _w5.m256_f32[5];
}
#else
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
#endif
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
float output_n0[8] = {0.f};
_mm256_storeu_ps(output_n0, _n0);
float output_n1[8] = {0.f};
_mm256_storeu_ps(output_n1, _n1);
float output_n2[8] = {0.f};
_mm256_storeu_ps(output_n2, _n2);
float output_n3[8] = {0.f};
_mm256_storeu_ps(output_n3, _n3);
float output_n4[8] = {0.f};
_mm256_storeu_ps(output_n4, _n4);
float output_n5[8] = {0.f};
_mm256_storeu_ps(output_n5, _n5);
out_tm0[0] = output_n0[0];
out_tm0[1] = output_n0[1];
out_tm0[2] = output_n0[2];
out_tm0[3] = output_n0[3];
out_tm1[0] = output_n0[4];
out_tm1[1] = output_n0[5];
out_tm1[2] = output_n1[0];
out_tm1[3] = output_n1[1];
out_tm2[0] = output_n1[2];
out_tm2[1] = output_n1[3];
out_tm2[2] = output_n1[4];
out_tm2[3] = output_n1[5];
out_tm3[0] = output_n2[0];
out_tm3[1] = output_n2[1];
out_tm3[2] = output_n2[2];
out_tm3[3] = output_n2[3];
out_tm4[0] = output_n2[4];
out_tm4[1] = output_n2[5];
out_tm4[2] = output_n3[0];
out_tm4[3] = output_n3[1];
out_tm5[0] = output_n3[2];
out_tm5[1] = output_n3[3];
out_tm5[2] = output_n3[4];
out_tm5[3] = output_n3[5];
out_tm6[0] = output_n4[0];
out_tm6[1] = output_n4[1];
out_tm6[2] = output_n4[2];
out_tm6[3] = output_n4[3];
out_tm7[0] = output_n4[4];
out_tm7[1] = output_n4[5];
out_tm7[2] = output_n5[0];
out_tm7[3] = output_n5[1];
out_tm8[0] = output_n5[2];
out_tm8[1] = output_n5[3];
out_tm8[2] = output_n5[4];
out_tm8[3] = output_n5[5];
#else
float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __AVX__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q = 0;
for (; q + 3 < inch; q = q + 4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0 + 4);
__m128 _r2 = _mm_loadu_ps(r0 + 8);
__m128 _r3 = _mm_loadu_ps(r0 + 12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
sum4[n] += r0[n] * kptr[n + 16];
sum5[n] += r0[n] * kptr[n + 20];
sum6[n] += r0[n] * kptr[n + 24];
sum7[n] += r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// float* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const float* r0 = bottom_blob_tm.channel(q).row<float>(i);
// const float* k0 = kernel0_tm.row<float>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
}
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
// TODO AVX2
float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
float w0[6], w1[6], w2[6], w3[6];
float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
float o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
/* 3x3 convolution with stride 2, scalar reference path.
 * For each output channel p: the output plane is seeded with the bias,
 * then every input channel q accumulates its 3x3 correlation into it.
 * Kernel layout: 9 contiguous weights per (outch, inch) pair. */
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // After an output row the source pointers have advanced 2*outw;
    // tailstep moves them the remaining distance to the next source
    // row pair (total advance per output row is 2*w).
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img = bottom_blob.channel(q);
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // Three sliding source rows of the 3x3 window.
            const float* rows[3] = { img, img + w, img + w * 2 };

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    float acc = 0.f;
                    for (int t = 0; t < 3; t++)
                    {
                        const float* rt = rows[t];
                        const float* kt = kernel0 + t * 3;
                        acc += rt[0] * kt[0] + rt[1] * kt[1] + rt[2] * kt[2];
                    }

                    *outptr += acc;
                    outptr++;

                    // Stride 2: shift the window two columns.
                    for (int t = 0; t < 3; t++)
                        rows[t] += 2;
                }

                for (int t = 0; t < 3; t++)
                    rows[t] += tailstep;
            }
        }
    }
}
|
residual_based_implicit_time_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_IMPLICIT_TIME_SCHEME )
#define KRATOS_RESIDUAL_BASED_IMPLICIT_TIME_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedImplicitTimeScheme
* @ingroup KratosCore
* @brief This is the base class for the implicit time schemes
* @details Other implicit schemes should derive from this one. With the use of this base scheme it is possible to reduce code duplication
* @tparam TSparseSpace The sparse space considered
* @tparam TDenseSpace The dense space considered
* @see Scheme
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedImplicitTimeScheme
    : public Scheme<TSparseSpace,TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ResidualBasedImplicitTimeScheme
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedImplicitTimeScheme );

    /// Base class definition
    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;

    /// DoF array type definition
    typedef typename BaseType::DofsArrayType DofsArrayType;

    /// DoF vector type definition
    typedef typename Element::DofsVectorType DofsVectorType;

    /// Data type definition
    typedef typename BaseType::TDataType TDataType;

    /// Matrix type definition
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    /// Vector type definition
    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    /// Local system vector type definition
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    /// Local system matrix type definition
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    /// Nodes containers definition
    typedef ModelPart::NodesContainerType NodesArrayType;

    /// Elements containers definition
    typedef ModelPart::ElementsContainerType ElementsArrayType;

    /// Conditions containers definition
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;

    /// Index type definition
    typedef std::size_t IndexType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor (the implicit method)
     * @details Allocates one scratch mass (M) and damping (D) matrix per
     * OpenMP thread so the Calculate* methods can run in parallel without
     * sharing workspace.
     */
    explicit ResidualBasedImplicitTimeScheme()
        :BaseType()
    {
        // Allocate auxiliary memory
        const std::size_t num_threads = OpenMPUtils::GetNumThreads();

        mMatrix.M.resize(num_threads);
        mMatrix.D.resize(num_threads);
    }

    /**
     * @brief Constructor. The implicit method method
     * @param ThisParameters The configuration parameters
     */
    explicit ResidualBasedImplicitTimeScheme(Parameters ThisParameters)
        :ResidualBasedImplicitTimeScheme()
    {
        // Validate and assign defaults
        this->ValidateAndAssignParameters(ThisParameters);
        this->AssignSettings(ThisParameters);
    }

    /** Copy Constructor.
     */
    explicit ResidualBasedImplicitTimeScheme(ResidualBasedImplicitTimeScheme& rOther)
        :BaseType(rOther)
        ,mMatrix(rOther.mMatrix)
    {
    }

    /**
     * Clone
     */
    typename BaseType::Pointer Clone() override
    {
        return Kratos::make_shared<ResidualBasedImplicitTimeScheme>(*this);
    }

    /** Destructor.
     */
    ~ResidualBasedImplicitTimeScheme() override = default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief It initializes a non-linear iteration (for the element)
     * @param rModelPart The model part of the problem to solve
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    void InitializeNonLinIteration(
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        ) override
    {
        KRATOS_TRY;

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Definition of the first element iterator
        const auto it_elem_begin = rModelPart.ElementsBegin();

        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
            auto it_elem = it_elem_begin + i;
            it_elem->InitializeNonLinearIteration(r_current_process_info);
        }

        // Definition of the first condition iterator
        const auto it_cond_begin = rModelPart.ConditionsBegin();

        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
            auto it_cond = it_cond_begin + i;
            it_cond->InitializeNonLinearIteration(r_current_process_info);
        }

        // Definition of the first constraint iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();

        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
            auto it_const = it_const_begin + i;
            it_const->InitializeNonLinearIteration(r_current_process_info);
        }

        KRATOS_CATCH( "" );
    }

    /**
     * @brief It initializes a non-linear iteration (for an individual condition)
     * @param pCurrentCondition The condition to compute
     * @param rCurrentProcessInfo The current process info instance
     */
    void InitializeNonLinearIteration(
        Condition::Pointer pCurrentCondition,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        pCurrentCondition->InitializeNonLinearIteration(rCurrentProcessInfo);
    }

    /**
     * @brief It initializes a non-linear iteration (for an individual element)
     * @param pCurrentElement The element to compute
     * @param rCurrentProcessInfo The current process info instance
     */
    void InitializeNonLinearIteration(
        Element::Pointer pCurrentElement,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        pCurrentElement->InitializeNonLinearIteration(rCurrentProcessInfo);
    }

    /**
     * @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
     * @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. This function calculates at the same time the contribution to the LHS and to the RHS of the system
     * @param rCurrentElement The element to compute
     * @param LHS_Contribution The LHS matrix contribution
     * @param RHS_Contribution The RHS vector contribution
     * @param EquationId The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void CalculateSystemContributions(
        Element& rCurrentElement,
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& EquationId,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY;

        // Each thread works on its own scratch M/D matrices
        const IndexType this_thread = OpenMPUtils::ThisThread();

        //rCurrentElement.InitializeNonLinearIteration(rCurrentProcessInfo);

        rCurrentElement.CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);

        rCurrentElement.EquationIdVector(EquationId,rCurrentProcessInfo);

        rCurrentElement.CalculateMassMatrix(mMatrix.M[this_thread],rCurrentProcessInfo);

        rCurrentElement.CalculateDampingMatrix(mMatrix.D[this_thread],rCurrentProcessInfo);

        // The derived scheme adds the dynamic terms to the static contribution
        AddDynamicsToLHS(LHS_Contribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);

        AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);

        KRATOS_CATCH("ResidualBasedImplicitTimeScheme.CalculateSystemContributions");
    }

    /**
     * @brief This function is designed to calculate just the RHS contribution
     * @param rCurrentElement The element to compute
     * @param rRHSContribution The RHS vector contribution
     * @param rEquationId The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void CalculateRHSContribution(
        Element& rCurrentElement,
        LocalSystemVectorType& rRHSContribution,
        Element::EquationIdVectorType& rEquationId,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY;

        const IndexType this_thread = OpenMPUtils::ThisThread();

        // Initializing the non linear iteration for the current element
        // rCurrentElement.InitializeNonLinearIteration(rCurrentProcessInfo);

        // Basic operations for the element considered
        rCurrentElement.CalculateRightHandSide(rRHSContribution,rCurrentProcessInfo);

        rCurrentElement.CalculateMassMatrix(mMatrix.M[this_thread], rCurrentProcessInfo);

        rCurrentElement.CalculateDampingMatrix(mMatrix.D[this_thread],rCurrentProcessInfo);

        rCurrentElement.EquationIdVector(rEquationId,rCurrentProcessInfo);

        AddDynamicsToRHS (rCurrentElement, rRHSContribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);

        KRATOS_CATCH("ResidualBasedImplicitTimeScheme.Calculate_RHS_Contribution");
    }

    /**
     * @brief Functions totally analogous to the precedent but applied to the "condition" objects
     * @param rCurrentCondition The condition to compute
     * @param rLHSContribution The LHS matrix contribution
     * @param rRHSContribution The RHS vector contribution
     * @param rEquationId The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void CalculateSystemContributions(
        Condition& rCurrentCondition,
        LocalSystemMatrixType& rLHSContribution,
        LocalSystemVectorType& rRHSContribution,
        Element::EquationIdVectorType& rEquationId,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY;

        const IndexType this_thread = OpenMPUtils::ThisThread();

        // Initializing the non linear iteration for the current condition
        //rCurrentCondition.InitializeNonLinearIteration(rCurrentProcessInfo);

        // Basic operations for the condition considered
        rCurrentCondition.CalculateLocalSystem(rLHSContribution,rRHSContribution, rCurrentProcessInfo);

        rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);

        rCurrentCondition.CalculateMassMatrix(mMatrix.M[this_thread], rCurrentProcessInfo);

        rCurrentCondition.CalculateDampingMatrix(mMatrix.D[this_thread], rCurrentProcessInfo);

        AddDynamicsToLHS(rLHSContribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);

        AddDynamicsToRHS(rCurrentCondition, rRHSContribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);

        KRATOS_CATCH("ResidualBasedImplicitTimeScheme.CalculateSystemContributions");
    }

    /**
     * @brief Functions that calculates the RHS of a "condition" object
     * @param rCurrentCondition The condition to compute
     * @param rRHSContribution The RHS vector contribution
     * @param rEquationId The ID's of the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void CalculateRHSContribution(
        Condition& rCurrentCondition,
        LocalSystemVectorType& rRHSContribution,
        Element::EquationIdVectorType& rEquationId,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY;

        const IndexType this_thread = OpenMPUtils::ThisThread();

        // Initializing the non linear iteration for the current condition
        //rCurrentCondition.InitializeNonLinearIteration(rCurrentProcessInfo);

        // Basic operations for the condition considered
        rCurrentCondition.CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);

        rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);

        rCurrentCondition.CalculateMassMatrix(mMatrix.M[this_thread], rCurrentProcessInfo);

        rCurrentCondition.CalculateDampingMatrix(mMatrix.D[this_thread], rCurrentProcessInfo);

        // Adding the dynamic contributions (static is already included)
        AddDynamicsToRHS(rCurrentCondition, rRHSContribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);

        KRATOS_CATCH("ResidualBasedImplicitTimeScheme.Calculate_RHS_Contribution");
    }

    /**
     * @brief It initializes time step solution. Only for reasons if the time step solution is restarted
     * @param rModelPart The model part of the problem to solve
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        // NOTE: bound by reference; previously the ProcessInfo container
        // was copied by value on every solution step
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        const double delta_time = r_current_process_info[DELTA_TIME];

        KRATOS_ERROR_IF(delta_time < 1.0e-24) << "ERROR:: Detected delta_time = 0 in the Solution Scheme DELTA_TIME. PLEASE : check if the time step is created correctly for the current time step" << std::endl;

        KRATOS_CATCH("ResidualBasedImplicitTimeScheme.InitializeSolutionStep");
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided.
     * @details Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @param rModelPart The model part of the problem to solve
     * @return Zero means all ok
     */
    int Check(const ModelPart& rModelPart) const override
    {
        KRATOS_TRY;

        return BaseType::Check(rModelPart);

        KRATOS_CATCH("ResidualBasedImplicitTimeScheme.Check");
    }

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     * @return The default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name" : "residualbased_implicit_time_scheme"
        })");

        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedImplicitTimeScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    /// Per-thread scratch matrices filled by CalculateMassMatrix/CalculateDampingMatrix
    struct GeneralMatrices
    {
        std::vector< Matrix > M; /// Mass matrix workspace (one entry per OpenMP thread)
        std::vector< Matrix > D; /// Damping matrix workspace (one entry per OpenMP thread)
    };

    GeneralMatrices mMatrix;

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief It adds the dynamic LHS contribution of the elements LHS = d(-RHS)/d(un0) = c0*c0*M + c0*D + K
     * @details Must be implemented by the derived time scheme
     * @param LHS_Contribution The dynamic contribution for the LHS
     * @param D The damping matrix
     * @param M The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void AddDynamicsToLHS(
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemMatrixType& D,
        LocalSystemMatrixType& M,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        KRATOS_ERROR << "YOU ARE CALLING THE BASE CLASS OF AddDynamicsToLHS" << std::endl;
    }

    /**
     * @brief It adds the dynamic RHS contribution of the elements b - M*a - D*v
     * @details Must be implemented by the derived time scheme
     * @param rCurrentElement The element to compute
     * @param RHS_Contribution The dynamic contribution for the RHS
     * @param D The damping matrix
     * @param M The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void AddDynamicsToRHS(
        Element& rCurrentElement,
        LocalSystemVectorType& RHS_Contribution,
        LocalSystemMatrixType& D,
        LocalSystemMatrixType& M,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        KRATOS_ERROR << "YOU ARE CALLING THE BASE CLASS OF AddDynamicsToRHS" << std::endl;
    }

    /**
     * @brief It adds the dynamic RHS contribution of the condition RHS = fext - M*an0 - D*vn0 - K*dn0
     * @details Must be implemented by the derived time scheme
     * @param rCurrentCondition The condition to compute
     * @param RHS_Contribution The dynamic contribution for the RHS
     * @param D The damping matrix
     * @param M The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    virtual void AddDynamicsToRHS(
        Condition& rCurrentCondition,
        LocalSystemVectorType& RHS_Contribution,
        LocalSystemMatrixType& D,
        LocalSystemMatrixType& M,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        KRATOS_ERROR << "YOU ARE CALLING THE BASE CLASS OF AddDynamicsToRHS" << std::endl;
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class ResidualBasedImplicitTimeScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_IMPLICIT_TIME_SCHEME defined */
|
copy.c | #include "copy.h"
/* Reference copy: a plain element-by-element assignment loop, left to
 * the compiler (and OpenMP, when enabled) to optimize. This is the
 * baseline the specialized copy variants below are compared against. */
void copy_ref(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t k = 0; k < n; k++) {
        const double v = a[k];
        b[k] = v;
    }
}
/* Copy one element at a time through a register using explicit "mov"
 * inline asm, so the scalar load/store path is pinned down and the
 * compiler cannot vectorize or otherwise transform the loop. */
void copy_mov(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i++) {
double t;
//t = a[i];
/* load a[i] from memory into a register */
asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));
//b[i] = t;
/* store the register back to b[i] */
asm ("mov %1, %0" : "=m" (b[i]) : "r" (t));
}
}
/* Copy using the x86 "rep movsq" string move (one quadword per count in
 * RCX, source RSI, destination RDI). With OpenMP the range is split into
 * contiguous per-thread chunks. On non-Intel compilers the inline asm is
 * replaced by memcpy over the same range. */
void copy_rep_movsq(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    /* It might make more sense to do rep-movsq a page at a time
     * and make the alignment nicer... */
#ifdef _OPENMP
    #pragma omp parallel
    {
        int me = omp_get_thread_num();
        int nt = omp_get_num_threads();
        size_t chunk = 1+(n-1)/nt;               /* ceil(n/nt) elements per thread */
        size_t start = me*chunk;
        size_t end = (me+1)*chunk;
        if (end>n) end = n;
        size_t tn = (end>start) ? end-start : 0; /* trailing threads may get nothing */
        const double * RESTRICT ta = a+start;
        double * RESTRICT tb = b+start;
#ifdef __INTEL_COMPILER
        /* rep movsq clobbers its pointer/count registers, hence the
         * matched in/out operands on the local copies. */
        asm("rep movsq"
            : "=D" (tb), "=S" (ta), "=c" (tn)
            : "0" (tb), "1" (ta), "2" (tn)
            : "memory");
#else
        memcpy(tb, ta, tn * sizeof(double));
#endif
    }
#else
    {
#ifdef __INTEL_COMPILER
        asm("rep movsq"
            : "=D" (b), "=S" (a), "=c" (n)
            : "0" (b), "1" (a), "2" (n)
            : "memory");
#else
        /* BUG FIX: removed stray "tn *= sizeof(double);" here — tn is not
         * declared in this branch (compile error when built without
         * OpenMP) and memcpy already receives the byte count. */
        memcpy(b, a, n*sizeof(double));
#endif
    }
#endif
}
#ifdef __SSE__
#if 0 /* BROKEN */
/* Disabled variant (see the enclosing "#if 0" / BROKEN note): attempts a
 * non-temporal "movntq" store from a general-purpose register. The inline
 * comment below records that movntq does not work in this form —
 * presumably because movntq expects an MMX register operand, not the
 * "r" (GPR) constraint used here — TODO confirm. Kept for reference only. */
void copy_movntq(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i++) {
double t;
//t = a[i];
asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));
//b[i] = t;
// movntq does not work here...
asm ("movntq %1, %0" : "=m" (b[i]) : "r" (t));
}
/* order the weakly-ordered non-temporal stores before returning */
asm ("sfence" ::: "memory");
}
#endif
#ifdef __INTEL_COMPILER
/* Copy via the MMX non-temporal store intrinsic _mm_stream_pi (movntq):
 * each double is reinterpreted as a 64-bit integer, moved into an __m64,
 * and streamed to b[i] bypassing the cache. ICC-only path (uses
 * _m_from_int64 / __int64). The sfence makes the streamed stores
 * globally visible before return. */
void copy_movntq64(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
//_mm_empty();
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i++) {
__m64 t = _m_from_int64( *(__int64*)&(a[i]) );
_mm_stream_pi( (__m64*)&(b[i]), (__m64)t);
}
_mm_sfence();
}
#endif /* ICC */
#endif /* SSE */
#ifdef __SSE2__
/* Copy via "movnti": each element is loaded into a register with a plain
 * mov, then written with a non-temporal (cache-bypassing) 64-bit integer
 * store. The trailing sfence orders the weakly-ordered streamed stores
 * before the function returns. */
void copy_movnti(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i++) {
double t;
//t = a[i];
asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));
//b[i] = t;
asm ("movnti %1, %0" : "=m" (b[i]) : "r" (t));
}
asm ("sfence" ::: "memory");
}
#ifdef __INTEL_COMPILER
/* Same idea as copy_movnti, but through the _mm_stream_si64 intrinsic:
 * each double is reinterpreted as a 64-bit integer and streamed to b[i]
 * with a non-temporal store. ICC-only path (uses _m_from_int64 /
 * __int64); _mm_sfence orders the streamed stores before return. */
void copy_movnti64(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
//_mm_empty();
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i++) {
__m64 t = _m_from_int64( *(__int64*)&(a[i]) );
_mm_stream_si64( (__int64*)&(b[i]), *(__int64*)&t);
}
_mm_sfence();
}
#endif /* ICC */
/* Copy n doubles from a to b in aligned 16-byte chunks (SSE2
 * movapd load + store).
 * NOTE(review): assumes both pointers are 16-byte aligned and n is a
 * multiple of 2 (no tail handling) -- confirm callers guarantee it. */
void copy_movapd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 2) {
    const __m128d chunk = _mm_load_pd(a + k);
    _mm_store_pd(b + k, chunk);
}
}
/* Copy n doubles in aligned 16-byte chunks using cache-bypassing
 * stores (SSE2 movntpd); the trailing sfence orders the streaming
 * stores before returning.
 * NOTE(review): assumes 16-byte alignment and n a multiple of 2. */
void copy_movntpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 2) {
    const __m128d chunk = _mm_load_pd(a + k);
    _mm_stream_pd(b + k, chunk);
}
_mm_sfence();
}
#endif /* SSE2 */
#ifdef __SSE4_1__
/* Copy n doubles 16 bytes at a time with non-temporal traffic on BOTH
 * sides: movntdqa load (SSE4.1) and movntdq store, followed by sfence.
 * NOTE(review): assumes 16-byte alignment and n a multiple of 2. */
void copy_movntdqa128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 2) {
    const __m128i chunk = _mm_stream_load_si128( (__m128i*)(a + k) );
    _mm_stream_si128( (__m128i*)(b + k), chunk );
}
_mm_sfence();
}
#endif /* SSE4.1 */
#ifdef __AVX__
/* Copy n doubles from a to b in aligned 32-byte chunks (AVX vmovapd
 * load + store).
 * NOTE(review): assumes 32-byte alignment and n a multiple of 4. */
void copy_vmovapd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 4) {
    const __m256d chunk = _mm256_load_pd(a + k);
    _mm256_store_pd(b + k, chunk);
}
}
/* Copy n doubles in aligned 32-byte chunks using non-temporal AVX
 * stores (vmovntpd); sfence orders the streaming stores on exit.
 * NOTE(review): assumes 32-byte alignment and n a multiple of 4. */
void copy_vmovntpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 4) {
    const __m256d chunk = _mm256_load_pd(a + k);
    _mm256_stream_pd(b + k, chunk);
}
_mm_sfence();
}
#endif /* AVX */
#ifdef __AVX2__
/* Copy n doubles 32 bytes at a time with non-temporal traffic on both
 * sides: vmovntdqa load (AVX2) and vmovntdq store, followed by sfence.
 * NOTE(review): assumes 32-byte alignment and n a multiple of 4. */
void copy_vmovntdqa256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 4) {
    const __m256i chunk = _mm256_stream_load_si256( (__m256i*)(a + k) );
    _mm256_stream_si256( (__m256i*)(b + k), chunk );
}
_mm_sfence();
}
/* Copy n doubles two at a time: AVX2 gather with 32-bit indices
 * (vgatherdpd) for the load, scalar low/high stores for the write-back.
 * A 2-element pd gather consumes only the two low dword indices (0,1);
 * the -1 entries are unused padding. Benchmark kernel for gather
 * throughput, not a practical copy. */
void copy_vgatherdpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
const __m128i vindex = _mm_set_epi32(-1,-1,1,0); // set_epi32 lists high->low, so lane 0 holds index 0
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=2) {
__m128d t = _mm_i32gather_pd( &(a[i]), vindex, 8 /* scale */ );
_mm_storel_pd( &(b[i ]), t);
_mm_storeh_pd( &(b[i+1]), t);
}
}
/* Copy n doubles two at a time: AVX2 gather with 64-bit indices
 * (vgatherqpd) for the load, scalar low/high stores for the write-back.
 * Indices {0,1} address the two consecutive elements at a+i with a
 * scale of 8 bytes. Benchmark kernel for gather throughput. */
void copy_vgatherqpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
const __m128i vindex = _mm_set_epi64x(1,0); // lane 0 = index 0, lane 1 = index 1
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=2) {
__m128d t = _mm_i64gather_pd( &(a[i]), vindex, 8 /* scale */ );
_mm_storel_pd( &(b[i ]), t);
_mm_storeh_pd( &(b[i+1]), t);
}
}
/* Copy n doubles four at a time: 256-bit AVX2 gather with 32-bit
 * indices (vgatherdpd) for the load; the result is split into its two
 * 128-bit halves and written back with scalar low/high stores.
 * Benchmark kernel for gather throughput. */
void copy_vgatherdpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
const __m128i vindex = _mm_set_epi32(3,2,1,0); // set_epi32 lists high->low, so lane 0 holds index 0
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=4) {
__m256d t = _mm256_i32gather_pd( &(a[i]), vindex, 8 /* scale */ );
__m128d l = _mm256_extractf128_pd(t,0); // elements i, i+1
__m128d u = _mm256_extractf128_pd(t,1); // elements i+2, i+3
_mm_storel_pd( &(b[i ]), l);
_mm_storeh_pd( &(b[i+1]), l);
_mm_storel_pd( &(b[i+2]), u);
_mm_storeh_pd( &(b[i+3]), u);
}
}
/* Copy n doubles four at a time: 256-bit AVX2 gather with 64-bit
 * indices (vgatherqpd) for the load; the result is split into its two
 * 128-bit halves and written back with scalar low/high stores.
 * Benchmark kernel for gather throughput. */
void copy_vgatherqpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
const __m256i vindex = _mm256_set_epi64x(3,2,1,0); // lane 0 = index 0 .. lane 3 = index 3
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=4) {
__m256d t = _mm256_i64gather_pd( &(a[i]), vindex, 8 /* scale */ );
__m128d l = _mm256_extractf128_pd(t,0); // elements i, i+1
__m128d u = _mm256_extractf128_pd(t,1); // elements i+2, i+3
_mm_storel_pd( &(b[i ]), l);
_mm_storeh_pd( &(b[i+1]), l);
_mm_storel_pd( &(b[i+2]), u);
_mm_storeh_pd( &(b[i+3]), u);
}
}
/* Masked variant of copy_vgatherqpd256: same 64-bit-index AVX2 gather,
 * but through the masked intrinsic with an all-ones mask so every lane
 * is gathered (src would only supply lanes whose mask bit is clear --
 * none here). Exercises the masked-gather encoding for the benchmark. */
void copy_mvgatherqpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
const __m256i vindex = _mm256_set_epi64x(3,2,1,0); // lane 0 = index 0 .. lane 3 = index 3
// O in OQ means ordered, i.e. AND. unordered is OR. Q means quiet i.e. non-signaling.
__m256d src = _mm256_cmp_pd(_mm256_setzero_pd(),_mm256_setzero_pd(),_CMP_EQ_OQ); // 0==0 in every lane: sets all bits to 1
__m256d mask = src; // all-ones mask -> gather all four lanes
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=4) {
__m256d t = _mm256_mask_i64gather_pd( src, &(a[i]), vindex, mask, 8 /* scale */ );
__m128d l = _mm256_extractf128_pd(t,0); // elements i, i+1
__m128d u = _mm256_extractf128_pd(t,1); // elements i+2, i+3
_mm_storel_pd( &(b[i ]), l);
_mm_storeh_pd( &(b[i+1]), l);
_mm_storel_pd( &(b[i+2]), u);
_mm_storeh_pd( &(b[i+3]), u);
}
}
#endif /* AVX2 */
#ifdef __AVX512F__
/* Copy n doubles from a to b in aligned 64-byte chunks (AVX-512
 * vmovapd load + store).
 * NOTE(review): assumes 64-byte alignment and n a multiple of 8. */
void copy_vmovapd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 8) {
    const __m512d chunk = _mm512_load_pd(a + k);
    _mm512_store_pd(b + k, chunk);
}
}
/* Copy n doubles in 64-byte chunks with the UNALIGNED AVX-512 forms
 * (vmovupd load + store) -- no alignment requirement on a or b, but
 * n is still assumed to be a multiple of 8 (no tail handling). */
void copy_vmovupd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 8) {
    const __m512d chunk = _mm512_loadu_pd(a + k);
    _mm512_storeu_pd(b + k, chunk);
}
}
/* Masked variant of copy_vmovapd512: aligned 64-byte loads/stores
 * through the mask intrinsics with k = 0xFF, i.e. all 8 lanes enabled,
 * so src (the fallthrough value for masked-off lanes) is never used.
 * Exercises the masked encodings for the benchmark.
 * NOTE(review): assumes 64-byte alignment and n a multiple of 8. */
void copy_mvmovapd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
__m512d src = {0}; // fallthrough lanes -- unused with an all-ones mask
__mmask8 k = 255; // all 8 lanes active
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=8) {
__m512d t = _mm512_mask_load_pd( src, k, &(a[i]) );
_mm512_mask_store_pd( &(b[i]), k, t);
}
}
/* Masked variant of copy_vmovupd512: unaligned 64-byte loads/stores
 * through the mask intrinsics with k = 0xFF (all 8 lanes enabled), so
 * src is never used. Exercises the masked unaligned encodings.
 * NOTE(review): n is still assumed to be a multiple of 8. */
void copy_mvmovupd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
__m512d src = {0}; // fallthrough lanes -- unused with an all-ones mask
__mmask8 k = 255; // all 8 lanes active
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=8) {
__m512d t = _mm512_mask_loadu_pd( src, k, &(a[i]) );
_mm512_mask_storeu_pd( &(b[i]), k, t);
}
}
/* Copy n doubles in aligned 64-byte chunks using non-temporal AVX-512
 * stores (vmovntpd); sfence orders the streaming stores on exit.
 * NOTE(review): assumes 64-byte alignment and n a multiple of 8. */
void copy_vmovntpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t k = 0; k < n; k += 8) {
    const __m512d chunk = _mm512_load_pd(a + k);
    _mm512_stream_pd(b + k, chunk);
}
_mm_sfence();
}
/* Copy n doubles 64 bytes at a time with non-temporal traffic on both
 * sides: vmovntdqa load and vmovntdq store (AVX-512), then sfence to
 * order the streaming stores.
 * NOTE(review): assumes 64-byte alignment and n a multiple of 8. */
void copy_vmovntdqa512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=8) {
__m512i t = _mm512_stream_load_si512( (__m512i*)&(a[i]) );
_mm512_stream_si512 ( (__m512i*)&(b[i]), t);
}
_mm_sfence();
}
/* Copy n doubles eight at a time using an AVX-512 gather/scatter pair
 * with 32-bit indices: vgatherdpd loads elements i..i+7, vscatterdpd
 * writes them back. Benchmark kernel for gather/scatter throughput. */
void copy_vGSdpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
const __m256i vindex = _mm256_set_epi32(7,6,5,4,3,2,1,0); // set_epi32 lists high->low, so lane 0 holds index 0
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=8) {
__m512d t = _mm512_i32gather_pd(vindex, &(a[i]), 8 /* scale */ );
_mm512_i32scatter_pd( &(b[i]), vindex, t, 8 /* scale */ );
}
}
/* Masked variant of copy_vGSdpd512: same 32-bit-index gather/scatter
 * pair through the mask intrinsics with k = 0xFF (all 8 lanes active),
 * so src (the fallthrough for masked-off lanes) is never used. */
void copy_mvGSdpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
__m512d src = {0}; // fallthrough lanes -- unused with an all-ones mask
__mmask8 k = 255; // all 8 lanes active
const __m256i vindex = _mm256_set_epi32(7,6,5,4,3,2,1,0); // set_epi32 lists high->low, so lane 0 holds index 0
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=8) {
__m512d t = _mm512_mask_i32gather_pd(src, k, vindex, &(a[i]), 8 /* scale */ );
_mm512_mask_i32scatter_pd( &(b[i]), k, vindex, t, 8 /* scale */ );
}
}
/* Copy n doubles eight at a time using an AVX-512 gather/scatter pair
 * with 64-bit indices: vgatherqpd loads elements i..i+7, vscatterqpd
 * writes them back. Benchmark kernel for gather/scatter throughput. */
void copy_vGSqpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
const __m512i vindex = _mm512_set_epi64(7,6,5,4,3,2,1,0); // lane 0 = index 0 .. lane 7 = index 7
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=8) {
__m512d t = _mm512_i64gather_pd(vindex, &(a[i]), 8 /* scale */ );
_mm512_i64scatter_pd( &(b[i]), vindex, t, 8 /* scale */ );
}
}
/* Masked variant of copy_vGSqpd512: same 64-bit-index gather/scatter
 * pair through the mask intrinsics with k = 0xFF (all 8 lanes active),
 * so src (the fallthrough for masked-off lanes) is never used. */
void copy_mvGSqpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
__m512d src = {0}; // fallthrough lanes -- unused with an all-ones mask
__mmask8 k = 255; // all 8 lanes active
const __m512i vindex = _mm512_set_epi64(7,6,5,4,3,2,1,0); // lane 0 = index 0 .. lane 7 = index 7
OMP_PARALLEL_FOR
for (size_t i=0; i<n; i+=8) {
__m512d t = _mm512_mask_i64gather_pd(src, k, vindex, &(a[i]), 8 /* scale */ );
_mm512_mask_i64scatter_pd( &(b[i]), k, vindex, t, 8 /* scale */ );
}
}
#endif /* AVX-512F */
|
Tutorial.h | //=================================================================================================
/*!
// \file blaze/Tutorial.h
// \brief Tutorial of the Blaze library
//
// Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_TUTORIAL_H_
#define _BLAZE_TUTORIAL_H_
//=================================================================================================
//
// BLAZE TUTORIAL
//
//=================================================================================================
//**Mainpage***************************************************************************************
/*!\mainpage
//
// \image html blaze300x150.jpg
//
// This is the API for the \b Blaze high performance C++ math library. It gives a complete
// overview of the individual features and sublibraries of \b Blaze. To get a first impression
// on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards,
// the following long tutorial covers the most important aspects of the \b Blaze math library.
// The tabs at the top of the page allow a direct access to the individual modules, namespaces,
// classes, and files of the \b Blaze library.\n\n
//
// \section table_of_content Table of Contents
//
// <ul>
// <li> \ref configuration_and_installation </li>
// <li> \ref getting_started </li>
// <li> \ref vectors
// <ul>
// <li> \ref vector_types </li>
// <li> \ref vector_operations </li>
// </ul>
// </li>
// <li> \ref matrices
// <ul>
// <li> \ref matrix_types </li>
// <li> \ref matrix_operations </li>
// </ul>
// </li>
// <li> \ref adaptors
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices </li>
// </ul>
// </li>
// <li> \ref views
// <ul>
// <li> \ref views_subvectors </li>
// <li> \ref views_element_selections </li>
// <li> \ref views_submatrices </li>
// <li> \ref views_rows </li>
// <li> \ref views_row_selections </li>
// <li> \ref views_columns </li>
// <li> \ref views_column_selections </li>
// <li> \ref views_bands </li>
// </ul>
// </li>
// <li> \ref arithmetic_operations
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// <li> \ref vector_kronecker_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication
// <ul>
// <li> \ref schur_product </li>
// <li> \ref matrix_product </li>
// <li> \ref matrix_kronecker_product </li>
// </ul>
// </li>
// </ul>
// </li>
// <li> \ref bitwise_operations
// <ul>
// <li> \ref bitwise_shift </li>
// <li> \ref bitwise_and </li>
// <li> \ref bitwise_or </li>
// <li> \ref bitwise_xor </li>
// </ul>
// </li>
// <li> \ref logical_operations
// <ul>
// <li> \ref logical_not </li>
// <li> \ref logical_and </li>
// <li> \ref logical_or </li>
// </ul>
// </li>
// <li> \ref shared_memory_parallelization
// <ul>
// <li> \ref hpx_parallelization </li>
// <li> \ref cpp_threads_parallelization </li>
// <li> \ref boost_threads_parallelization </li>
// <li> \ref openmp_parallelization </li>
// <li> \ref serial_execution </li>
// </ul>
// </li>
// <li> \ref serialization
// <ul>
// <li> \ref vector_serialization </li>
// <li> \ref matrix_serialization </li>
// </ul>
// </li>
// <li> \ref customization
// <ul>
// <li> \ref configuration_files </li>
// <li> \ref vector_and_matrix_customization
// <ul>
// <li> \ref custom_data_members </li>
// <li> \ref custom_operations </li>
// <li> \ref custom_data_types </li>
// </ul>
// </li>
// <li> \ref error_reporting_customization </li>
// </ul>
// </li>
// <li> \ref blas_functions </li>
// <li> \ref lapack_functions </li>
// <li> \ref block_vectors_and_matrices </li>
// <li> \ref intra_statement_optimization </li>
// <li> \ref faq </li>
// <li> \ref issue_creation_guidelines </li>
// <li> \ref blaze_references </li>
// </ul>
*/
//*************************************************************************************************
//**Configuration and Installation*****************************************************************
/*!\page configuration_and_installation Configuration and Installation
//
// \tableofcontents
//
//
// Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system
// is a fairly easy two-step process. In the following, this two-step process is explained in
// detail, preceded only by a short summary of the requirements.
//
//
// \n \section requirements Requirements
// <hr>
//
// For maximum performance the \b Blaze library expects you to have a BLAS library installed
// (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>,
// <a href="http://developer.amd.com/libraries/acml/">ACML</a>,
// <a href="http://math-atlas.sourceforge.net">Atlas</a>,
// <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't
// have a BLAS library installed on your system, \b Blaze will still work and will not be reduced
// in functionality, but performance may be limited. Thus it is strongly recommended to install a
// BLAS library.
//
// Additionally, for computing the determinant of a dense matrix, for the decomposition of dense
// matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular
// values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either
// of these features is used it is necessary to link the LAPACK library to the final executable.
// If no LAPACK library is available the use of these features will result in a linker error.
//
// Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this
// case the Boost library is required to be installed on your system. It is recommended to use the
// newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If
// you don't have Boost installed on your system, you can download it for free from
// <a href="http://www.boost.org">www.boost.org</a>.
//
//
// \n \section step_1_installation Step 1: Installation
// <hr>
//
// \subsection step_1_cmake Installation via CMake
//
// The first step is the installation of the \b Blaze header files. The most convenient way
// to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the
// following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to
// the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to
// \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake.
\code
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/
sudo make install
\endcode
// Windows users can do the same via the cmake-gui. Alternatively, it is possible to include
// \b Blaze by adding the following lines in any \c CMakeLists.txt file:
\code
find_package( blaze )
if( blaze_FOUND )
add_library( blaze_target INTERFACE )
target_link_libraries( blaze_target INTERFACE blaze::blaze )
endif()
\endcode
// \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool
//
// An alternate way to install \b Blaze for Windows users is Microsoft's
// <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can
// be installed via the command line:
\code
C:\src\vcpkg> .\vcpkg install blaze
\endcode
// The tool automatically downloads the latest \b Blaze release and copies the header files to
// the common include directory. Please note that since \b Blaze is a header-only library the
// attempt to install any static or dynamic library will fail!
//
// \n \subsection step_1_installation_unix Manual Installation on Linux/macOS
//
// Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply
// copied to a standard include directory (note that this requires root privileges):
\code
cp -r ./blaze /usr/local/include
\endcode
// Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the
// \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be
// searched after any directories specified on the command line with the option \c -I and
// before the standard default directories (such as \c /usr/local/include and \c /usr/include).
// Assuming a user named 'Jon', the environment variable can be set as follows:
\code
CPLUS_INCLUDE_PATH=/usr/home/jon/blaze
export CPLUS_INCLUDE_PATH
\endcode
// Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the
// command line. The following example demonstrates this by means of the GNU C++ compiler:
\code
g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp
\endcode
// \n \subsection step_1_installation_windows Manual Installation on Windows
//
// Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be
// copied to any other directory or simply left in the default \b Blaze directory. However, the
// chosen include directory has to be explicitly specified as include path. In Visual Studio,
// this is done via the project property pages, configuration properties, C/C++, General settings.
// Here the additional include directories can be specified.
//
//
// \n \section step_2_configuration Step 2: Configuration
// <hr>
//
// The second step is the configuration and customization of the \b Blaze library. Many aspects
// of \b Blaze can be adapted to specific requirements, environments and architectures. The most
// convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt>
// subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header
// files can be customized manually. In both cases, however, the files are modified. If this is
// not an option it is possible to configure \b Blaze via the command line (see the tutorial
// section \ref configuration_files or the documentation in the configuration files).
//
// Since the default settings are reasonable for most systems this step can also be skipped.
// However, in order to achieve maximum performance a customization of at least the following
// configuration files is required:
//
// - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled
// to use a third-party BLAS library for several basic linear algebra functions (such as for
// instance dense matrix multiplications). In case no BLAS library is used, all linear algebra
// functions use the default implementations of the \b Blaze library and therefore BLAS is not a
// requirement for the compilation process. However, please note that performance may be limited.
// - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache
// settings. \b Blaze uses this information to optimize its cache usage. For maximum performance
// it is recommended to adapt these settings to a specific target architecture.
// - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the
// customization of the \b Blaze compute kernels. In order to tune the kernels for a specific
// architecture and to maximize performance it can be necessary to adjust the thresholds,
// especially for a parallel execution (see \ref shared_memory_parallelization).
//
// For an overview of other customization options and more details, please see the section
// \ref configuration_files.
//
//
// \n \section blaze_version Blaze Version
// <hr>
//
// The current major and minor version number of the \b Blaze library can be found in the
// <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the
// <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two following macros,
// which can for instance be used for conditional compilation:
\code
#define BLAZE_MAJOR_VERSION 3
#define BLAZE_MINOR_VERSION 5
#define BLAZE_PATCH_VERSION 0
\endcode
// \n Next: \ref getting_started
*/
//*************************************************************************************************
//**Getting Started********************************************************************************
/*!\page getting_started Getting Started
//
// This short tutorial serves the purpose to give a quick overview of the way mathematical
// expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following
// long tutorial covers the most important aspects of the \b Blaze math library.
//
//
// \n \section getting_started_vector_example A First Example
//
// \b Blaze is written such that using mathematical expressions is as close to mathematical
// textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly
// easiest solution is the right solution and most users experience no problems when trying to
// use \b Blaze in the most natural way. The following example gives a first impression of the
// formulation of a vector addition in \b Blaze:
\code
#include <iostream>
#include <blaze/Math.h>
using blaze::StaticVector;
using blaze::DynamicVector;
int main()
{
// Instantiation of a static 3D column vector. The vector is directly initialized as
// ( 4 -2 5 )
StaticVector<int,3UL> a{ 4, -2, 5 };
// Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to
// ( 2 5 -3 )
DynamicVector<int> b( 3UL );
b[0] = 2;
b[1] = 5;
b[2] = -3;
// Adding the vectors a and b
DynamicVector<int> c = a + b;
// Printing the result of the vector addition
std::cout << "c =\n" << c << "\n";
}
\endcode
// Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header
// file. Alternatively, the entire \b Blaze library, including both the math and the entire
// utility module, can be included via the \c blaze/Blaze.h header file. Also note that all
// classes and functions of \b Blaze are contained in the blaze namespace.\n\n
//
// Assuming that this program resides in a source file called \c FirstExample.cpp, it can be
// compiled for instance via the GNU C++ compiler:
\code
g++ -std=c++14 -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp
\endcode
// Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum
// performance, it is necessary to compile the program in release mode, which deactivates
// all debugging functionality inside \b Blaze. It is also strongly recommended to specify
// the available architecture specific instruction set (as for instance the AVX instruction
// set, which if available can be activated via the \c -mavx flag). This allows \b Blaze
// to optimize computations via vectorization.\n\n
//
// When running the resulting executable \c FirstExample, the output of the last line of
// this small program is
\code
c =
( 6 )
( 3 )
( 2 )
\endcode
// \n \section getting_started_matrix_example An Example Involving Matrices
//
// Similarly easy and intuitive are expressions involving matrices:
\code
#include <iostream>
#include <blaze/Math.h>
using namespace blaze;
int main()
{
// Instantiating a dynamic 3D column vector
DynamicVector<int> x{ 4, -1, 3 };
// Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call
// operator three values of the matrix are explicitly set to get the matrix
// ( 1 0 4 )
// ( 0 -2 0 )
DynamicMatrix<int> A( 2UL, 3UL, 0 );
A(0,0) = 1;
A(0,2) = 4;
A(1,1) = -2;
// Performing a matrix/vector multiplication
DynamicVector<int> y = A * x;
// Printing the resulting vector
std::cout << "y =\n" << y << "\n";
// Instantiating a static column-major matrix. The matrix is directly initialized as
// ( 3 -1 )
// ( 0 2 )
// ( -1 0 )
StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } };
// Performing a matrix/matrix multiplication
DynamicMatrix<int> C = A * B;
// Printing the resulting matrix
std::cout << "C =\n" << C << "\n";
}
\endcode
// The output of this program is
\code
y =
( 16 )
( 2 )
C =
( -1 -1 )
( 0 -4 )
\endcode
// \n \section getting_started_complex_example A Complex Example
//
// The following example is much more sophisticated. It shows the implementation of the Conjugate
// Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the
// \b Blaze library:
//
// \image html cg.jpg
//
// In this example it is not important to understand the CG algorithm itself, but to see the
// advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a
// sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$
// unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical
// formulation and therefore has huge advantages in terms of readability and maintainability,
// while the performance of the code is close to the expected theoretical peak performance:
\code
#include <blaze/Math.h>
int main()
{
const size_t N ( 1000UL );
const size_t iterations( 10UL );
const size_t NN( N*N );
blaze::CompressedMatrix<double,rowMajor> A( NN, NN );
blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN );
double alpha, beta, delta;
// ... Initializing the sparse matrix A
// Performing the CG algorithm
r = b - A * x;
p = r;
delta = (r,r);
for( size_t iteration=0UL; iteration<iterations; ++iteration )
{
Ap = A * p;
alpha = delta / (p,Ap);
x += alpha * p;
r -= alpha * Ap;
beta = (r,r);
if( std::sqrt( beta ) < 1E-8 ) break;
p = r + ( beta / delta ) * p;
delta = beta;
}
}
\endcode
// \n Hopefully this short tutorial gives a good first impression of how mathematical expressions
// are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types,
// will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and
// matrix types, all possible operations on vectors and matrices, and of course all possible
// mathematical expressions.
//
// \n Previous: \ref configuration_and_installation Next: \ref vectors
*/
//*************************************************************************************************
//**Vectors****************************************************************************************
/*!\page vectors Vectors
//
// \tableofcontents
//
//
// \n \section vectors_general General Concepts
// <hr>
//
// The \b Blaze library currently offers five dense vector types (\ref vector_types_static_vector,
// \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, \ref vector_types_custom_vector,
// and \ref vector_types_uniform_vector) and two sparse vector types (\ref vector_types_compressed_vector
// and \ref vector_types_zero_vector). All vectors can be specified as either column vectors or row
// vectors:
\code
using blaze::DynamicVector;
using blaze::columnVector;
using blaze::rowVector;
// Setup of the 3-dimensional dense column vector
//
// ( 1 )
// ( 2 )
// ( 3 )
//
DynamicVector<int,columnVector> a{ 1, 2, 3 };
// Setup of the 3-dimensional dense row vector
//
// ( 4 5 6 )
//
DynamicVector<int,rowVector> b{ 4, 5, 6 };
\endcode
// Per default, all vectors in \b Blaze are column vectors:
\code
// Instantiation of a 3-dimensional column vector
blaze::DynamicVector<int> c( 3UL );
\endcode
// \n \section vectors_details Vector Details
// <hr>
//
// - \ref vector_types
// - \ref vector_operations
//
//
// \n \section vectors_examples Examples
// <hr>
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowVector;
using blaze::columnVector;
StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector
CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector
DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector
// ... Resizing and initialization
c = a + trans( b );
\endcode
// \n Previous: \ref getting_started Next: \ref vector_types
*/
//*************************************************************************************************
//**Vector Types***********************************************************************************
/*!\page vector_types Vector Types
//
// \tableofcontents
//
//
// \n \section vector_types_static_vector StaticVector
// <hr>
//
// The blaze::StaticVector class template is the representation of a fixed size vector with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class StaticVector;
\endcode
// - \c Type: specifies the type of the vector elements. StaticVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the total number of vector elements. It is expected that StaticVector is
// only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at
// compile time:
\code
// Definition of a 3-dimensional integral column vector
blaze::StaticVector<int,3UL> a;
// Definition of a 4-dimensional single precision column vector
blaze::StaticVector<float,4UL,blaze::columnVector> b;
// Definition of a 6-dimensional double precision row vector
blaze::StaticVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_dynamic_vector DynamicVector
// <hr>
//
// The blaze::DynamicVector class template is the representation of an arbitrary sized vector
// with dynamically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/DynamicVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class DynamicVector;
\endcode
// - \c Type: specifies the type of the vector elements. DynamicVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best
// choice for medium to large vectors. Its size can be modified at runtime:
\code
// Definition of a 3-dimensional integral column vector
blaze::DynamicVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector
blaze::DynamicVector<float,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0
blaze::DynamicVector<double,blaze::rowVector> c;
\endcode
// \n \section vector_types_hybrid_vector HybridVector
// <hr>
//
// The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and
// the blaze::DynamicVector class templates. It represents a fixed size vector with statically
// allocated elements, but still can be dynamically resized (within the bounds of the available
// memory). It can be included via the header file
\code
#include <blaze/math/HybridVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class HybridVector;
\endcode
// - \c Type: specifies the type of the vector elements. HybridVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the maximum number of vector elements. It is expected that HybridVector
// is only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not
// known at compile time or not fixed at runtime, but whose maximum size is known at compile
// time:
\code
// Definition of a 3-dimensional integral column vector with a maximum size of 6
blaze::HybridVector<int,6UL> a( 3UL );
// Definition of a 4-dimensional single precision column vector with a maximum size of 16
blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0 and a maximum size of 6
blaze::HybridVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_custom_vector CustomVector
// <hr>
//
// The blaze::CustomVector class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data
// structure. Thus in contrast to all other dense vector types a custom vector does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of elements
// during construction. A custom vector can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomVector.h>
\endcode
// The type of the elements, the properties of the given array of elements and the transpose
// flag of the vector can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool TF >
class CustomVector;
\endcode
// - Type: specifies the type of the vector elements. blaze::CustomVector can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - PF  : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CustomVector is the right choice if any external array needs to be represented as
// a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays
using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>;
std::vector<int> vec( 7UL );
UnalignedUnpadded a( &vec[0], 7UL );
// Definition of a managed custom column vector for unaligned but padded 'float' arrays
using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>;
std::unique_ptr<float[]> memory1( new float[16] );
UnalignedPadded b( memory1.get(), 9UL, 16UL );
// Definition of a managed custom row vector for aligned, unpadded 'double' arrays
using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>;
std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) );
AlignedUnpadded c( memory2.get(), 7UL );
// Definition of a managed custom column vector for aligned, padded 'complex<double>' arrays
using cplx = complex<double>;
using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>;
std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) );
AlignedPadded d( memory3.get(), 5UL, 8UL );
\endcode
// In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several
// special characteristics. All of these result from the fact that a custom vector is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref vector_types_custom_vector_memory_management</b>
// -# <b>\ref vector_types_custom_vector_copy_operations</b>
// -# <b>\ref vector_types_custom_vector_alignment</b>
// -# <b>\ref vector_types_custom_vector_padding</b>
//
// \n \subsection vector_types_custom_vector_memory_management Memory Management
//
// The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// vector data structure. However, this flexibility comes with the price that the user of a custom
// vector is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom vectors:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3-dimensional custom vector with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom vector!
std::vector<int> vec( 3UL );
CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL );
// Definition of a custom vector with size 3 and capacity 16 with aligned, padded and
// externally managed integer array. Note that the std::unique_ptr must be guaranteed
// to outlive the custom vector!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) );
CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL );
\endcode
// \n \subsection vector_types_custom_vector_copy_operations Copy Operations
//
// As with all dense vectors it is possible to copy construct a custom vector:
\code
using blaze::CustomVector;
using blaze::unaligned;
using blaze::unpadded;
using CustomType = CustomVector<int,unaligned,unpadded>;
std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10
CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector
a[1] = 20; // Also modifies the std::vector
CustomType b( a ); // Creating a copy of vector a
b[2] = 20; // Also affects vector a and the std::vector
\endcode
// It is important to note that a custom vector acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom vector that is referencing and representing
// the same array as the original custom vector.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom vector, but modifies the values of the array:
\code
std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4
CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector
a = c; // Copy assignment: Set all values of vector a and b to 4.
\endcode
// \n \subsection vector_types_custom_vector_alignment Alignment
//
// In case the custom vector is specified as \c aligned the passed array must be guaranteed to
// be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For
// instance, if AVX is active an array of integers must be 32-byte aligned:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) );
CustomVector<int,aligned,unpadded> a( memory.get(), 5UL );
\endcode
// In case the alignment requirements are violated, a \c std::invalid_argument exception is
// thrown.
//
// \n \subsection vector_types_custom_vector_padding Padding
//
// Adding padding elements to the end of an array can have a significant impact on the performance.
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors
// of double precision values can be added via a single SIMD addition operation:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using CustomType = CustomVector<double,aligned,padded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) );
// Creating padded custom vectors of size 3 and a capacity of 4
CustomType a( memory1.get(), 3UL, 4UL );
CustomType b( memory2.get(), 3UL, 4UL );
CustomType c( memory3.get(), 3UL, 4UL );
// ... Initialization
c = a + b; // AVX-based vector addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted, a scalar addition has to be used:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
using CustomType = CustomVector<double,aligned,unpadded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) );
// Creating unpadded custom vectors of size 3
CustomType a( memory1.get(), 3UL );
CustomType b( memory2.get(), 3UL );
CustomType c( memory3.get(), 3UL );
// ... Initialization
c = a + b; // Scalar vector addition
\endcode
// Note the different number of constructor parameters for unpadded and padded custom vectors:
// In contrast to unpadded vectors, where during the construction only the size of the array
// has to be specified, during the construction of a padded custom vector it is additionally
// necessary to explicitly specify the capacity of the array.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom vector the added padding elements must
// guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD
// vector width. In case of unaligned padded vectors the number of padding elements can be greater
// than or equal to the number of padding elements of an aligned padded custom vector. In case the padding
// is insufficient with respect to the available instruction set, a \a std::invalid_argument
// exception is thrown.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \section vector_types_uniform_vector UniformVector
// <hr>
//
// The blaze::UniformVector class template is the representation of an arbitrary sized uniform
// vector with elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/UniformVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class UniformVector;
\endcode
// - \c Type: specifies the type of the vector elements. UniformVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::UniformVector is the best choice for uniform vectors of any size. Its size can be
// modified at runtime:
\code
// Definition of a 3-dimensional integral column vector
blaze::UniformVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector
blaze::UniformVector<float,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0
blaze::UniformVector<double,blaze::rowVector> c;
\endcode
// \n \section vector_types_compressed_vector CompressedVector
// <hr>
//
// The blaze::CompressedVector class is the representation of an arbitrarily sized sparse
// vector, which stores only non-zero elements of arbitrary type. It can be included via the
// header file
\code
#include <blaze/math/CompressedVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class CompressedVector;
\endcode
// - \c Type: specifies the type of the vector elements. CompressedVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CompressedVector is the right choice for all kinds of sparse vectors:
\code
// Definition of a 3-dimensional integral column vector
blaze::CompressedVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements
blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL );
// Definition of a double precision row vector with size 0
blaze::CompressedVector<double,blaze::rowVector> c;
\endcode
// \n \section vector_types_zero_vector ZeroVector
// <hr>
//
// The blaze::ZeroVector class template is the representation of an immutable, arbitrary sized
// zero vector with elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/ZeroVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class ZeroVector;
\endcode
// - \c Type: specifies the type of the vector elements. ZeroVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::ZeroVector is the perfect choice to represent a zero vector:
\code
// Definition of a 3-dimensional integral zero column vector
blaze::ZeroVector<int> a( 3UL );
// Definition of a 6-dimensional single precision zero column vector
blaze::ZeroVector<float,blaze::columnVector> b( 6UL );
// Definition of a double precision row vector with size 0
blaze::ZeroVector<double,blaze::rowVector> c;
\endcode
// \n Previous: \ref vectors Next: \ref vector_operations
*/
//*************************************************************************************************
//**Vector Operations******************************************************************************
/*!\page vector_operations Vector Operations
//
// \tableofcontents
//
//
// \n \section vector_operations_constructors Constructors
// <hr>
//
// Instantiating and setting up a vector is very easy and intuitive. However, there are a few
// rules to take care of:
// - In case the last template parameter (the transpose flag) is omitted, the vector is per
// default a column vector.
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection vector_operations_default_construction Default Construction
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
// All vectors can be default constructed. Whereas the size
// of StaticVectors is fixed via the second template parameter,
// the initial size of a default constructed DynamicVector or
// CompressedVector is 0.
StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector.
// All elements are initialized to 0.
StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector.
// Again, all elements are initialized to 0L.
DynamicVector<float> v3; // Instantiation of a dynamic single precision column
// vector of size 0.
DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row
// vector of size 0.
CompressedVector<int> v5; // Instantiation of a compressed integer column
// vector of size 0.
CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row
// vector of size 0.
\endcode
// \n \subsection vector_operations_size_construction Construction with Specific Size
//
// The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that
// allows to immediately give the vector the required size. Whereas both dense vectors (i.e.
// \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector
// elements, \c CompressedVector merely acquires the size but remains empty.
\code
DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector
// of size 9. The elements are NOT initialized!
HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single
// precision complex values. The elements are
// default constructed.
CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with
// size 10. Initially, the vector provides no
// capacity for non-zero elements.
\endcode
// \n \subsection vector_operations_initialization_constructors Initialization Constructors
//
// All dense vector classes offer a constructor that allows for a direct, homogeneous initialization
// of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements
// can be specified
\code
StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector.
// All elements are initialized to 2.
DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision
// column vector of size 3. All elements are
// set to 7.0F.
CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column
// vector of size 15, which provides enough
// space for at least 3 non-zero elements.
\endcode
// \n \subsection vector_operations_array_construction Array Construction
//
// Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic
// or static array. If the vector is initialized from a dynamic array, the constructor expects the
// actual size of the array as first argument, the array as second argument. In case of a static
// array, the fixed size of the array is used:
\code
const unique_ptr<double[]> array1( new double[2] );
// ... Initialization of the dynamic array
blaze::StaticVector<double,2UL> v13( 2UL, array1.get() );
int array2[4] = { 4, -5, -6, 7 };
blaze::StaticVector<int,4UL> v14( array2 );
\endcode
// \n \subsection vector_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense and sparse vector classes can be directly initialized by means of an
// initializer list:
\code
blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F };
blaze::CompressedVector<int> v16{ 0, 2, 0, 0, 5, 0, 7, 0 };
\endcode
// Dynamically sized vectors (such as e.g. \ref vector_types_hybrid_vector,
// \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are sized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values
// are initialized as default and in case the size of the initializer list exceeds the size
// of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only
// the non-zero elements are used to initialize the vector.
//
// \n \subsection vector_operations_copy_construction Copy Construction
//
// All dense and sparse vectors can be created as the copy of any other dense or sparse vector
// with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector).
\code
StaticVector<int,9UL,columnVector> v17( v7 ); // Instantiation of the dense column vector v17
// as copy of the dense column vector v7.
DynamicVector<int,rowVector> v18( v9 ); // Instantiation of the dense row vector v18 as
// copy of the sparse row vector v9.
CompressedVector<int,columnVector> v19( v1 ); // Instantiation of the sparse column vector v19
// as copy of the dense column vector v1.
CompressedVector<float,rowVector> v20( v12 ); // Instantiation of the sparse row vector v20 as
// copy of the row vector v12.
\endcode
// Note that it is not possible to create a \c StaticVector as a copy of a vector with a different
// size:
\code
StaticVector<int,5UL,columnVector> v21( v7 ); // Runtime error: Size does not match!
StaticVector<int,4UL,rowVector> v22( v10 ); // Compile time error: Size does not match!
\endcode
// \n \section vector_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse vectors:
// \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment,
// \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment.
//
// \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment
//
// Sometimes it may be necessary to assign the same value to all elements of a dense vector.
// For this purpose, the assignment operator can be used:
\code
blaze::StaticVector<int,3UL> v1;
blaze::DynamicVector<double> v2;
// Setting all integer elements of the StaticVector to 2
v1 = 2;
// Setting all double precision elements of the DynamicVector to 5.0
v2 = 5.0;
\endcode
// \n \subsection vector_operations_array_assignment Array Assignment
//
// Dense vectors can also be assigned a static array:
\code
blaze::StaticVector<float,2UL> v1;
blaze::DynamicVector<double,rowVector> v2;
float array1[2] = { 1.0F, 2.0F };
double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 };
v1 = array1;
v2 = array2;
\endcode
// \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense or sparse
// vector:
\code
blaze::DynamicVector<float> v1;
blaze::CompressedVector<double,rowVector> v2;
v1 = { 1.0F, 2.0F };
v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 };
\endcode
// Dynamically sized vectors (such as e.g. \ref vector_types_hybrid_vector,
// \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are resized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values
// are reset to their default value and in case the size of the initializer list exceeds the size
// of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only
// the non-zero elements are considered.
//
// \n \subsection vector_operations_copy_assignment Copy Assignment
//
// For all vector types it is generally possible to assign another vector with the same transpose
// flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the
// assigned vector is required to have the same size as the \c StaticVector since the size of a
// \c StaticVector cannot be adapted!
\code
blaze::StaticVector<int,3UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 3UL );
blaze::DynamicVector<float,columnVector> v3( 5UL );
blaze::CompressedVector<int,columnVector> v4( 3UL );
blaze::CompressedVector<float,rowVector> v5( 3UL );
// ... Initialization of the vectors
v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector
v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector
v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector
v1 = v5; // Compilation error: Cannot assign a row vector to a column vector
\endcode
// \n \subsection vector_operations_compound_assignment Compound Assignment
//
// Next to plain assignment, it is also possible to use addition assignment, subtraction
// assignment, and multiplication assignment. Note however, that in contrast to plain assignment
// the size and the transpose flag of the vectors have to be equal in order to be able to perform a
// compound assignment.
\code
blaze::StaticVector<int,5UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 5UL );
blaze::CompressedVector<float,columnVector> v3( 7UL );
blaze::DynamicVector<float,rowVector> v4( 7UL );
blaze::CompressedVector<float,rowVector> v5( 7UL );
// ... Initialization of the vectors
v1 += v2; // OK: Addition assignment between two column vectors of the same size
v1 += v3; // Runtime error: No compound assignment between vectors of different size
v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag
v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size
\endcode
// \n \section vector_operations_element_access Element Access
// <hr>
//
// \n \subsection vector_operations_subscript_operator_1 Subscript Operator
//
// The easiest and most intuitive way to access a dense or sparse vector is via the subscript
// operator. The indices to access a vector are zero-based:
\code
blaze::DynamicVector<int> v1( 5UL );
v1[0] = 1;
v1[1] = 3;
// ...
blaze::CompressedVector<float> v2( 5UL );
v2[2] = 7.3F;
v2[4] = -1.4F;
\endcode
// Whereas using the subscript operator on a dense vector only accesses the already existing
// element, accessing an element of a sparse vector via the subscript operator potentially
// inserts the element into the vector and may therefore be more expensive. Consider the
// following example:
\code
blaze::CompressedVector<int> v1( 10UL );
for( size_t i=0UL; i<v1.size(); ++i ) {
... = v1[i];
}
\endcode
// Although the compressed vector is only used for read access within the for loop, using the
// subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the
// preferred way to traverse the non-zero elements of a sparse vector is to use iterators.
//
// \n \subsection vector_operations_iterators Iterators
//
// All vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end(), and \c cend() functions to traverse the currently contained elements by iterators.
// In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a
// manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or
// \c cend() are used a \c ConstIterator is returned:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 10UL );
// ... Initialization of the vector
// Traversing the vector by Iterator
for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
// Traversing the vector by ConstIterator
for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) {
// ...
}
for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) {
// ...
}
\endcode
// \n \section vector_operations_element_insertion Element Insertion
// <hr>
//
// In contrast to dense vectors, that store all elements independent of their value and that
// offer direct access to all elements, sparse vectors only store the non-zero elements contained
// in the vector. Therefore it is necessary to explicitly add elements to the vector.
//
// \n \subsection vector_operations_subscript_operator_2 Subscript Operator
//
// The first option to add elements to a sparse vector is the subscript operator:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 3UL );
v1[1] = 2;
\endcode
// In case the element at the given index is not yet contained in the vector, it is automatically
// inserted. Otherwise the old value is replaced by the new value 2. The operator returns a
// reference to the sparse vector element.
//
// \n \subsection vector_operations_set .set()
//
// An alternative to the subscript operator is the \c set() function: In case the element is not
// yet contained in the vector the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at index 3
v1.set( 3, 1 );
\endcode
// \n \subsection vector_operations_insert .insert()
//
// The insertion of elements can be better controlled via the \c insert() function. In contrast to
// the subscript operator and the \c set() function it emits an exception in case the element is
// already contained in the vector. In order to check for this case, the \c find() function can be
// used:
\code
// In case the element at index 4 is not yet contained in the matrix it is inserted
// with a value of 6.
if( v1.find( 4 ) == v1.end() )
v1.insert( 4, 6 );
\endcode
// \n \subsection vector_operations_append .append()
//
// Although the \c insert() function is very flexible, due to performance reasons it is not suited
// for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill
// a sparse vector is the \c append() function. It requires the sparse vector to provide enough
// capacity to insert a new element. Additionally, the index of the new element must be larger
// than the index of the previous element. Violating these conditions results in undefined
// behavior!
\code
v1.reserve( 10 ); // Reserving space for 10 non-zero elements
v1.append( 5, -2 ); // Appending the element -2 at index 5
v1.append( 6, 4 ); // Appending the element 4 at index 6
// ...
\endcode
// \n \section vector_operations_element_removal Element Removal
// <hr>
//
// \subsection vector_operations_erase .erase()
//
// The \c erase() member functions can be used to remove elements from a sparse vector. The
// following example gives an impression of the five different flavors of \c erase():
\code
using blaze::CompressedVector;
CompressedVector<int> v( 42 );
// ... Initialization of the vector
// Erasing the element at index 21
v.erase( 21 );
// Erasing a single element via iterator
v.erase( v.find( 4 ) );
// Erasing all non-zero elements in the range [7..24]
v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) );
// Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
v.erase( []( int i ){ return i > 9; } );
// Erasing all non-zero elements in the range [30..40] with a value larger than 5
v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } );
\endcode
// \n \section vector_operations_element_lookup Element Lookup
// <hr>
//
// A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever
// accessing a vector element at a specific index a lookup operation is required. Whereas the
// subscript operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection vector_operations_find .find()
//
// The \c find() function can be used to check whether a specific element is contained in a sparse
// vector. It specifically searches for the element at the given index. In case the element is
// found, the function returns an iterator to the element. Otherwise an iterator just past the
// last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that
// the returned iterator is subject to invalidation due to inserting operations via the subscript
// operator, the \c set() function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the element at index 7. In case the element is not
// contained in the vector, the end() iterator is returned.
CompressedVector<int>::Iterator pos( a.find( 7 ) );
   if( pos != a.end() ) {
// ...
}
\endcode
// \n \subsection vector_operations_lowerbound .lowerBound()
//
// The \c lowerBound() function returns an iterator to the first element with an index not less
// than the given index. In combination with the \c upperBound() function this function can be
// used to create a pair of iterators specifying a range of indices. Note that the returned
// iterator is subject to invalidation due to inserting operations via the subscript operator,
// the \c set() function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the lower bound of index 17.
   CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );
// Searching the upper bound of index 28
   CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );
// Erasing all elements in the specified range
a.erase( pos1, pos2 );
\endcode
// \n \subsection vector_operations_upperbound .upperBound()
//
// The \c upperBound() function returns an iterator to the first element with an index greater than
// the given index. In combination with the \c lowerBound() function this function can be used to
// create a pair of iterators specifying a range of indices. Note that the returned iterator is
// subject to invalidation due to inserting operations via the subscript operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedVector;
CompressedVector<int> a( 42 );
// ... Initialization of the vector
// Searching the lower bound of index 17.
   CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );
// Searching the upper bound of index 28
   CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );
// Erasing all elements in the specified range
a.erase( pos1, pos2 );
\endcode
// \n \section vector_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection vector_operations_size .size() / size()
//
// Via the \c size() member function, the current size of a dense or sparse vector can be queried:
\code
// Instantiating a dynamic vector with size 10
blaze::DynamicVector<int> v1( 10UL );
v1.size(); // Returns 10
// Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements
blaze::CompressedVector<double> v2( 12UL, 3UL );
v2.size(); // Returns 12
\endcode
// Alternatively, the free function \c size() can be used to query the current size of a vector.
// In contrast to the member function, the free function can also be used to query the size of
// vector expressions:
\code
size( v1 ); // Returns 10, i.e. has the same effect as the member function
size( v2 ); // Returns 12, i.e. has the same effect as the member function
blaze::DynamicMatrix<int> A( 15UL, 12UL );
size( A * v2 ); // Returns 15, i.e. the size of the resulting vector
\endcode
// \n \subsection vector_operations_capacity .capacity() / capacity()
//
// Via the \c capacity() (member) function the internal capacity of a dense or sparse vector
// can be queried. Note that the capacity of a vector doesn't have to be equal to the size
// of a vector. In case of a dense vector the capacity will always be greater or equal than
// the size of the vector, in case of a sparse vector the capacity may even be less than
// the size.
\code
v1.capacity(); // Returns at least 10
\endcode
// For symmetry reasons, there is also a free function \c capacity() available that can be used
// to query the capacity:
\code
capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function
\endcode
// Note, however, that it is not possible to query the capacity of a vector expression:
\code
capacity( A * v1 ); // Compilation error!
\endcode
// \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros()
//
// For both dense and sparse vectors the number of non-zero elements can be determined via the
// \c nonZeros() member function. Sparse vectors directly return their number of non-zero
// elements, dense vectors traverse their elements and count the number of non-zero elements.
\code
v1.nonZeros(); // Returns the number of non-zero elements in the dense vector
v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector
\endcode
// There is also a free function \c nonZeros() available to query the current number of non-zero
// elements:
\code
nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector
nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in
// a vector expression. However, the result is not the exact number of non-zero elements, but
// may be a rough estimation:
\code
nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression
\endcode
// \n \subsection vector_operations_isempty isEmpty()
//
// The \c isEmpty() function returns whether the total number of elements of the vector is zero:
\code
blaze::DynamicVector<int> a; // Create an empty vector
isEmpty( a ); // Returns true
a.resize( 10 ); // Resize to 10 elements
isEmpty( a ); // Returns false
\endcode
// \n \subsection vector_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse vector for not-a-number
// elements:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
// If at least one element of the vector is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for vectors with floating point
// elements. The attempt to use it for a vector with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection vector_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse vector is in default state:
\code
blaze::HybridVector<int,20UL> a;
// ... Resizing and initialization
if( isDefault( a ) ) { ... }
\endcode
// A vector is in default state if it appears to just have been default constructed. All resizable
// vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are
// in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all
// subvectors, element selections, rows, and columns) is in default state if all its elements are
// in default state. For instance, in case the vector is instantiated for a built-in integral or
// floating point data type, the function returns \c true in case all vector elements are 0 and
// \c false in case any vector element is not 0.
//
//
// \n \subsection vector_operations_isUniform isUniform()
//
// In order to check if all vector elements are identical, the \c isUniform() function can be used:
\code
blaze::DynamicVector<int> a;
// ... Resizing and initialization
if( isUniform( a ) ) { ... }
\endcode
// Note that in case of sparse vectors the zero elements are also taken into account!
//
//
// \n \subsection vector_operations_isZero isZero()
//
// In order to check if all vector elements are zero, the \c isZero() function can be used:
\code
blaze::DynamicVector<int> a;
// ... Resizing and initialization
if( isZero( a ) ) { ... }
\endcode
// \n \subsection vector_operations_length length() / sqrLength()
//
// In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length()
// and \c sqrLength() function can be used:
\code
blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F };
const float len = length ( v ); // Computes the current length of the vector
const float sqrlen = sqrLength( v ); // Computes the square length of the vector
\endcode
// Note that both functions can only be used for vectors with built-in or complex element type!
//
//
// \n \subsection vector_operations_vector_trans trans()
//
// As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors
// (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However,
// vectors can be transposed via the \c trans() function:
\code
blaze::DynamicVector<int,columnVector> v1( 4UL );
blaze::CompressedVector<int,rowVector> v2( 4UL );
v1 = v2; // Compilation error: Cannot assign a row vector to a column vector
v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it
// to the column vector v1
v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2
v1 += trans( v2 ); // OK: Addition assignment of two column vectors
\endcode
// \n \subsection vector_operations_ctrans ctrans()
//
// It is also possible to compute the conjugate transpose of a vector. This operation is available
// via the \c ctrans() function:
\code
blaze::CompressedVector< complex<float>, rowVector > v1( 4UL );
blaze::DynamicVector< complex<float>, columnVector > v2( 4UL );
v1 = ctrans( v2 ); // Compute the conjugate transpose vector
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector
v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector
\endcode
// \n \subsection vector_operations_reverse reverse()
//
// Via the \c reverse() function it is possible to reverse the elements of a dense or sparse
// vector. The following examples demonstrates this by means of a dense vector:
\code
blaze::DynamicVector<int> a{ 1, 2, 3, 4, 5 };
blaze::DynamicVector<int> b;
b = reverse( a ); // Results in ( 5 4 3 2 1 )
\endcode
// \n \subsection vector_operations_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given vector expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a dense and a sparse vector:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
auto c = evaluate( a * b );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary vector is created and no copy operation is performed. Instead, the result
// is directly written to the target vector due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
CompressedVector<double> d( a * b ); // No temporary & no copy operation
DynamicVector<double> e( a * b ); // Temporary & copy operation
d = evaluate( a * b ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicVector<double> a, b, c, d;
d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector
d = a + eval( b * c ); // No creation of a temporary vector
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
//
// \n \section vector_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection vector_operations_resize_reserve .resize() / .reserve()
//
// The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector
// cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as
// \c CompressedVectors can be changed via the \c resize() function:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
DynamicVector<int,columnVector> v1;
CompressedVector<int,rowVector> v2( 4 );
v2[1] = -2;
v2[3] = 11;
// Adapting the size of the dynamic and compressed vectors. The (optional) second parameter
// specifies whether the existing elements should be preserved. Per default, the existing
// elements are preserved.
v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain
// uninitialized, elements of class type are default constructed.
v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the
// new elements are NOT initialized!
v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved.
v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost.
\endcode
// Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors)
// on the vector:
\code
blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10
auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6]
v1.resize( 6UL ); // Resizing the vector invalidates the view
\endcode
// When the internal capacity of a vector is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicVector<int> v1;
v1.reserve( 100 );
v1.size(); // Returns 0
v1.capacity(); // Returns at least 100
\endcode
// Note that the size of the vector remains unchanged, but only the internal capacity is set
// according to the specified value!
//
// \n \subsection vector_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of vectors with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers
v1.resize( 10UL ); // Resize to 10, but the capacity is preserved
v1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c size(). Please
// also note that in case a reallocation occurs, all iterators (including \c end() iterators), all
// pointers and references to elements of the vector are invalidated.
//
// \subsection vector_operations_reset_clear reset() / clear()
//
// In order to reset all elements of a vector, the \c reset() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with 2.0F.
blaze::DynamicVector<float> v1( 3UL, 2.0F );
// Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged.
reset( v1 ); // Resetting all elements
v1.size(); // Returns 3: size and capacity remain unchanged
\endcode
// In order to return a vector to its default state (i.e. the state of a default constructed
// vector), the \c clear() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with -1.0F.
blaze::DynamicVector<float> v1( 5, -1.0F );
// Resetting the entire vector.
clear( v1 ); // Resetting the entire vector
v1.size(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// Note that resetting or clearing both dense and sparse vectors does not change the capacity
// of the vectors.
//
//
// \n \subsection vector_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two vectors of
// the same type:
\code
blaze::DynamicVector<int,columnVector> v1( 10UL );
blaze::DynamicVector<int,columnVector> v2( 20UL );
swap( v1, v2 ); // Swapping the contents of v1 and v2
\endcode
// \n \section vector_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection vector_operations_normalize normalize()
//
// The \c normalize() function can be used to scale any non-zero vector to a length of 1. In
// case the vector does not contain a single non-zero element (i.e. is a zero vector), the
// \c normalize() function returns a zero vector.
\code
blaze::DynamicVector<float,columnVector> v1( 10UL );
blaze::CompressedVector<double,columnVector> v2( 12UL );
v1 = normalize( v1 ); // Normalizing the dense vector v1
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
v1 = normalize( v2 ); // Assigning v1 the normalized vector v2
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
\endcode
// Note that the \c normalize() function only works for floating point vectors. The attempt to
// use it for an integral vector results in a compile time error.
//
//
// \n \subsection vector_operations_min_max min() / max()
//
// The \c min() and \c max() functions can be used for a single vector or multiple vectors. If
// passed a single vector, the functions return the smallest and largest element of the given
// dense vector or the smallest and largest non-zero element of the given sparse vector,
// respectively:
\code
blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 };
min( a ); // Returns -5
max( a ); // Returns 7
\endcode
\code
blaze::CompressedVector<int> b{ 1, 0, 3, 0 };
min( b ); // Returns 1
max( b ); // Returns 3
\endcode
// For more information on the unary \c min() and \c max() reduction operations see the
// \ref vector_operations_reduction_operations section.
//
// If passed two or more dense vectors, the \c min() and \c max() functions compute the
// componentwise minimum or maximum of the given vectors, respectively:
\code
blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 };
blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 };
min( a, c ); // Results in the vector ( -5, 1, -7, -4 )
max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 )
\endcode
// Please note that sparse vectors can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c max() functions can be used to compute the
// smallest and largest element of a vector expression:
\code
min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector
max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector
min( a + c, c - d ); // Results in ( -10 -2 -7 0 )
max( a - c, c + d ); // Results in ( 0 4 14 6 )
\endcode
// \n \subsection vector_operators_softmax softmax()
//
// The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called
// the normalized exponential function, of a given dense vector can be computed via \c softmax().
// The resulting dense vector consists of real values in the range (0..1], which add up to 1.
\code
blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0 };
blaze::StaticVector<double,7UL,rowVector> y;
// Evaluating the softmax function
y = softmax( x ); // Results in ( 0.024 0.064 0.175 0.475 0.024 0.064 0.175 )
double s = sum( y ); // Results in 1
\endcode
// \n \subsection vector_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a vector.
// For instance, the following computation
\code
blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 };
blaze::StaticVector<int,3UL,rowVector> b( abs( a ) );
\endcode
// results in the vector
\f$ b = \left(\begin{array}{*{1}{c}}
1 \\
2 \\
3 \\
\end{array}\right)\f$
// \n \subsection vector_operators_sign sign()
//
// The \c sign() function can be used to evaluate the sign of each element of a vector \a a. For
// each element \c i the corresponding result is 1 if \a a[i] is greater than zero, 0 if \a a[i]
// is zero, and -1 if \a a[i] is less than zero. For instance, the following use of the \c sign()
// function
\code
blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 };
blaze::StaticVector<int,3UL,rowVector> b( sign( a ) );
\endcode
// results in the vector
\f$ b = \left(\begin{array}{*{1}{c}}
-1 \\
1 \\
0 \\
\end{array}\right)\f$
// \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up
// each element of a vector, respectively:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = floor( a ); // Rounding down each element of the vector
b = ceil ( a ); // Rounding up each element of the vector
b = trunc( a ); // Truncating each element of the vector
b = round( a ); // Rounding each element of the vector
\endcode
// \n \subsection vector_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse vector to compute the complex
// conjugate of each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Computing the vector of complex conjugates
// ( (-2, 1) )
// ( ( 1,-1) )
StaticVector<cplx,2UL> b;
b = conj( a );
\endcode
// Additionally, vectors can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicVector<cplx> c( 5UL );
conjugate( c ); // In-place conjugate operation.
c = conj( c ); // Same as above
\endcode
// \n \subsection vector_operators_real real()
//
// The \c real() function can be used on a dense or sparse vector to extract the real part of
// each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the real part of each vector element
// ( -2 )
// ( 1 )
StaticVector<double,2UL> b;
b = real( a );
\endcode
// \n \subsection vector_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse vector to extract the imaginary part
// of each element of the vector:
\code
using blaze::StaticVector;
using cplx = std::complex<double>;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the imaginary part of each vector element
// ( -1 )
// ( 1 )
StaticVector<double,2UL> b;
b = imag( a );
\endcode
// \n \subsection vector_operations_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// vector can be computed:
\code
blaze::DynamicVector<double> a, b, c;
b = sqrt( a ); // Computes the square root of each element
c = invsqrt( a ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a vector:
\code
blaze::HybridVector<double,3UL> a, b, c;
b = cbrt( a ); // Computes the cubic root of each element
c = invcbrt( a ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense vectors:
\code
blaze::StaticVector<double,3UL> a, b, c;
   c = hypot( a, b );  // Computes the componentwise hypotenuse
\endcode
// \n \subsection vector_operations_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a vector to a specific range:
\code
   blaze::DynamicVector<double> a, b;
b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a vector.
// If passed a vector and a numeric exponent, the function computes the exponential value of each
// element of the vector using the same exponent. If passed a second vector, the function computes
// the componentwise exponential value:
\code
blaze::StaticVector<double,3UL> a, b, c;
c = pow( a, 1.2 ); // Computes the exponential value of each element
c = pow( a, b ); // Computes the componentwise exponential value
\endcode
// \n \subsection vector_operations_exp exp() / exp2() / exp10()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// vector, respectively:
\code
blaze::DynamicVector<double> a, b;
b = exp( a ); // Computes the base e exponential of each element
b = exp2( a ); // Computes the base 2 exponential of each element
b = exp10( a ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_log log() / log2() / log10()
//
// The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary
// and common logarithm of each element of a vector:
\code
blaze::StaticVector<double,3UL> a, b;
b = log( a ); // Computes the natural logarithm of each element
b = log2( a ); // Computes the binary logarithm of each element
b = log10( a ); // Computes the common logarithm of each element
\endcode
// \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sin( a ); // Computes the sine of each element of the vector
b = cos( a ); // Computes the cosine of each element of the vector
b = tan( a ); // Computes the tangent of each element of the vector
b = asin( a ); // Computes the inverse sine of each element of the vector
b = acos( a ); // Computes the inverse cosine of each element of the vector
b = atan( a ); // Computes the inverse tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sinh( a ); // Computes the hyperbolic sine of each element of the vector
b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector
b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector
b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector
b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector
b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_atan2 atan2()
//
// The multi-valued inverse tangent is available for a pair of dense vectors:
\code
blaze::DynamicVector<double> a, b, c;
c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent
\endcode
// \n \subsection vector_operations_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a vector:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = erf( a ); // Computes the error function of each element
b = erfc( a ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_map map() / forEach()
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on vectors. The unary \c map() function can be used to apply a custom operation
// on each element of a dense or sparse vector. For instance, the following example demonstrates
// a custom square root computation via a lambda:
\code
blaze::DynamicVector<double> a, b;
b = map( a, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense vectors. The following example demonstrates the merging of two vectors of double
// precision values into a vector of double precision complex numbers:
\code
blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 };
blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 };
blaze::DynamicVector< complex<double> > cplx;
// Creating the vector
// ( ( 2.1, 0.3) )
// ( (-4.2, 1.4) )
// ( ( 1.0, 2.9) )
// ( ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Although the computation can be parallelized it is not vectorized and thus cannot perform at
// peak performance. However, it is also possible to create vectorized custom operations. See
// \ref custom_operations for a detailed overview of the possibilities of custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used (even for binary custom operations), but the function might be deprecated in future
// releases of \b Blaze.
//
//
// \n \section vector_operations_reduction_operations Reduction Operations
// <hr>
//
// \subsection vector_operations_reduction_operations_reduce reduce()
//
// The \c reduce() function performs a total reduction of the elements of the given dense vector
// or the non-zero elements of the given sparse vector. The following examples demonstrate the
// total reduction of a dense and sparse vector:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
const double totalsum1 = reduce( a, blaze::Add() );
const double totalsum2 = reduce( a, []( double a, double b ){ return a + b; } );
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
const double totalmin1 = reduce( a, blaze::Min() );
const double totalmin2 = reduce( a, []( double a, double b ){ return blaze::min( a, b ); } );
\endcode
// As demonstrated in the examples it is possible to pass any binary callable as custom reduction
// operation. However, for instance in the case of lambdas the vectorization of the reduction
// operation is compiler dependent and might not perform at peak performance. However, it is also
// possible to create vectorized custom operations. See \ref custom_operations for a detailed
// overview of the possibilities of custom operations.
//
// Please note that the evaluation order of the \c reduce() function is unspecified. Thus the
// behavior is non-deterministic if the given reduction operation is not associative or not
// commutative. Also, the operation is undefined if the given reduction operation modifies the
// values.
//
// \n \subsection vector_operations_reduction_operations_sum sum()
//
// The \c sum() function reduces the elements of the given dense vector or the non-zero elements
// of the given sparse vector by means of addition:
\code
blaze::DynamicVector<int> a{ 1, 2, 3, 4 };
const int totalsum = sum( a ); // Results in 10
\endcode
\code
blaze::CompressedVector<int> a{ 1, 2, 3, 4 };
const int totalsum = sum( a ); // Results in 10
\endcode
// Please note that the evaluation order of the \c sum() function is unspecified.
//
// \n \subsection vector_operations_reduction_operations_prod prod()
//
// The \c prod() function reduces the elements of the given dense vector or the non-zero elements
// of the given sparse vector by means of multiplication:
\code
blaze::DynamicVector<int> a{ 1, 2, 3, 4 };
const int totalprod = prod( a ); // Results in 24
\endcode
\code
blaze::CompressedVector<int> a{ 1, 2, 3, 4 };
const int totalprod = prod( a ); // Results in 24
\endcode
// \n \subsection vector_operations_reduction_operations_min min()
//
// The unary \c min() function returns the smallest element of the given dense vector or the
// smallest non-zero element of the given sparse vector. It can only be used for element types
// that support the smaller-than relationship. In case the given vector currently has a size
// of 0, the returned value is the default value (e.g. 0 in case of fundamental data types).
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const int totalmin = min( a ); // Results in -2
\endcode
\code
blaze::CompressedVector<int> a{ 1, 0, 3, 0 };
const int totalmin = min( a ); // Results in 1
\endcode
// \note In case the sparse vector is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed vector has only 2 non-zero elements.
// However, the minimum of the vector is 1.
//
// \n \subsection vector_operations_reduction_operations_max max()
//
// The unary \c max() function returns the largest element of the given dense vector or the
// largest non-zero element of the given sparse vector. It can only be used for element types
// that support the smaller-than relationship. In case the given vector currently has a size
// of 0, the returned value is the default value (e.g. 0 in case of fundamental data types).
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const int totalmax = max( a ); // Results in 3
\endcode
\code
blaze::CompressedVector<int> a{ -1, 0, -3, 0 };
const int totalmax = max( a );  // Results in -1
\endcode
// \note In case the sparse vector is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed vector has only 2 non-zero elements.
// However, the maximum of the vector is -1.
//
// \n \subsection vector_operations_reduction_operations_argmin argmin()
//
// The \c argmin() function returns the index of the first smallest element of the given dense
// vector. This function can only be used for element types that support the smaller-than
// relationship. In case the given vector currently has a size of 0, the returned index is 0.
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const size_t minindex = argmin( a ); // Results in 1
\endcode
// \n \subsection vector_operations_reduction_operations_argmax argmax()
//
// The \c argmax() function returns the index of the first largest element of the given dense
// vector. This function can only be used for element types that support the smaller-than
// relationship. In case the given vector currently has a size of 0, the returned index is 0.
\code
blaze::DynamicVector<int> a{ 1, -2, 3, 0 };
const size_t maxindex = argmax( a ); // Results in 2
\endcode
// \n \section vector_operations_norms Norms
// <hr>
//
// \subsection vector_operations_norms_norm norm()
//
// The \c norm() function computes the L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = norm( a );
const double norm2 = norm( b );
\endcode
// \n \subsection vector_operations_norms_sqrnorm sqrNorm()
//
// The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = sqrNorm( a );
const double norm2 = sqrNorm( b );
\endcode
// \n \subsection vector_operations_norms_l1norm l1Norm()
//
// The \c l1Norm() function computes the L1 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l1Norm( a );
const double norm2 = l1Norm( b );
\endcode
// \n \subsection vector_operations_norms_l2norm l2Norm()
//
// The \c l2Norm() function computes the L2 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l2Norm( a );
const double norm2 = l2Norm( b );
\endcode
// \n \subsection vector_operations_norms_l3norm l3Norm()
//
// The \c l3Norm() function computes the L3 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l3Norm( a );
const double norm2 = l3Norm( b );
\endcode
// \n \subsection vector_operations_norms_l4norm l4Norm()
//
// The \c l4Norm() function computes the L4 norm of the given dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = l4Norm( a );
const double norm2 = l4Norm( b );
\endcode
// \n \subsection vector_operations_norms_lpnorm lpNorm()
//
// The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector,
// where the norm is specified by either a compile time or a runtime argument:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = lpNorm<2>( a ); // Compile time argument
const double norm2 = lpNorm( b, 2.3 ); // Runtime argument
\endcode
// \n \subsection vector_operations_norms_maxnorm linfNorm() / maxNorm()
//
// The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given
// dense or sparse vector:
\code
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
const double norm1 = linfNorm( a );
const double norm2 = maxNorm( b );
\endcode
// \n \section vector_operations_scalar_expansion Scalar Expansion
// <hr>
//
// By means of the \c uniform() function it is possible to expand a scalar value into a dense,
// uniform vector. By default, the resulting uniform vector is a column vector, but it is possible
// to specify the transpose flag explicitly:
\code
using blaze::columnVector;
int scalar = 5;
blaze::DynamicVector<int,columnVector> v;
// ... Resizing and initialization
// Expansion of 'scalar' to a 3-dimensional uniform column vector
//
// ( 5 )
// ( 5 )
// ( 5 )
//
v = uniform( 3UL, scalar );
v = uniform<columnVector>( 3UL, scalar );
\endcode
// \n \section vector_operations_vector_expansion Vector Expansion
// <hr>
//
// Via the \c expand() function it is possible to convert a dense or sparse vector into a matrix.
// A column vector is expanded into a column-major matrix, a row vector is expanded into a
// row-major matrix. As demonstrated by the following examples, \c expand() can be used with both
// runtime and compile time parameters:
\code
blaze::DynamicVector<int,columnVector> a{ 1, 2, 3 };
blaze::CompressedVector<int,rowVector> b{ 1, 0, 3, 0, 5 };
// Expand the dense column vector ( 1 2 3 ) into a dense 3x5 column-major matrix
//
// ( 1 1 1 1 1 )
// ( 2 2 2 2 2 )
// ( 3 3 3 3 3 )
//
expand( a, 5 ); // Runtime parameter
expand<5>( a ); // Compile time parameter
// Expand the sparse row vector ( 1 0 3 0 5 ) into a sparse 3x5 row-major matrix
//
// ( 1 0 3 0 5 )
// ( 1 0 3 0 5 )
// ( 1 0 3 0 5 )
//
expand( b, 3 ); // Runtime parameter
expand<3>( b ); // Compile time parameter
\endcode
// \n \section vector_operations_statistic_operations Statistic Operations
// <hr>
//
// \subsection vector_operations_mean mean()
//
// The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or
// sparse vector can be computed via the \c mean() function. In case of a sparse vector, both the
// non-zero and zero elements are taken into account. The following example demonstrates the
// computation of the mean of a dense vector:
\code
blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 };
const double m = mean( v ); // Results in 4.2 (i.e. 21/5)
\endcode
// In case the size of the given vector is 0, a \a std::invalid_argument is thrown.
//
// \n \subsection vector_operations_var var()
//
// The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse vector
// can be computed via the \c var() function. In case of a sparse vector, both the non-zero and
// zero elements are taken into account. The following example demonstrates the computation of
// the variance of a dense vector:
\code
blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 };
const double variance = var( v );  // Results in 5.7
\endcode
// In case the size of the given vector is smaller than 2, a \a std::invalid_argument is thrown.
//
// \n \subsection vector_operations_stddev stddev()
//
// The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a
// dense or sparse vector can be computed via the \c stddev() function. In case of a sparse
// vector, both the non-zero and zero elements are taken into account. The following example
// demonstrates the computation of the standard deviation of a dense vector:
\code
blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 };
const double s = stddev( v ); // Results in 2.38747
\endcode
// In case the size of the given vector is smaller than 2, a \a std::invalid_argument is thrown.
//
//
// \n \section vector_operations_declaration_operations Declaration Operations
// <hr>
//
// \subsection vector_operations_declzero declzero()
//
// The \c declzero() operation can be used to explicitly declare any vector or vector expression
// as zero vector:
\code
blaze::DynamicVector<double> a, b;
// ... Resizing and initialization
b = declzero( a );
\endcode
// Any vector or vector expression that has been declared as zero vector via \c declzero() will
// gain all the benefits of a zero vector, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicVector;
DynamicVector<double> a, b, c;
// ... Resizing and initialization
isZero( declzero( a ) ); // Will always return true without runtime effort
c = declzero( a ) + b; // Declare the left operand of the vector addition as a
// zero vector, i.e. no addition needs to be performed
\endcode
// \warning The \c declzero() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-zero vector or
// vector expression as zero vector via the \c declzero() operation leads to undefined behavior
// (which can result in violated invariants or wrong computation results)!
//
//
// \n Previous: \ref vector_types Next: \ref matrices
*/
//*************************************************************************************************
//**Matrices***************************************************************************************
/*!\page matrices Matrices
//
// \tableofcontents
//
//
// \n \section matrices_general General Concepts
// <hr>
//
// The \b Blaze library currently offers five dense matrix types (\ref matrix_types_static_matrix,
// \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, \ref matrix_types_custom_matrix,
// and \ref matrix_types_uniform_matrix) and three sparse matrix types (\ref matrix_types_compressed_matrix,
// \ref matrix_types_identity_matrix, and \ref matrix_types_zero_matrix). All matrices can either
// be stored as row-major matrices or column-major matrices:
\code
using blaze::DynamicMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Setup of the 2x3 row-major dense matrix
//
// ( 1 2 3 )
// ( 4 5 6 )
//
DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 },
{ 4, 5, 6 } };
// Setup of the 3x2 column-major dense matrix
//
// ( 1 4 )
// ( 2 5 )
// ( 3 6 )
//
DynamicMatrix<int,columnMajor> B{ { 1, 4 },
{ 2, 5 },
{ 3, 6 } };
\endcode
// Per default, all matrices in \b Blaze are row-major matrices:
\code
// Instantiation of a 3x3 row-major matrix
blaze::DynamicMatrix<int> C( 3UL, 3UL );
\endcode
// \n \section matrices_details Matrix Details
// <hr>
//
// - \ref matrix_types
// - \ref matrix_operations
//
//
// \n \section matrices_examples Examples
// <hr>
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix
CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix
DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix
// ... Resizing and initialization
C = A * B;
\endcode
// \n Previous: \ref vector_operations Next: \ref matrix_types
*/
//*************************************************************************************************
//**Matrix Types***********************************************************************************
/*!\page matrix_types Matrix Types
//
// \tableofcontents
//
//
// \n \section matrix_types_static_matrix StaticMatrix
// <hr>
//
// The blaze::StaticMatrix class template is the representation of a fixed size matrix with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticMatrix.h>
\endcode
// The type of the elements, the number of rows and columns, and the storage order of the matrix
// can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class StaticMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c M : specifies the total number of rows of the matrix.
// - \c N : specifies the total number of columns of the matrix. Note that it is expected
// that StaticMatrix is only used for tiny and small matrices.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are
// known at compile time:
\code
// Definition of a 3x4 integral row-major matrix
blaze::StaticMatrix<int,3UL,4UL> A;
// Definition of a 4x6 single precision row-major matrix
blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B;
// Definition of a 6x4 double precision column-major matrix
blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_dynamic_matrix DynamicMatrix
// <hr>
//
// The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix
// with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included
// via the header file
\code
#include <blaze/math/DynamicMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class DynamicMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best
// choice for medium to large matrices. The number of rows and columns can be modified at runtime:
\code
// Definition of a 3x4 integral row-major matrix
blaze::DynamicMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::DynamicMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_hybrid_matrix HybridMatrix
// <hr>
//
// The HybridMatrix class template combines the flexibility of a dynamically sized matrix with
// the efficiency and performance of a fixed size matrix. It is implemented as a crossing between
// the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static
// matrix it uses static stack memory instead of dynamically allocated memory and similar to the
// dynamic matrix it can be resized (within the extent of the static memory). It can be included
// via the header file
\code
#include <blaze/math/HybridMatrix.h>
\endcode
// The type of the elements, the maximum number of rows and columns and the storage order of the
// matrix can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class HybridMatrix;
\endcode
// - Type: specifies the type of the matrix elements. HybridMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - M : specifies the maximum number of rows of the matrix.
// - N : specifies the maximum number of columns of the matrix. Note that it is expected
// that HybridMatrix is only used for tiny and small matrices.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions
// are not known at compile time or not fixed at runtime, but whose maximum dimensions are known
// at compile time:
\code
// Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8
blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16
blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6
blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_custom_matrix CustomMatrix
// <hr>
//
// The blaze::CustomMatrix class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data
// structure. Thus in contrast to all other dense matrix types a custom matrix does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of element
// during construction. A custom matrix can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomMatrix.h>
\endcode
// The type of the elements, the properties of the given array of elements and the storage order
// of the matrix can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool SO >
class CustomMatrix;
\endcode
// - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
//  - PF  : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CustomMatrix is the right choice if any external array needs to be represented as
// a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays
using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>;
std::vector<int> vec( 12UL );
UnalignedUnpadded A( &vec[0], 3UL, 4UL );
// Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays
using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>;
std::unique_ptr<float[]> memory1( new float[40] );
UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL );
// Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays
using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>;
std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) );
AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL );
// Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays
using cplx = complex<double>;
using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>;
std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) );
AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL );
\endcode
// In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several
// special characteristics. All of these result from the fact that a custom matrix is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref matrix_types_custom_matrix_memory_management</b>
// -# <b>\ref matrix_types_custom_matrix_copy_operations</b>
// -# <b>\ref matrix_types_custom_matrix_alignment</b>
// -# <b>\ref matrix_types_custom_matrix_padding</b>
//
// \n \subsection matrix_types_custom_matrix_memory_management Memory Management
//
// The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// matrix data structure. However, this flexibility comes with the price that the user of a custom
// matrix is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom matrices:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom matrix!
std::vector<int> vec( 12UL );
CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL );
// Definition of a custom 8x12 matrix for an aligned and padded integer array of
// capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr
// must be guaranteed to outlive the custom matrix!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) );
CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL );
\endcode
// \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations
//
// As with all dense matrices it is possible to copy construct a custom matrix:
\code
using blaze::CustomMatrix;
using blaze::unaligned;
using blaze::unpadded;
using CustomType = CustomMatrix<int,unaligned,unpadded>;
std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10
CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A(0,1) = 20;                       // Also modifies the std::vector

CustomType B( A );                 // Creating a copy of matrix A
B(0,2) = 20;                       // Also affects matrix A and the std::vector
\endcode
// It is important to note that a custom matrix acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom matrix that is referencing and representing
// the same array as the original custom matrix.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom matrices, but modifies the values of the array:
\code
std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4
CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A = C; // Copy assignment: Set all values of matrix A and B to 4.
\endcode
// \n \subsection matrix_types_custom_matrix_alignment Alignment
//
// In case the custom matrix is specified as \c aligned the passed array must adhere to some
// alignment restrictions based on the alignment requirements of the used data type and the
// used instruction set (SSE, AVX, ...). The restriction applies to the first element of each
// row/column: In case of a row-major matrix the first element of each row must be properly
// aligned, in case of a column-major matrix the first element of each column must be properly
// aligned. For instance, if a row-major matrix is used and AVX is active the first element of
// each row must be 32-byte aligned:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using blaze::rowMajor;
// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) );
CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL );
\endcode
// In the example, the row-major matrix has six columns. However, since with AVX eight integer
// values are loaded together the matrix is padded with two additional elements. This guarantees
// that the first element of each row is 32-byte aligned. In case the alignment requirements are
// violated, a \c std::invalid_argument exception is thrown.
//
// \n \subsection matrix_types_custom_matrix_padding Padding
//
// Adding padding elements to the end of each row/column can have a significant impact on the
// performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double
// precision matrices can be added via three SIMD addition operations:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using CustomType = CustomMatrix<double,aligned,padded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) );
// Creating padded custom 3x3 matrix with an additional padding element in each row
CustomType A( memory1.get(), 3UL, 3UL, 4UL );
CustomType B( memory2.get(), 3UL, 3UL, 4UL );
CustomType C( memory3.get(), 3UL, 3UL, 4UL );
// ... Initialization
C = A + B; // AVX-based matrix addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted a scalar addition has to be used:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
using CustomType = CustomMatrix<double,aligned,unpadded>;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) );
// Creating unpadded custom 3x3 matrix
CustomType A( memory1.get(), 3UL, 3UL );
CustomType B( memory2.get(), 3UL, 3UL );
CustomType C( memory3.get(), 3UL, 3UL );
// ... Initialization
C = A + B; // Scalar matrix addition
\endcode
// Note that the construction of padded and unpadded aligned matrices looks identical. However,
// in case of padded matrices, \b Blaze will zero initialize the padding element and use them
// in all computations in order to achieve maximum performance. In case of an unpadded matrix
// \b Blaze will ignore the elements with the downside that it is not possible to load a complete
// row to an AVX register, which makes it necessary to fall back to a scalar addition.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom matrix the added padding elements must
// guarantee that the total number of elements in each row/column is a multiple of the SIMD
// vector width. In case of an unaligned padded matrix the number of padding elements can be
// greater or equal the number of padding elements of an aligned padded custom matrix. In case
// the padding is insufficient with respect to the available instruction set, a
// \c std::invalid_argument exception is thrown.
//
//
// \n \section matrix_types_uniform_matrix UniformMatrix
// <hr>
//
// The blaze::UniformMatrix class template is the representation of an arbitrary sized uniform
// matrix with elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/UniformMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class UniformMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. UniformMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::UniformMatrix is the best choice for uniform matrices of any size. The number of
// rows and columns can be modified at runtime:
\code
// Definition of a 3x4 integral row-major matrix
blaze::UniformMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::UniformMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::UniformMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_compressed_matrix CompressedMatrix
// <hr>
//
// The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse
// matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be
// included via the header file
\code
#include <blaze/math/CompressedMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class CompressedMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices:
\code
// Definition of a 3x4 integral row-major matrix
blaze::CompressedMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::CompressedMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_identity_matrix IdentityMatrix
// <hr>
//
// The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary
// sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included
// via the header file
\code
#include <blaze/math/IdentityMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class IdentityMatrix;
\endcode
// - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::IdentityMatrix is the perfect choice to represent an identity matrix:
\code
// Definition of a 3x3 integral row-major identity matrix
blaze::IdentityMatrix<int> A( 3UL );
// Definition of a 6x6 single precision row-major identity matrix
blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL );
// Definition of a double precision column-major identity matrix with 0 rows and columns
blaze::IdentityMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_zero_matrix ZeroMatrix
// <hr>
//
// The blaze::ZeroMatrix class template is the representation of an immutable, arbitrary sized
// zero matrix with \f$ M \cdot N \f$ elements of arbitrary type. It can be included via the
// header file
\code
#include <blaze/math/ZeroMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class ZeroMatrix;
\endcode
// - Type: specifies the type of the matrix elements. ZeroMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::ZeroMatrix is the perfect choice to represent a zero matrix:
\code
// Definition of a 3x5 integral row-major zero matrix
blaze::ZeroMatrix<int> A( 3UL, 5UL );
// Definition of a 6x4 single precision row-major zero matrix
blaze::ZeroMatrix<float,blaze::rowMajor> B( 6UL, 4UL );
// Definition of a double precision column-major zero matrix with 0 rows and columns
blaze::ZeroMatrix<double,blaze::columnMajor> C;
\endcode
// \n Previous: \ref matrices Next: \ref matrix_operations
*/
//*************************************************************************************************
//**Matrix Operations******************************************************************************
/*!\page matrix_operations Matrix Operations
//
// \tableofcontents
//
//
// \n \section matrix_operations_constructors Constructors
// <hr>
//
// Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules
// to be aware of:
// - In case the last template parameter (the storage order) is omitted, the matrix is per
// default stored in row-major order.
// - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection matrix_operations_default_construction Default Construction
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
// All matrices can be default constructed. Whereas the size of
// a StaticMatrix is fixed via the second and third template
// parameter, the initial size of a constructed DynamicMatrix
// or CompressedMatrix is 0.
StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major
// matrix. All elements are initialized to 0.
DynamicMatrix<float> M2; // Instantiation of a single precision dynamic
// row-major matrix with 0 rows and 0 columns.
DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic
// column-major matrix with 0 rows and 0 columns.
CompressedMatrix<int> M4; // Instantiation of a compressed integer
// row-major matrix of size 0x0.
CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision
// column-major matrix of size 0x0.
\endcode
// \n \subsection matrix_operations_size_construction Construction with Specific Size
//
// The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor
// that allows to immediately give the matrices a specific number of rows and columns:
\code
DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major
// matrix. The elements are not initialized.
HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major
// matrix. The elements are not initialized.
CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed
// column-major matrix.
\endcode
// Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately
// allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this
// example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory.
//
//
// \n \subsection matrix_operations_initialization_constructors Initialization Constructors
//
// All dense matrix classes offer a constructor for a direct, homogeneous initialization of all
// matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements
// can be specified.
\code
StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major
// matrix. All elements are initialized to 7.
DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major
// matrix. All elements are initialized to 2.0F.
CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major
// matrix with capacity for 4 non-zero elements.
\endcode
// \n \subsection matrix_operations_array_construction Array Construction
//
// Alternatively, all dense matrix classes offer a constructor for an initialization with a
// dynamic or static array. If the matrix is initialized from a dynamic array, the constructor
// expects the dimensions of values provided by the array as first and second argument, the
// array as third argument. In case of a static array, the fixed size of the array is used:
\code
const std::unique_ptr<double[]> array1( new double[6] );
// ... Initialization of the dynamic array
blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() );
int array2[2][2] = { { 4, -5 }, { -6, 7 } };
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 );
\endcode
// \n \subsection matrix_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense and sparse matrix classes can be directly initialized by means of an
// initializer list:
\code
blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F },
{ -0.9F, -1.2F },
{ 4.8F, 0.6F } };
blaze::CompressedMatrix<int,rowMajor> M15{ { 3 },
{ 1 },
{ 0, 2 } };
\endcode
// Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix,
// \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are sized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values
// are initialized as default and in case the size of the top-level initializer list does not
// match the number of rows of the matrix or the size of any nested list exceeds the number of
// columns, a \a std::invalid_argument exception is thrown. In case of sparse matrices, only
// the non-zero elements are used to initialize the matrix.
//
// \n \subsection matrix_operations_copy_construction Copy Construction
//
// All dense and sparse matrices can be created as a copy of another dense or sparse matrix.
\code
StaticMatrix<int,5UL,4UL,rowMajor> M16( M6 ); // Instantiation of the dense row-major matrix M16
// as copy of the dense row-major matrix M6.
DynamicMatrix<float,columnMajor> M17( M8 ); // Instantiation of the dense column-major matrix M17
// as copy of the sparse column-major matrix M8.
CompressedMatrix<double,columnMajor> M18( M7 ); // Instantiation of the compressed column-major matrix
// M18 as copy of the dense row-major matrix M7.
CompressedMatrix<float,rowMajor> M19( M8 ); // Instantiation of the compressed row-major matrix
// M19 as copy of the compressed column-major matrix M8.
\endcode
// Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different
// number of rows and/or columns:
\code
StaticMatrix<int,4UL,5UL,rowMajor> M20( M6 ); // Runtime error: Number of rows and columns
// does not match!
StaticMatrix<int,4UL,4UL,columnMajor> M21( M9 ); // Compile time error: Number of columns does
// not match!
\endcode
// \n \section matrix_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse matrices:
// \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment,
// \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment.
//
//
// \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment
//
// It is possible to assign the same value to all elements of a dense matrix. All dense matrix
// classes provide an according assignment operator:
\code
blaze::StaticMatrix<int,3UL,2UL> M1;
blaze::DynamicMatrix<double> M2;
// Setting all integer elements of the StaticMatrix to 4
M1 = 4;
// Setting all double precision elements of the DynamicMatrix to 3.5
M2 = 3.5;
\endcode
// \n \subsection matrix_operations_array_assignment Array Assignment
//
// Dense matrices can also be assigned a static array:
\code
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1;
blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2;
blaze::DynamicMatrix<double> M3;
int array1[2][2] = { { 1, 2 }, { 3, 4 } };
double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M1 = array1;
M2 = array1;
M3 = array2;
\endcode
// Note that the dimensions of the static array have to match the size of a \c StaticMatrix,
// whereas a \c DynamicMatrix is resized according to the array dimensions:
\f$ M3 = \left(\begin{array}{*{2}{c}}
3.1 & 6.4 \\
-0.9 & -1.2 \\
4.8 & 0.6 \\
\end{array}\right)\f$
// \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense or sparse
// matrix:
\code
blaze::DynamicMatrix<double> M1;
blaze::CompressedMatrix<int> M2;
M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } };
\endcode
// Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix,
// \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are resized according
// to the size of the initializer list and all their elements are (copy) assigned the values of
// the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values
// are reset to their default value and in case the size of the top-level initializer list does
// not match the number of rows of the matrix or the size of any nested list exceeds the number
// of columns, a \a std::invalid_argument exception is thrown. In case of sparse matrices, only
// the non-zero elements are considered.
//
// \n \subsection matrix_operations_copy_assignment Copy Assignment
//
// All kinds of matrices can be assigned to each other. The only restriction is that since a
// \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of
// rows and in the number of columns.
\code
blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL );
blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL );
blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL );
blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL );
// ... Initialization of the matrices
M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix
M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix
M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix
M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix
\endcode
// \n \subsection matrix_operations_compound_assignment Compound Assignment
//
// Compound assignment is also available for matrices: addition assignment, subtraction assignment,
// and multiplication assignment. In contrast to plain assignment, however, the number of rows
// and columns of the two operands have to match according to the arithmetic operation.
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL );
blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL );
blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL );
blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5;
blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL );
// ... Initialization of the matrices
M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions
M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix
M1 += M4; // Runtime error: No compound assignment between matrices of different size
M1 -= M5; // Compilation error: No compound assignment between matrices of different size
M2 *= M6; // OK: Multiplication assignment between two row-major matrices
\endcode
// Note that the multiplication assignment potentially changes the number of columns of the
// target matrix:
\f$\left(\begin{array}{*{3}{c}}
2 & 0 & 1 \\
0 & 3 & 2 \\
\end{array}\right) \times
\left(\begin{array}{*{2}{c}}
4 & 0 \\
1 & 0 \\
0 & 3 \\
\end{array}\right) =
\left(\begin{array}{*{2}{c}}
8 & 3 \\
3 & 6 \\
\end{array}\right)\f$
// Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a
// multiplication assignment with other square matrices of the same dimensions.
//
//
// \n \section matrix_operations_element_access Element Access
// <hr>
//
// \n \subsection matrix_operations_function_call_operator_1 Function Call Operator
//
// The easiest way to access a specific dense or sparse matrix element is via the function call
// operator. The indices to access a matrix are zero-based:
\code
blaze::DynamicMatrix<int> M1( 4UL, 6UL );
M1(0,0) = 1;
M1(0,1) = 3;
// ...
blaze::CompressedMatrix<double> M2( 5UL, 3UL );
M2(0,2) = 4.1;
M2(1,1) = -6.3;
\endcode
// Since dense matrices allocate enough memory for all contained elements, using the function
// call operator on a dense matrix directly returns a reference to the accessed value. In case
// of a sparse matrix, if the accessed value is currently not contained in the matrix, the
// value is inserted into the matrix prior to returning a reference to the value, which can
// be much more expensive than the direct access to a dense matrix. Consider the following
// example:
\code
blaze::CompressedMatrix<int> M1( 4UL, 4UL );
for( size_t i=0UL; i<M1.rows(); ++i ) {
for( size_t j=0UL; j<M1.columns(); ++j ) {
... = M1(i,j);
}
}
\endcode
// Although the compressed matrix is only used for read access within the for loop, using the
// function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore
// the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators.
//
// \n \subsection matrix_operations_iterators Iterators
//
// All matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end() and \c cend() functions to traverse all contained elements by iterator. Note that
// it is not possible to traverse all elements of the matrix, but that it is only possible to
// traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and
// \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of
// a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 4UL, 6UL );
// Traversing the matrix by Iterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
// Traversing the matrix by ConstIterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) {
// ...
}
}
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) {
// ...
}
}
\endcode
// \n \section matrix_operations_element_insertion Element Insertion
// <hr>
//
// Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse
// matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements
// to the matrix.
//
// \n \subsection matrix_operations_function_call_operator_2 Function Call Operator
//
// The first possibility to add elements to a sparse matrix is the function call operator:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int> M1( 3UL, 4UL );
M1(1,2) = 9;
\endcode
// In case the element at the given position is not yet contained in the sparse matrix, it is
// automatically inserted. Otherwise the old value is replaced by the new value 9. The operator
// returns a reference to the sparse matrix element.
//
// \n \subsection matrix_operations_set .set()
//
// An alternative to the function call operator is the \c set() function: In case the element is
// not yet contained in the matrix the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at position (2,0)
M1.set( 2, 0, 1 );
\endcode
// \n \subsection matrix_operations_insert .insert()
// The insertion of elements can be better controlled via the \c insert() function. In contrast
// to the function call operator and the \c set() function it emits an exception in case the
// element is already contained in the matrix. In order to check for this case, the \c find()
// function can be used:
\code
// In case the element at position (2,3) is not yet contained in the matrix it is inserted
// with a value of 4.
if( M1.find( 2, 3 ) == M1.end( 2 ) )
M1.insert( 2, 3, 4 );
\endcode
// \n \subsection matrix_operations_append .append()
//
// Although the \c insert() function is very flexible, due to performance reasons it is not
// suited for the setup of large sparse matrices. A very efficient, yet also very low-level
// way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to
// provide enough capacity to insert a new element in the specified row/column. Additionally,
// the index of the new element must be larger than the index of the previous element in the
// same row/column. Violating these conditions results in undefined behavior!
\code
M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0
M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1
M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2
// ...
\endcode
// The most efficient way to fill a sparse matrix with elements, however, is a combination of
// \c reserve(), \c append(), and the \c finalize() function:
\code
// Setup of the compressed row-major matrix
//
// ( 0 1 0 2 0 )
// A = ( 0 0 0 0 0 )
// ( 3 0 0 0 0 )
//
blaze::CompressedMatrix<int> M1( 3UL, 5UL );
M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements
M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1
M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3
M1.finalize( 0 ); // Finalizing row 0
M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2
M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0
M1.finalize( 2 ); // Finalizing row 2
\endcode
// \note The \c finalize() function has to be explicitly called for each row or column, even
// for empty ones!
// \note Although \c append() does not allocate new memory, it still invalidates all iterators
// returned by the \c end() functions!
//
//
// \n \section matrix_operations_element_removal Element Removal
// <hr>
//
// \subsection matrix_operations_erase .erase()
//
// The \c erase() member functions can be used to remove elements from a sparse matrix. The
// following example gives an impression of the five different flavors of \c erase():
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Erasing the element at position (21,23)
A.erase( 21, 23 );
// Erasing a single element in row 17 via iterator
A.erase( 17, A.find( 17, 4 ) );
// Erasing all non-zero elements in the range [7..24] of row 33
A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) );
// Erasing all non-zero elements with a value larger than 9 by passing a unary predicate
A.erase( []( int i ){ return i > 9; } );
// Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5
CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) );
CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) );
A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } );
\endcode
// \n \section matrix_operations_element_lookup Element Lookup
// <hr>
//
// A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever
// accessing a matrix element at a specific position a lookup operation is required. Whereas the
// function call operator is performing this lookup automatically, it is also possible to use the
// \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup.
//
// \n \subsection matrix_operations_find .find()
//
// The \c find() function can be used to check whether a specific element is contained in the
// sparse matrix. It specifically searches for the element at the specified position. In case
// the element is found, the function returns an iterator to the element. Otherwise an iterator
// just past the last non-zero element of the according row or column (the \c end() iterator)
// is returned. Note that the returned iterator is subject to invalidation due to inserting
// operations via the function call operator, the \c set() function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the element at position (7,17). In case the element is not
// contained in the vector, the end() iterator of row 7 is returned.
CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) );
if( pos != A.end( 7 ) ) {
// ...
}
\endcode
// \n \subsection matrix_operations_lowerbound .lowerBound()
//
// In case of a row-major matrix, this function returns a row iterator to the first element with
// an index not less than the given column index. In case of a column-major matrix, the function
// returns a column iterator to the first element with an index not less than the given row
// index. In combination with the \c upperBound() function this function can be used to create a
// pair of iterators specifying a range of indices. Note that the returned iterator is subject
// to invalidation due to inserting operations via the function call operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the lower bound of column index 17 in row 7.
CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) );
// Searching the upper bound of column index 28 in row 7
CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) );
// Erasing all elements in the specified range
A.erase( 7, pos1, pos2 );
\endcode
// \n \subsection matrix_operations_upperbound .upperBound()
//
// In case of a row-major matrix, this function returns a row iterator to the first element with
// an index greater than the given column index. In case of a column-major matrix, the function
// returns a column iterator to the first element with an index greater than the given row
// index. In combination with the \c lowerBound() function this function can be used to create a
// pair of iterators specifying a range of indices. Note that the returned iterator is subject
// to invalidation due to inserting operations via the function call operator, the \c set()
// function or the \c insert() function!
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,columnMajor> A( 42, 53 );
// ... Initialization of the matrix
// Searching the lower bound of row index 17 in column 9.
CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) );
// Searching the upper bound of row index 28 in column 9
CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) );
// Erasing all elements in the specified range
A.erase( 9, pos1, pos2 );
\endcode
// \n \section matrix_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection matrix_operations_rows .rows() / rows()
//
// The current number of rows of a matrix can be acquired via the \c rows() member function:
\code
// Instantiating a dynamic matrix with 10 rows and 8 columns
blaze::DynamicMatrix<int> M1( 10UL, 8UL );
M1.rows(); // Returns 10
// Instantiating a compressed matrix with 8 rows and 12 columns
blaze::CompressedMatrix<double> M2( 8UL, 12UL );
M2.rows(); // Returns 8
\endcode
// Alternatively, the free functions \c rows() can be used to query the current number of rows of
// a matrix. In contrast to the member function, the free function can also be used to query the
// number of rows of a matrix expression:
\code
rows( M1 ); // Returns 10, i.e. has the same effect as the member function
rows( M2 ); // Returns 8, i.e. has the same effect as the member function
rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix
\endcode
// \n \subsection matrix_operations_columns .columns() / columns()
//
// The current number of columns of a matrix can be acquired via the \c columns() member function:
\code
// Instantiating a dynamic matrix with 6 rows and 8 columns
blaze::DynamicMatrix<int> M1( 6UL, 8UL );
M1.columns(); // Returns 8
// Instantiating a compressed matrix with 8 rows and 7 columns
blaze::CompressedMatrix<double> M2( 8UL, 7UL );
M2.columns(); // Returns 7
\endcode
// There is also a free function \c columns() available, which can also be used to query the number
// of columns of a matrix expression:
\code
columns( M1 ); // Returns 8, i.e. has the same effect as the member function
columns( M2 ); // Returns 7, i.e. has the same effect as the member function
columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix
\endcode
// \subsection matrix_operations_size size()
//
// The \c size() function returns the total number of elements of a matrix:
\code
// Instantiating a dynamic matrix with 6 rows and 8 columns
blaze::DynamicMatrix<int> M1( 6UL, 8UL );
size( M1 ); // Returns 48
// Instantiating a compressed matrix with 8 rows and 7 columns
blaze::CompressedMatrix<double> M2( 8UL, 7UL );
size( M2 ); // Returns 56
\endcode
// \subsection matrix_operations_spacing .spacing() / spacing()
//
// The total number of elements of a row or column of a dense matrix, including potential padding
// elements, can be acquired via the \c spacing() member function. In case of a row-major matrix
// (i.e. in case the storage order is set to blaze::rowMajor) the function returns the spacing
// between two rows, in case of a column-major matrix (i.e. in case the storage flag is set to
// blaze::columnMajor) the function returns the spacing between two columns:
\code
// Instantiating a row-major dynamic matrix with 7 rows and 8 columns
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL );
M1.spacing(); // Returns the total number of elements in a row
// Instantiating a column-major dynamic matrix with 8 rows and 12 columns
blaze::DynamicMatrix<double,blaze::columnMajor> M2( 8UL, 12UL );
M2.spacing(); // Returns the total number of elements in a column
\endcode
// Alternatively, the free functions \c spacing() can be used to query the current number of
// elements in a row/column.
\code
spacing( M1 ); // Returns the total number of elements in a row
spacing( M2 ); // Returns the total number of elements in a column
\endcode
// \n \subsection matrix_operations_capacity .capacity() / capacity()
//
// The \c capacity() member function returns the internal capacity of a dense or sparse matrix.
// Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of
// a dense matrix the capacity will always be greater or equal than the total number of elements
// of the matrix. In case of a sparse matrix, the capacity will usually be much less than the
// total number of elements.
\code
blaze::DynamicMatrix<float> M1( 5UL, 7UL );
blaze::StaticMatrix<float,7UL,4UL> M2;
M1.capacity(); // Returns at least 35
M2.capacity(); // Returns at least 28
\endcode
// There is also a free function \c capacity() available to query the capacity. However, please
// note that this function cannot be used to query the capacity of a matrix expression:
\code
capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function
capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function
capacity( M1 * M2 ); // Compilation error!
\endcode
// \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros()
//
// For both dense and sparse matrices the current number of non-zero elements can be queried
// via the \c nonZeros() member function. In case of matrices there are two flavors of the
// \c nonZeros() function: One returns the total number of non-zero elements in the matrix,
// the second returns the number of non-zero elements in a specific row (in case of a row-major
// matrix) or column (in case of a column-major matrix). Sparse matrices directly return their
// number of non-zero elements, dense matrices traverse their elements and count the number of
// non-zero elements.
\code
blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL );
// ... Initializing the dense matrix
M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix
M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2
\endcode
\code
blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL );
// ... Initializing the sparse matrix
M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix
M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in a
// matrix expression. However, the result is not the exact number of non-zero elements, but may be
// a rough estimation:
\code
nonZeros( M1 ); // Has the same effect as the member function
nonZeros( M1, 2 ); // Has the same effect as the member function
nonZeros( M2 ); // Has the same effect as the member function
nonZeros( M2, 3 ); // Has the same effect as the member function
nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression
\endcode
// \n \subsection matrix_operations_isempty isEmpty()
//
// The \c isEmpty() function returns whether the total number of elements of the matrix is zero:
\code
blaze::DynamicMatrix<int> A; // Create an empty matrix
isEmpty( A ); // Returns true
A.resize( 5, 0 ); // Resize to a 5x0 matrix
isEmpty( A ); // Returns true
A.resize( 5, 3 ); // Resize to a 5x3 matrix
isEmpty( A ); // Returns false
\endcode
// \n \subsection matrix_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number
// elements:
\code
blaze::DynamicMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
\code
blaze::CompressedMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
// If at least one element of the matrix is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for matrices with floating point
// elements. The attempt to use it for a matrix with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection matrix_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse matrix is in default state:
\code
blaze::HybridMatrix<int,5UL,4UL> A;
// ... Resizing and initialization
if( isDefault( A ) ) { ... }
\endcode
// A matrix is in default state if it appears to just have been default constructed. All resizable
// matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in
// default state if its size is equal to zero. A non-resizable matrix (\c StaticMatrix and all
// submatrices) is in default state if all its elements are in default state. For instance, in case
// the matrix is instantiated for a built-in integral or floating point data type, the function
// returns \c true in case all matrix elements are 0 and \c false in case any matrix element is
// not 0.
//
//
// \n \subsection matrix_operations_isSquare isSquare()
//
// Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the
// number of columns) can be checked via the \c isSquare() function:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
if( isSquare( A ) ) { ... }
\endcode
// \n \subsection matrix_operations_issymmetric isSymmetric()
//
// Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix
// is symmetric:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isSymmetric( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be symmetric!
//
//
// \n \subsection matrix_operations_isUniform isUniform()
//
// In order to check if all matrix elements are identical, the \c isUniform() function can be used:
\code
blaze::DynamicMatrix<int> A;
// ... Resizing and initialization
if( isUniform( A ) ) { ... }
\endcode
// Note that in case of a sparse matrix the zero elements are also taken into account!
//
//
// \n \subsection matrix_operations_isZero isZero()
//
// In order to check if all matrix elements are zero, the \c isZero() function can be used:
\code
blaze::DynamicMatrix<int> A;
// ... Resizing and initialization
if( isZero( A ) ) { ... }
\endcode
// \n \subsection matrix_operations_islower isLower()
//
// Via the \c isLower() function it is possible to check whether a dense or sparse matrix is
// lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower triangular!
//
//
// \n \subsection matrix_operations_isunilower isUniLower()
//
// Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is
// lower unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlylower isStrictlyLower()
//
// Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix
// is strictly lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly lower triangular!
//
//
// \n \subsection matrix_operations_isUpper isUpper()
//
// Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is
// upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper triangular!
//
//
// \n \subsection matrix_operations_isuniupper isUniUpper()
//
// Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is
// upper unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper()
//
// Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix
// is strictly upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly upper triangular!
//
//
// \n \subsection matrix_operations_isdiagonal isDiagonal()
//
// The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix,
// i.e. if it has only elements on its diagonal and if the non-diagonal elements are default
// elements:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isDiagonal( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be diagonal!
//
//
// \n \subsection matrix_operations_isidentity isIdentity()
//
// The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix,
// i.e. if all diagonal elements are 1 and all non-diagonal elements are 0:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isIdentity( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be identity matrices!
//
//
// \n \subsection matrix_operations_matrix_determinant det()
//
// The determinant of a square dense matrix can be computed by means of the \c det() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
double d = det( A ); // Compute the determinant of A
\endcode
// In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
// \note The \c det() function can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The function depends on LAPACK kernels. Thus the function can only be used if the
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error
// will be created.
//
//
// \n \subsection matrix_operations_matrix_trans trans()
//
// Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into
// a column-major matrix and vice versa:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL );
blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL );
M1 = M2; // Assigning a column-major matrix to a row-major matrix
M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1
M1 += trans( M2 ); // Addition assignment of two row-major matrices
\endcode
// \n \subsection matrix_operations_ctrans ctrans()
//
// The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian
// conjugate, or transjugate) can be computed via the \c ctrans() function:
\code
blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL );
blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL );
M1 = ctrans( M2 ); // Compute the conjugate transpose matrix
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix
M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix
\endcode
// \n \subsection matrix_operations_reverse reverse()
//
// Via the \c reverse() function it is possible to reverse the rows or columns of a dense or sparse
// matrix. The following example gives an impression of both alternatives:
\code
blaze::DynamicMatrix<int,rowMajor> A{ { 1, 0, 2, 3 },
{ 2, 4, 0, 1 },
{ 0, 3, 1, 0 } };
blaze::DynamicMatrix<int> B;
// Reversing the rows results in the matrix
//
// ( 0 3 1 0 )
// ( 2 4 0 1 )
// ( 1 0 2 3 )
//
B = reverse<rowwise>( A );
// Reversing the columns results in the matrix
//
// ( 3 2 0 1 )
// ( 1 0 4 2 )
// ( 0 1 3 0 )
//
B = reverse<columnwise>( A );
\endcode
// \n \subsection matrix_operations_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given matrix expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a lower and a strictly lower dense
// matrix:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::StrictlyLowerMatrix;
LowerMatrix< DynamicMatrix<double> > A;
StrictlyLowerMatrix< DynamicMatrix<double> > B;
// ... Resizing and initialization
auto C = evaluate( A * B );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary matrix is created and no copy operation is performed. Instead, the result
// is directly written to the target matrix due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation
LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation
DynamicMatrix<double> F( A * B ); // Temporary & copy operation
D = evaluate( A * B ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicMatrix<double> A, B, C, D;
D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix
D = A + eval( B * C ); // No creation of a temporary matrix
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
//
// \n \section matrix_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection matrix_operations_resize_reserve .resize() / .reserve()
//
// The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template
// parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns
// of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<int,rowMajor> M1;
CompressedMatrix<int,columnMajor> M2( 3UL, 2UL );
// Adapting the number of rows and columns via the resize() function. The (optional)
// third parameter specifies whether the existing elements should be preserved. Per
// default, the existing elements are preserved.
M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type
// remain uninitialized, elements of class type are default
// constructed.
M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the
// new elements are NOT initialized!
M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved.
M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost.
\endcode
// Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices)
// on the matrix:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix
auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix
M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view
\endcode
// When the internal capacity of a matrix is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicMatrix<int> M1;
M1.reserve( 100 );
M1.rows(); // Returns 0
M1.capacity(); // Returns at least 100
\endcode
// Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or
// column (for a column-major matrix):
\code
blaze::CompressedMatrix<int> M1( 4UL, 6UL );
M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1
\endcode
// \n \subsection matrix_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of matrices with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix
M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved
M1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c rows() times
// \c columns(). Please also note that in case a reallocation occurs, all iterators (including
// \c end() iterators), all pointers and references to elements of this matrix are invalidated.
//
//
// \subsection matrix_operations_reset_clear reset() / clear()
//
// In order to reset all elements of a dense or sparse matrix, the \c reset() function can be
// used. The number of rows and columns of the matrix are preserved:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
reset( M1 ); // Resetting all elements
M1.rows(); // Returns 4: size and capacity remain unchanged
\endcode
// Alternatively, only a single row or column of the matrix can be reset:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix
blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix
reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix
reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix
\endcode
// In order to reset a row of a column-major matrix or a column of a row-major matrix, use a
// row or column view (see \ref views_rows and \ref views_columns).
//
// In order to return a matrix to its default state (i.e. the state of a default constructed
// matrix), the \c clear() function can be used:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
clear( M1 ); // Resetting the entire matrix
M1.rows(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// \n \subsection matrix_operations_matrix_transpose transpose()
//
// In addition to the non-modifying \c trans() function, matrices can be transposed in-place via
// the \c transpose() function:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
transpose( M ); // In-place transpose operation.
M = trans( M ); // Same as above
\endcode
// Note however that the transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_ctranspose ctranspose()
//
// The \c ctranspose() function can be used to perform an in-place conjugate transpose operation:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
ctranspose( M ); // In-place conjugate transpose operation.
M = ctrans( M ); // Same as above
\endcode
// Note however that the conjugate transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two matrices
// of the same type:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL );
blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL );
swap( M1, M2 ); // Swapping the contents of M1 and M2
\endcode
// \n \section matrix_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection matrix_operations_min_max min() / max()
//
// The \c min() and \c max() functions can be used for a single matrix or multiple matrices. If
// passed a single matrix, the functions return the smallest and largest element of the given
// dense matrix or the smallest and largest non-zero element of the given sparse matrix,
// respectively:
\code
blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 },
{ -4, 0, 1 } };
min( A ); // Returns -5
max( A ); // Returns 7
\endcode
\code
blaze::CompressedMatrix<int> B{ { 1, 0, 3 },
{ 0, 0, 0 } };
min( B ); // Returns 1
max( B ); // Returns 3
\endcode
// For more information on the unary \c min() and \c max() reduction operations see the
// \ref matrix_operations_reduction_operations section.
//
// If passed two or more dense matrices, the \c min() and \c max() functions compute the
// componentwise minimum or maximum of the given matrices, respectively:
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } };
min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 )
max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 )
\endcode
// Please note that sparse matrices can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c max() functions can be used to compute the
// smallest and largest element of a matrix expression:
\code
min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix
max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix
\endcode
// \n \subsection matrix_operators_softmax softmax()
//
// The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called
// the normalized exponential function, of a given dense matrix can be computed via \c softmax().
// The resulting dense matrix consists of real values in the range (0..1], which add up to 1.
\code
blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 }
, { 4.0, 1.0, 2.0 }
, { 3.0, 4.0, 1.0 } };
blaze::StaticMatrix<double,3UL,3UL> B;
// Evaluating the softmax function
B = softmax( A ); // Results in ( 0.0157764 0.0428847 0.116573 )
// ( 0.316878 0.0157764 0.0428847 )
// ( 0.116573 0.316878 0.0157764 )
double b = sum( B ); // Results in 1
\endcode
// Alternatively it is possible to compute a row- or columnwise \c softmax() function. The
// resulting dense matrix consists of real values in the range (0..1], which add up to the number
// of rows or columns, respectively.
\code
using blaze::rowwise;
using blaze::columnwise;
blaze::StaticMatrix<double,3UL,3UL> C, D;
// Evaluating the rowwise softmax function
C = softmax<rowwise>( A ); // Results in ( 0.0900306 0.244728 0.665241 )
// ( 0.843795 0.0420101 0.114195 )
// ( 0.259496 0.705385 0.035119 )
double c = sum( C ); // Results in 3 (the number of rows of A)
// Evaluating the columnwise softmax function
D = softmax<columnwise>( A ); // Results in ( 0.035119 0.114195 0.665241 )
// ( 0.705385 0.0420101 0.244728 )
// ( 0.259496 0.843795 0.0900306 )
double d = sum( D ); // Results in 3 (the number of columns of A)
\endcode
// \n \subsection matrix_operators_trace trace()
//
// The \c trace() function sums the diagonal elements of a square dense or sparse matrix:
\code
blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 }
, { -4, -5, 6 }
, { 7, -8, -9 } };
trace( A ); // Returns the sum of the diagonal elements, i.e. -15
\endcode
// In case the given matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
//
// \n \subsection matrix_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a matrix.
// For instance, the following computation
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 },
{ 4, -5, 6 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) );
\endcode
// results in the matrix
\f$ B = \left(\begin{array}{*{3}{c}}
1 & 2 & 3 \\
4 & 5 & 6 \\
\end{array}\right)\f$
// \n \subsection matrix_operators_sign sign()
//
// The \c sign() function can be used to evaluate the sign of each element of a matrix \a A. For
// each element \c (i,j) the corresponding result is 1 if \a A(i,j) is greater than zero, 0 if
// \a A(i,j) is zero, and -1 if \a A(i,j) is less than zero. For instance, the following use of
// the \c sign() function
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 },
{ 4, 0, -6 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) );
\endcode
// results in the matrix
\f$ B = \left(\begin{array}{*{3}{c}}
-1 & 1 & 0 \\
1 & 0 & -1 \\
\end{array}\right)\f$
// \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up
// each element of a matrix, respectively:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = floor( A ); // Rounding down each element of the matrix
B = ceil ( A ); // Rounding up each element of the matrix
B = trunc( A ); // Truncating each element of the matrix
B = round( A ); // Rounding each element of the matrix
\endcode
// \n \subsection matrix_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse matrix to compute the complex
// conjugate of each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Computing the matrix of conjugate values
// ( (1, 0) (-2, 1) )
// ( (1,-1) ( 0,-1) )
StaticMatrix<cplx,2UL,2UL> B;
B = conj( A );
\endcode
// Additionally, matrices can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicMatrix<cplx> C( 5UL, 2UL );
conjugate( C ); // In-place conjugate operation.
C = conj( C ); // Same as above
\endcode
// \n \subsection matrix_operators_real real()
//
// The \c real() function can be used on a dense or sparse matrix to extract the real part of
// each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the real part of each matrix element
// ( 1 -2 )
// ( 1 0 )
StaticMatrix<double,2UL,2UL> B;
B = real( A );
\endcode
// \n \subsection matrix_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part
// of each element of the matrix:
\code
using blaze::StaticMatrix;
using cplx = std::complex<double>;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the imaginary part of each matrix element
// ( 0 -1 )
// ( 1 1 )
StaticMatrix<double,2UL,2UL> B;
B = imag( A );
\endcode
// \n \subsection matrix_operators_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// matrix can be computed:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
B = sqrt( A ); // Computes the square root of each element
C = invsqrt( A ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a matrix:
\code
blaze::DynamicMatrix<double> A, B, C;
B = cbrt( A ); // Computes the cubic root of each element
C = invcbrt( A ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operations_hypot hypot()
//
// The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of
// dense matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
   C = hypot( A, B );  // Computes the componentwise hypotenuse
\endcode
// \n \subsection matrix_operators_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a matrix to a specific range:
\code
blaze::DynamicMatrix<double> A, B;
B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a matrix.
// If passed a matrix and a numeric exponent, the function computes the exponential value of each
// element of the matrix using the same exponent. If passed a second matrix, the function computes
// the componentwise exponential value:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
C = pow( A, 1.2 ); // Computes the exponential value of each element
C = pow( A, B ); // Computes the componentwise exponential value
\endcode
// \n \subsection matrix_operators_exp exp() / exp2() / exp10()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// matrix, respectively:
\code
blaze::HybridMatrix<double,3UL,3UL> A, B;
B = exp( A ); // Computes the base e exponential of each element
B = exp2( A ); // Computes the base 2 exponential of each element
B = exp10( A ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_log log() / log2() / log10()
//
// The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary
// and common logarithm of each element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = log( A ); // Computes the natural logarithm of each element
B = log2( A ); // Computes the binary logarithm of each element
B = log10( A ); // Computes the common logarithm of each element
\endcode
// \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sin( A ); // Computes the sine of each element of the matrix
B = cos( A ); // Computes the cosine of each element of the matrix
B = tan( A ); // Computes the tangent of each element of the matrix
B = asin( A ); // Computes the inverse sine of each element of the matrix
B = acos( A ); // Computes the inverse cosine of each element of the matrix
B = atan( A ); // Computes the inverse tangent of each element of the matrix
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix
B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix
B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix
B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix
B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix
B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix
\endcode
// \n \subsection matrix_operations_atan2 atan2()
//
// The multi-valued inverse tangent is available for a pair of dense matrices:
\code
blaze::DynamicMatrix<double> A, B, C;
C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent
\endcode
// \n \subsection matrix_operators_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = erf( A ); // Computes the error function of each element
B = erfc( A ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operations_map map() / forEach()
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on matrices. The unary \c map() function can be used to apply a custom operation
// on each element of a dense or sparse matrix. For instance, the following example demonstrates
// a custom square root computation via a lambda:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense matrices. The following example demonstrates the merging of two matrices of double
// precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
// ( ( 2.1, 0.3) (-4.2, 1.4) )
// ( ( 1.0, 2.9) ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Although the computation can be parallelized it is not vectorized and thus cannot perform at
// peak performance. However, it is also possible to create vectorized custom operations. See
// \ref custom_operations for a detailed overview of the possibilities of custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used (even for binary custom operations), but the function might be deprecated in future
// releases of \b Blaze.
//
//
// \n \section matrix_operations_reduction_operations Reduction Operations
// <hr>
//
// \subsection matrix_operations_reduction_operations_reduce reduce()
//
// The \c reduce() function performs either a total reduction, a rowwise reduction or a columnwise
// reduction of the elements of the given dense matrix or the non-zero elements of the given sparse
// matrix. The following examples demonstrate the total reduction of a dense and sparse matrix:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
const double totalsum1 = reduce( A, blaze::Add() );
const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } );
\endcode
\code
blaze::CompressedMatrix<double> A;
// ... Resizing and initialization
const double totalsum1 = reduce( A, blaze::Add() );
const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } );
\endcode
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() function performs a
// column-wise or row-wise reduction, respectively. In case \c blaze::columnwise is specified, the
// (non-zero) elements of the matrix are reduced column-wise and the result is a row vector. In
// case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are reduced row-wise
// and the result is a column vector:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
blaze::DynamicVector<double,rowVector> colsum1, colsum2;
// ... Resizing and initialization
colsum1 = reduce<columnwise>( A, blaze::Add() );
colsum2 = reduce<columnwise>( B, []( double a, double b ){ return a + b; } );
\endcode
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
blaze::DynamicVector<double,columnVector> rowsum1, rowsum2;
// ... Resizing and initialization
rowsum1 = reduce<rowwise>( A, blaze::Add() );
rowsum2 = reduce<rowwise>( B, []( double a, double b ){ return a + b; } );
\endcode
// As demonstrated in the examples it is possible to pass any binary callable as custom reduction
// operation. However, for instance in the case of lambdas the vectorization of the reduction
// operation is compiler dependent and might not perform at peak performance. However, it is also
// possible to create vectorized custom operations. See \ref custom_operations for a detailed
// overview of the possibilities of custom operations.
//
// Please note that the evaluation order of the \c reduce() function is unspecified. Thus the
// behavior is non-deterministic if the given reduction operation is not associative or not
// commutative. Also, the operation is undefined if the given reduction operation modifies the
// values.
//
// \n \subsection matrix_operations_reduction_operations_sum sum()
//
// The \c sum() function reduces the elements of the given dense matrix or the non-zero elements
// of the given sparse matrix by means of addition:
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalsum = sum( A ); // Results in 10
\endcode
\code
blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalsum = sum( A ); // Results in 10
\endcode
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a
// column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified,
// the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector.
// In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up
// row-wise and the result is a column vector:
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,rowVector> colsum1, colsum2;
colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 )
colsum2 = sum<columnwise>( B ); // Same result
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,columnVector> rowsum1, rowsum2;
rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 )
rowsum2 = sum<rowwise>( B ); // Same result
\endcode
// Please note that the evaluation order of the \c sum() function is unspecified.
//
// \n \subsection matrix_operations_reduction_operations_prod prod()
//
// The \c prod() function reduces the elements of the given dense matrix or the non-zero elements
// of the given sparse matrix by means of multiplication:
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalprod = prod( A ); // Results in 24
\endcode
\code
blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalprod = prod( A ); // Results in 24
\endcode
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a
// column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified,
// the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector.
// In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied
// row-wise and the result is a column vector:
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,rowVector> colprod1, colprod2;
colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 )
colprod2 = prod<columnwise>( B ); // Results in ( 1, 3, 8 )
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,columnVector> rowprod1, rowprod2;
rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 )
rowprod2 = prod<rowwise>( B ); // Results in ( 2, 12 )
\endcode
// Please note that the evaluation order of the \c prod() function is unspecified.
//
// \n \subsection matrix_operations_reduction_operations_min min()
//
// The unary \c min() function returns the smallest element of the given dense matrix or the
// smallest non-zero element of the given sparse matrix. This function can only be used for
// element types that support the smaller-than relationship. In case the given matrix currently
// has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of
// fundamental data types).
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalmin = min( A ); // Results in 1
\endcode
\code
blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } };
const int totalmin = min( A ); // Results in 1
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed matrix has only 2 non-zero elements.
// However, the minimum of this matrix is 1.
//
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the
// smallest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise
// is specified, the smallest (non-zero) element of each column is determined and the result is
// a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each
// row is determined and the result is a column vector.
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,rowVector> colmin1, colmin2;
colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 )
colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 )
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::DynamicVector<int,columnVector> rowmin1, rowmin2;
rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 )
rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 )
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account.
//
// \n \subsection matrix_operations_reduction_operations_max max()
//
// The unary \c max() function returns the largest element of the given dense matrix or the
// largest non-zero element of the given sparse matrix. This function can only be used for
// element types that support the smaller-than relationship. In case the given matrix currently
// has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of
// fundamental data types).
\code
blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } };
const int totalmax = max( A ); // Results in 4
\endcode
\code
blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } };
const int totalmax = max( A ); // Results in -1
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account. In the previous example the compressed matrix has only 2 non-zero elements.
// However, the maximum of this matrix is -1.
//
// By specifying \c blaze::columnwise or \c blaze::rowwise the \c max() function determines the
// largest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise
// is specified, the largest (non-zero) element of each column is determined and the result is
// a row vector. In case \c blaze::rowwise is specified, the largest (non-zero) element of each
// row is determined and the result is a column vector.
\code
using blaze::columnwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } };
blaze::DynamicVector<int,rowVector> colmax1, colmax2;
colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 )
colmax2 = max<columnwise>( B ); // Results in ( -1, -3, -2 )
\endcode
\code
using blaze::rowwise;
blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } };
blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } };
blaze::DynamicVector<int,columnVector> rowmax1, rowmax2;
rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 )
rowmax2 = max<rowwise>( B ); // Results in ( -1, -1 )
\endcode
// \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT
// taken into account.
//
//
// \n \section matrix_operations_norms Norms
// <hr>
//
// \subsection matrix_operations_norms_norm norm()
//
// The \c norm() function computes the L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = norm( A );
const double norm2 = norm( B );
\endcode
// \n \subsection matrix_operations_norms_sqrnorm sqrNorm()
//
// The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = sqrNorm( A );
const double norm2 = sqrNorm( B );
\endcode
// \n \subsection matrix_operations_norms_l1norm l1Norm()
//
// The \c l1Norm() function computes the L1 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l1Norm( A );
const double norm2 = l1Norm( B );
\endcode
// \n \subsection matrix_operations_norms_l2norm l2Norm()
//
// The \c l2Norm() function computes the L2 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l2Norm( A );
const double norm2 = l2Norm( B );
\endcode
// \n \subsection matrix_operations_norms_l3norm l3Norm()
//
// The \c l3Norm() function computes the L3 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l3Norm( A );
const double norm2 = l3Norm( B );
\endcode
// \n \subsection matrix_operations_norms_l4norm l4Norm()
//
// The \c l4Norm() function computes the L4 norm of the given dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = l4Norm( A );
const double norm2 = l4Norm( B );
\endcode
// \n \subsection matrix_operations_norms_lpnorm lpNorm()
//
// The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix,
// where the norm is specified by either a compile time or a runtime argument:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = lpNorm<2>( A ); // Compile time argument
const double norm2 = lpNorm( B, 2.3 ); // Runtime argument
\endcode
// \n \subsection matrix_operations_norms_maxnorm linfNorm() / maxNorm()
//
// The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given
// dense or sparse matrix:
\code
blaze::DynamicMatrix<double> A;
blaze::CompressedMatrix<double> B;
// ... Resizing and initialization
const double norm1 = linfNorm( A );
const double norm2 = maxNorm( B );
\endcode
// \n \section matrix_operations_scalar_expansion Scalar Expansion
// <hr>
//
// By means of the \c uniform() function it is possible to expand a scalar value into a dense,
// uniform matrix. By default, the resulting uniform matrix is a row-major matrix, but it is
// possible to specify the storage order explicitly:
\code
using blaze::rowMajor;
int scalar = 5;
blaze::DynamicMatrix<int,rowMajor> A;
// ... Resizing and initialization
// Expansion of 'scalar' to a 3x5 row-major matrix
//
// ( 5 5 5 5 5 )
// ( 5 5 5 5 5 )
// ( 5 5 5 5 5 )
//
A = uniform( 3UL, 5UL, scalar );
A = uniform<columnMajor>( 3UL, 5UL, scalar );
\endcode
// \n \section matrix_operations_statistic_operations Statistic Operations
// <hr>
//
// \subsection matrix_operations_mean mean()
//
// The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or
// sparse matrix can be computed via the \c mean() function. In case of a sparse matrix, both the
// non-zero and zero elements are taken into account. The following example demonstrates the
// computation of the mean of a dense matrix:
\code
blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 }
, { 2, 6, 3, 1, 0 } };
const double m = mean( A ); // Results in 3.3 (i.e. 33/10)
\endcode
// In case the number of rows or columns of the given matrix is 0, a \a std::invalid_argument is
// thrown.
//
// Alternatively it is possible to compute the row- or columnwise mean:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 }
, { 2, 6, 3, 1, 0 } };
blaze::DynamicVector<double,columnVector> rm;
blaze::DynamicVector<double,rowVector> cm;
rm = mean<rowwise>( A ); // Results in ( 4.2 2.4 )
cm = mean<columnwise>( A ); // Results in ( 1.5 5.0 3.0 3.5 3.5 )
\endcode
// In case the rowwise mean is computed and the number of columns of the given matrix is 0 or
// in case the columnwise mean is computed and the number of rows of the given matrix is 0, a
// \a std::invalid_argument is thrown.
//
// \n \subsection matrix_operations_var var()
//
// The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse matrix
// can be computed via the \c var() function. In case of a sparse matrix, both the non-zero and
// zero elements are taken into account. The following example demonstrates the computation of
// the variance of a dense matrix:
\code
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
const double v = var( A ); // Results in 6.5
\endcode
// In case the size of the given matrix is smaller than 2, a \a std::invalid_argument is thrown.
//
// Alternatively it is possible to compute the row- or columnwise variance:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
blaze::DynamicVector<double,columnVector> rv;
blaze::DynamicVector<double,rowVector> cv;
rv = var<rowwise>( A ); // Results in ( 1 4 9 )
cv = var<columnwise>( A ); // Results in ( 19 3 1 )
\endcode
// In case the rowwise variance is computed and the number of columns of the given matrix is
// smaller than 2 or in case the columnwise variance is computed and the number of rows of the
// given matrix is smaller than 2, a \a std::invalid_argument is thrown.
//
// \n \subsection matrix_operations_stddev stddev()
//
// The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a
// dense or sparse matrix can be computed via the \c stddev() function. In case of a sparse
// matrix, both the non-zero and zero elements are taken into account. The following example
// demonstrates the computation of the standard deviation of a dense matrix:
\code
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
const double s = stddev( A ); // Results in sqrt(6.5)
\endcode
// In case the size of the given matrix is smaller than 2, a \a std::invalid_argument is thrown.
//
// Alternatively it is possible to compute the row- or columnwise standard deviation:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
, { 2, 6, 4 }
, { 9, 6, 3 } };
blaze::DynamicVector<double,columnVector> rs;
blaze::DynamicVector<double,rowVector> cs;
rs = stddev<rowwise>( A ); // Results in ( 1 2 3 )
cs = stddev<columnwise>( A ); // Results in ( sqrt(19) sqrt(3) 1 )
\endcode
// In case the rowwise standard deviation is computed and the number of columns of the given
// matrix is smaller than 2 or in case the columnwise standard deviation is computed and the
// number of rows of the given matrix is smaller than 2, a \a std::invalid_argument is thrown.
//
//
// \n \section matrix_operations_declaration_operations Declaration Operations
// <hr>
//
// \subsection matrix_operations_declsym declsym()
//
// The \c declsym() operation can be used to explicitly declare any matrix or matrix expression
// as symmetric:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declsym( A );
\endcode
// Any matrix or matrix expression that has been declared as symmetric via \c declsym() will
// gain all the benefits of a symmetric matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
DynamicMatrix<double> A, B, C;
SymmetricMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isSymmetric( declsym( A ) ); // Will always return true without runtime effort
S = declsym( A ); // Omit any runtime check for symmetry
C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declsym() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-symmetric matrix or
// matrix expression as symmetric via the \c declsym() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declherm declherm()
//
// The \c declherm() operation can be used to explicitly declare any matrix or matrix expression
// as Hermitian:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declherm( A );
\endcode
// Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will
// gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
DynamicMatrix<double> A, B, C;
HermitianMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isHermitian( declherm( A ) ); // Will always return true without runtime effort
S = declherm( A ); // Omit any runtime check for Hermitian symmetry
C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declherm() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-Hermitian matrix or
// matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decllow decllow()
//
// The \c decllow() operation can be used to explicitly declare any matrix or matrix expression
// as lower triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decllow( A );
\endcode
// Any matrix or matrix expression that has been declared as lower triangular via \c decllow()
// will gain all the benefits of a lower triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
DynamicMatrix<double> A, B, C;
LowerMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isLower( decllow( A ) ); // Will always return true without runtime effort
L = decllow( A ); // Omit any runtime check for A being a lower matrix
C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decllow() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-lower matrix or
// matrix expression as lower triangular via the \c decllow() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declupp declupp()
//
// The \c declupp() operation can be used to explicitly declare any matrix or matrix expression
// as upper triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declupp( A );
\endcode
// Any matrix or matrix expression that has been declared as upper triangular via \c declupp()
// will gain all the benefits of an upper triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::UpperMatrix;
DynamicMatrix<double> A, B, C;
UpperMatrix< DynamicMatrix<double> > U;
// ... Resizing and initialization
isUpper( declupp( A ) ); // Will always return true without runtime effort
U = declupp( A ); // Omit any runtime check for A being an upper matrix
C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declupp() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-upper matrix or
// matrix expression as upper triangular via the \c declupp() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decldiag decldiag()
//
// The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression
// as diagonal:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decldiag( A );
\endcode
// Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will
// gain all the benefits of a diagonal matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isDiagonal( decldiag( A ) ); // Will always return true without runtime effort
D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix
C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decldiag() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-diagonal matrix
// or matrix expression as diagonal via the \c decldiag() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declid declid()
//
// The \c declid() operation can be used to explicitly declare any matrix or matrix expression
// as identity matrix:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declid( A );
\endcode
// Any matrix or matrix expression that has been declared as identity matrix via \c declid() will
// gain all the benefits of an identity matrix, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isIdentity( declid( A ) ); // Will always return true without runtime effort
D = declid( A );             // Omit any runtime check for A being an identity matrix
C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an
// identity matrix, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declid() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-identity matrix
// or matrix expression as identity matrix via the \c declid() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declzero declzero()
//
// The \c declzero() operation can be used to explicitly declare any matrix or matrix expression
// as zero matrix:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declzero( A );
\endcode
// Any matrix or matrix expression that has been declared as zero matrix via \c declzero() will
// gain all the benefits of a zero matrix, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
isZero( declzero( A ) ); // Will always return true without runtime effort
C = declzero( A ) + B; // Declare the left operand of the matrix addition as a
// zero matrix, i.e. no addition needs to be performed
\endcode
// \warning The \c declzero() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-zero matrix or
// matrix expression as zero matrix via the \c declzero() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \section matrix_operations_matrix_inversion Matrix Inversion
// <hr>
//
// The inverse of a square dense matrix can be computed via the \c inv() function:
\code
blaze::DynamicMatrix<float,blaze::rowMajor> A, B;
// ... Resizing and initialization
B = inv( A ); // Compute the inverse of A
\endcode
// Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert()
// function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
invert( A ); // In-place matrix inversion
\endcode
// Both the \c inv() and the \c invert() functions will automatically select the most suited matrix
// inversion algorithm depending on the size and type of the given matrix. For small matrices of
// up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices
// larger than 6x6 the inversion is performed by means of the most suited matrix decomposition
// method: In case of a general matrix the LU decomposition is used, for symmetric matrices the
// LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and
// for triangular matrices the inverse is computed via a forward or back substitution.
//
// In case the type of the matrix does not provide additional compile time information about its
// structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually
// when calling the \c invert() function:
\code
using blaze::asGeneral;
using blaze::asSymmetric;
using blaze::asHermitian;
using blaze::asLower;
using blaze::asUniLower;
using blaze::asUpper;
using blaze::asUniUpper;
using blaze::asDiagonal;
invert<asGeneral> ( A ); // In-place inversion of a general matrix
invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix
invert<asHermitian>( A ); // In-place inversion of an Hermitian matrix
invert<asLower> ( A ); // In-place inversion of a lower triangular matrix
invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix
invert<asUpper> ( A ); // In-place inversion of an upper triangular matrix
invert<asUniUpper> ( A ); // In-place inversion of an upper unitriangular matrix
invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix
\endcode
// Alternatively, via the \c invert() function it is possible to explicitly specify the inversion
// algorithm:
\code
using blaze::byLU;
using blaze::byLDLT;
using blaze::byLDLH;
using blaze::byLLH;
// In-place inversion of a general matrix by means of an LU decomposition
invert<byLU>( A );
// In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLT>( A );
// In-place inversion of an Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLH>( A );
// In-place inversion of a positive definite matrix by means of a Cholesky decomposition
invert<byLLH>( A );
\endcode
// Whereas the inversion by means of an LU decomposition works for every general square matrix,
// the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is
// restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works
// for Hermitian positive definite matrices. Please note that it is in the responsibility of the
// function caller to guarantee that the selected algorithm is suited for the given matrix. In
// case this precondition is violated the result can be wrong and might not represent the inverse
// of the given matrix!
//
// For both the \c inv() and \c invert() function the matrix inversion fails if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// In all failure cases either a compilation error is created if the failure can be predicted at
// compile time or a \c std::invalid_argument exception is thrown.
//
// \note The matrix inversion can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \note It is not possible to use any kind of view on the expression object returned by the
// \c inv() function. Also, it is not possible to access individual elements via the function call
// operator on the expression object:
\code
row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression!
inv( A )(1,2); // Compilation error: It is not possible to access individual elements!
\endcode
// \note The inversion functions do not provide any exception safety guarantee, i.e. in case an
// exception is thrown the matrix may already have been modified.
//
//
// \n \section matrix_operations_decomposition Matrix Decomposition
// <hr>
//
// \note All decomposition functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \subsection matrix_operations_decomposition_lu LU Decomposition
//
// The LU decomposition of a dense matrix can be computed via the \c lu() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a row-major matrix
assert( A == L * U * P );
\endcode
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a column-major matrix
assert( A == P * L * U );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the
// three matrices \c A, \c L and \c U are required to have the same storage order. Also, please
// note that the way the permutation matrix \c P needs to be applied differs between row-major and
// column-major matrices, since the algorithm uses column interchanges for row-major matrices and
// row interchanges for column-major matrices.
//
// Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates
// the LU decomposition of a symmetric matrix into a lower and upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U;
blaze::DynamicMatrix<double,blaze::columnMajor> P;
lu( A, L, U, P ); // LU decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition
//
// The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
llh( A, L ); // LLH decomposition of a row-major matrix
assert( A == L * ctrans( L ) );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A
// and \c L can have any storage order.
//
// Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates
// the LLH decomposition of a symmetric matrix into a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
llh( A, L ); // Cholesky decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_qr QR Decomposition
//
// The QR decomposition of a dense matrix can be computed via the \c qr() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
blaze::DynamicMatrix<double,blaze::rowMajor> R;
qr( A, Q, R ); // QR decomposition of a row-major matrix
assert( A == Q * R );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c R can have any storage order.
//
// Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates
// the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R;
qr( A, Q, R ); // QR decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_rq RQ Decomposition
//
// Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via
// the \c rq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> R;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
rq( A, R, Q ); // RQ decomposition of a row-major matrix
assert( A == R * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c R and \c Q can have any storage order.
//
// Also the \c rq() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the RQ decomposition of an Hermitian matrix into an upper
// triangular matrix and a general matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
rq( A, R, Q ); // RQ decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_ql QL Decomposition
//
// The QL decomposition of a dense matrix can be computed via the \c ql() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::DynamicMatrix<double,blaze::columnMajor> L;
ql( A, Q, L ); // QL decomposition of a row-major matrix
assert( A == Q * L );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c L can have any storage order.
//
// Also the \c ql() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the QL decomposition of a symmetric matrix into a general
// matrix and a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
ql( A, Q, L ); // QL decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_lq LQ Decomposition
//
// The LQ decomposition of a dense matrix can be computed via the \c lq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
lq( A, L, Q ); // LQ decomposition of a row-major matrix
assert( A == L * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c L and \c Q can have any storage order.
//
// Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates
// the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
lq( A, L, Q ); // LQ decomposition of A
\endcode
// \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors
// <hr>
//
// The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions:
\code
namespace blaze {
template< typename MT, bool SO, typename VT, bool TF >
void eigen( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& V );
} // namespace blaze
\endcode
// The first function computes only the eigenvalues of the given \a n-by-\a n matrix, the second
// function additionally computes the eigenvectors. The eigenvalues are returned in the given vector
// \a w and the eigenvectors are returned in the given matrix \a V, which are both resized to the
// correct dimensions (if possible and necessary).
//
// Depending on the given matrix type, the resulting eigenvalues are either of floating point
// or complex type: In case the given matrix is either a compile time symmetric matrix with
// floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues
// will be of floating point type and therefore the elements of the given eigenvalue vector are
// expected to be of floating point type. In all other cases they are expected to be of complex
// type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except
// that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having
// the positive imaginary part first.
//
// In case \a A is a row-major matrix, \a V will contain the left eigenvectors, otherwise \a V
// will contain the right eigenvectors. In case \a V is a row-major matrix the eigenvectors are
// returned in the rows of \a V, in case \a V is a column-major matrix the eigenvectors are
// returned in the columns of \a V. In case the given matrix is a compile time symmetric matrix
// with floating point elements, the resulting eigenvectors will be of floating point type and
// therefore the elements of the given eigenvector matrix are expected to be of floating point
// type. In all other cases they are expected to be of complex type.
//
// The following examples give an impression of the computation of eigenvalues and eigenvectors
// for a general, a symmetric, and an Hermitian matrix:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A
// ... Initialization
DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The symmetric matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // The Hermitian matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// In all failure cases an exception is thrown.
//
// \note All \c eigen() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of
// LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available
// and linked to the executable. Otherwise a linker error will be created.
//
//
// \n \section matrix_operations_singularvalues Singular Values/Singular Vectors
// <hr>
//
// The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd()
// functions:
\code
namespace blaze {
template< typename MT, bool SO, typename VT, bool TF >
void svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3 >
void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3, typename ST >
size_t svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp );
} // namespace blaze
\endcode
// The first and third function compute only singular values of the given general \a m-by-\a n
// matrix, the second and fourth function additionally compute singular vectors. The resulting
// singular values are returned in the given vector \a s, the left singular vectors are returned
// in the given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s,
// \a U, and \a V are resized to the correct dimensions (if possible and necessary).
//
// The third and fourth function allow for the specification of a subset of singular values and/or
// vectors. The number of singular values and vectors to be computed is specified by the lower
// bound \a low and the upper bound \a upp, which either form an integral or a floating point
// range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// In all failure cases an exception is thrown.
//
// Examples:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors
svd( A, U, s, V );
\endcode
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors
svd( A, U, s, V, 0, 2 );
\endcode
// \note All \c svd() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the singular values and/or singular vectors of a dense matrix by
// means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is
// available and linked to the executable. Otherwise a linker error will be created.
//
//
// \n Previous: \ref matrix_types Next: \ref adaptors
*/
//*************************************************************************************************
//**Adaptors***************************************************************************************
/*!\page adaptors Adaptors
//
// \tableofcontents
//
//
// \section adaptors_general General Concepts
// <hr>
//
// Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the
// matrices such that certain invariants are preserved. Due to this adaptors can provide a compile
// time guarantee of certain properties, which can be exploited for optimized performance.
//
// The \b Blaze library provides a total of 9 different adaptors:
//
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices
// <ul>
// <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_lowermatrix </li>
// <li> \ref adaptors_triangular_matrices_unilowermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_uppermatrix </li>
// <li> \ref adaptors_triangular_matrices_uniuppermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Diagonal Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_diagonalmatrix </li>
// </ul>
// </li>
// </ul>
// </li>
// </ul>
//
// In combination with the general matrix types, \b Blaze provides a total of 40 different matrix
// types that make it possible to exactly adapt the type of matrix to every specific problem.
//
//
// \n \section adaptors_examples Examples
// <hr>
//
// The following code examples give an impression on the use of adaptors. The first example shows
// the multiplication between two lower matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant
// performance advantage in comparison to a general matrix multiplication, especially for large
// matrices.
//
// The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse
// vector multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which significantly increases the performance.
//
// \n Previous: \ref matrix_operations Next: \ref adaptors_symmetric_matrices
*/
//*************************************************************************************************
//**Symmetric Matrices*****************************************************************************
/*!\page adaptors_symmetric_matrices Symmetric Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_symmetric_matrices_general Symmetric Matrices
// <hr>
//
// In contrast to general matrices, which have no restriction in their number of rows and columns
// and whose elements can have any value, symmetric matrices provide the compile time guarantee
// to be square matrices with pair-wise identical values. Mathematically, this means that a
// symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal
// values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can
// be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze
// library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix
// class template.
//
//
// \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix
// <hr>
//
// The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it
// by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its
// transpose \f$ A = A^T \f$). It can be included via the header file
\code
#include <blaze/math/SymmetricMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class SymmetricMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible symmetric matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense symmetric matrix with static memory
blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense symmetric matrix based on HybridMatrix
blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix
blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision symmetric matrix
blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E;
\endcode
// The storage order of a symmetric matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices
// <hr>
//
// A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the symmetry constraint:
//
// -# <b>\ref adaptors_symmetric_matrices_square</b>
// -# <b>\ref adaptors_symmetric_matrices_symmetry</b>
// -# <b>\ref adaptors_symmetric_matrices_initialization</b>
//
// \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 symmetric static matrix
SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced!
//
// This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are
// symmetric themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, row-major 3x3 symmetric compressed matrix
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
// Initializing three elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
*A.begin(1UL) = 10.0; // Modifies the first stored element of row 1, i.e. the diagonal element (1,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a symmetric dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-symmetric dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; symmetric invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a symmetric sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Setup of the symmetric matrix
//
// ( 0 1 3 )
// A = ( 1 2 0 )
// ( 3 0 0 )
//
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0)
A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1)
A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2)
\endcode
// The symmetry property is also enforced for symmetric custom matrices: In case the given array
// of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::SymmetricMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 symmetric custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomSymmetric A( array, 3UL ); // OK
// Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomSymmetric B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the
// symmetric matrix. The following example demonstrates that modifying the elements of an entire
// row of the symmetric matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of the symmetric matrix
//
// ( 0 1 0 2 )
// A = ( 1 3 4 0 )
// ( 0 4 0 5 )
// ( 2 0 5 0 )
//
SymmetricMatrix< DynamicMatrix<int> > A( 4 );
A(0,1) = 1;
A(0,3) = 2;
A(1,1) = 3;
A(1,2) = 4;
A(2,3) = 5;
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( 0 0 0 2 )
// A = ( 0 0 0 0 )
// ( 0 0 0 5 )
// ( 2 0 5 0 )
//
row( A, 1 ) = 0;
\endcode
// The next example demonstrates the (compound) assignment to submatrices of symmetric matrices.
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the symmetric property of
// dense symmetric matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A SymmetricMatrix matrix can participate in numerical operations in any way any other dense
// or sparse matrix can participate. It can also be combined with any other dense or sparse vector
// or matrix. The following code example gives an impression of the use of SymmetricMatrix within
// arithmetic operations:
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix
// to be assigned is not symmetric at compile time, a runtime check is performed.
//
//
// \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices
// <hr>
//
// It is also possible to use symmetric block matrices:
\code
using blaze::CompressedMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
// Definition of a 3x3 symmetric block matrix based on CompressedMatrix
SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 );
\endcode
// Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and
// guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also
// applied to element \f$ a_{ji} \f$:
\code
// Inserting the elements (2,4) and (4,2)
A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } } );
// Manipulating the elements (2,4) and (4,2)
A(2,4)(1,1) = -5;
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_symmetric_matrices_performance Performance Considerations
// <hr>
//
// When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. The \b Blaze library
// tries to exploit the properties of symmetric matrices whenever possible. However, there are
// also situations when using a symmetric matrix introduces some overhead. The following examples
// demonstrate several situations where symmetric matrices can positively or negatively impact
// performance.
//
// \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
SymmetricMatrix< CompressedMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the
// SymmetricMatrix adapter is obviously an advantage.
//
// \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowMajor;
using blaze::columnVector;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL );
auto row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a symmetric matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not symmetric at compile time:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the symmetric matrix; no performance penalty
C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead
C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead
\endcode
// When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary
// to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property
// of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two symmetric matrices does not necessarily result in another symmetric matrix:
\code
SymmetricMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a symmetric matrix; no runtime overhead
C = A - B; // Results in a symmetric matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors Next: \ref adaptors_hermitian_matrices
*/
//*************************************************************************************************
//**Hermitian Matrices*****************************************************************************
/*!\page adaptors_hermitian_matrices Hermitian Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_hermitian_matrices_general Hermitian Matrices
// <hr>
//
// In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices.
// Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise
// conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal
// to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have
// a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze
// library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix
// class template.
//
//
// \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix
// <hr>
//
// The HermitianMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to
// its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file
\code
#include <blaze/math/HermitianMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class HermitianMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible Hermitian matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense Hermitian matrix with static memory
blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix
blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix
blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C;
// Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix
blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision complex Hermitian matrix
blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E;
\endcode
// The storage order of an Hermitian matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices
//
// The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits.
// However, there are a couple of differences, both from a mathematical point of view as well as
// from an implementation point of view.
//
// From a mathematical point of view, a matrix is called symmetric when it is equal to its
// transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate
// transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two
// conditions coincide, which means that symmetric matrices of real values are also Hermitian
// and Hermitian matrices of real values are also symmetric.
//
// From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data
// types (i.e. all integral types except \c bool, floating point and complex types), whereas
// symmetric matrices can also be block matrices (i.e. can have vector or matrix elements).
// For built-in element types, the HermitianMatrix adaptor behaves exactly like the according
// SymmetricMatrix implementation. For complex element types, however, the Hermitian property
// is enforced (see also \ref adaptors_hermitian_matrices_hermitian).
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::HermitianMatrix;
using blaze::SymmetricMatrix;
// The following two matrices provide an identical experience (including performance)
HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric
SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric
// The following two matrices will behave differently
HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian
SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric
// Hermitian block matrices are not allowed
HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error!
SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix
\endcode
// \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices
// <hr>
//
// An Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the Hermitian symmetry constraint:
//
// -# <b>\ref adaptors_hermitian_matrices_square</b>
// -# <b>\ref adaptors_hermitian_matrices_hermitian</b>
// -# <b>\ref adaptors_hermitian_matrices_initialization</b>
//
// \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 Hermitian static matrix
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced!
//
// This means that the following properties of an Hermitian matrix are always guaranteed:
//
// - The diagonal elements are real numbers, i.e. the imaginary part is zero
// - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$
//
// Thus modifying the element \f$ a_{ij} \f$ of an Hermitian matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that
// are Hermitian themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using cplx = std::complex<double>;
// Default constructed, row-major 3x3 Hermitian compressed matrix
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
// Initializing the matrix via the function call operator
//
// ( (1, 0) (0,0) (2,1) )
// ( (0, 0) (0,0) (0,0) )
// ( (2,-1) (0,0) (0,0) )
//
A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0)
A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
//
// ( (1, 0) (0,0) (2, 1) )
// ( (0, 0) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
//
// ( (1, 0) (8,1) (2, 1) )
// ( (8,-1) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
*A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
//
// ( (0, 0) (8,1) (0, 0) )
// ( (8,-1) (2,0) (4,-2) )
// ( (0, 0) (4,2) (0, 0) )
//
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from an Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) },
{ cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) },
{ cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK
// Assignment of a non-Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) },
{ cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) },
{ cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
C = D; // Throws an exception; Hermitian invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up an Hermitian sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using cplx = std::complex<double>;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,2) (3,-4) )
// A = ( (1,-2) (2,0) (0, 0) )
// ( (3, 4) (0,0) (0, 0) )
//
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0)
A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1)
A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2)
\endcode
// The Hermitian property is also enforced for Hermitian custom matrices: In case the given array
// of elements does not represent an Hermitian matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::HermitianMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 Hermitian custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomHermitian A( array, 3UL ); // OK
// Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomHermitian B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the
// Hermitian matrix. The following example demonstrates that modifying the elements of an entire
// row of the Hermitian matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using cplx = std::complex<double>;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,-1) (0,0) (2, 1) )
// A = ( (1, 1) (3, 0) (4,2) (0, 0) )
// ( (0, 0) (4,-2) (0,0) (5,-3) )
// ( (2,-1) (0, 0) (5,3) (0, 0) )
//
HermitianMatrix< DynamicMatrix<cplx> > A( 4 );
A(0,1) = cplx( 1.0, -1.0 );
A(0,3) = cplx( 2.0, 1.0 );
A(1,1) = cplx( 3.0, 0.0 );
A(1,2) = cplx( 4.0, 2.0 );
A(2,3) = cplx( 5.0, 3.0 );
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( (0, 0) (0,0) (0,0) (2, 1) )
// A = ( (0, 0) (0,0) (0,0) (0, 0) )
// ( (0, 0) (0,0) (0,0) (5,-3) )
// ( (2,-1) (0,0) (5,3) (0, 0) )
//
row( A, 1 ) = cplx( 0.0, 0.0 );
\endcode
// The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices.
// Since the modification of element \f$ a_{ij} \f$ of an Hermitian matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian
// symmetry of the matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using cplx = std::complex<double>;
// Setup of two default 4x4 Hermitian matrices
HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( (1,-1) (2, 5) )
// B = ( (3, 0) (4,-6) )
// ( (5, 0) (6, 0) )
//
DynamicMatrix<cplx> B( 3UL, 2UL );
B(0,0) = cplx( 1.0, -1.0 );
B(0,1) = cplx( 2.0, 5.0 );
B(1,0) = cplx( 3.0, 0.0 );
B(1,1) = cplx( 4.0, -6.0 );
B(2,0) = cplx( 5.0, 0.0 );
B(2,1) = cplx( 6.0, 0.0 );
// OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved
//
// ( (0, 0) (0, 0) (1,-1) (2, 5) )
// A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) )
// ( (1, 1) (3, 0) (5, 0) (6, 0) )
// ( (2,-5) (4, 6) (6, 0) (0, 0) )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( (0, 0) (1,-1) (2,5) (0,0) )
// A2 = ( (1, 1) (3, 0) (X,X) (0,0) )
// ( (2,-5) (X, X) (6,0) (0,0) )
// ( (0, 0) (0, 0) (0,0) (0,0) )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the Hermitian property of
// dense Hermitian matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// An HermitianMatrix can be used within all numerical operations in any way any other dense or
// sparse matrix can be used. It can also be combined with any other dense or sparse vector or
// matrix. The following code example gives an impression of the use of HermitianMatrix within
// arithmetic operations:
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
using cplx = complex<float>;
DynamicMatrix<cplx,rowMajor> A( 3, 3 );
CompressedMatrix<cplx,rowMajor> B( 3, 3 );
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 );
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 );
HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E;
HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to an Hermitian matrix. In case the matrix
// to be assigned is not Hermitian at compile time, a runtime check is performed.
//
//
// \n \section adaptors_hermitian_matrices_performance Performance Considerations
// <hr>
//
// When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. This is particularly
// true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The
// \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever
// possible. However, there are also situations when using an Hermitian matrix introduces some
// overhead. The following examples demonstrate several situations where Hermitian matrices can
// positively or negatively impact performance.
//
// \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric
HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a
// symmetric matrix is obviously an advantage.
//
// \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric
auto row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using an Hermitian matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using an Hermitian matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not Hermitian at compile time:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
HermitianMatrix< DynamicMatrix< complex<double> > > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the Hermitian matrix; no performance penalty
C = A; // Assignment of an Hermitian matrix to another Hermitian matrix; no runtime overhead
C = B; // Assignment of a general matrix to an Hermitian matrix; some runtime overhead
\endcode
// When assigning a general, potentially not Hermitian matrix to an Hermitian matrix it is necessary
// to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property
// of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix:
\code
HermitianMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in an Hermitian matrix; no runtime overhead
C = A - B; // Results in an Hermitian matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in an Hermitian matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors_symmetric_matrices Next: \ref adaptors_triangular_matrices
*/
//*************************************************************************************************
//**Triangular Matrices****************************************************************************
/*!\page adaptors_triangular_matrices Triangular Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_triangular_matrices_general Triangular Matrices
// <hr>
//
// Triangular matrices come in three flavors: Lower triangular matrices provide the compile time
// guarantee to be square matrices and that the upper part of the matrix contains only default
// elements that cannot be modified. Upper triangular matrices on the other hand provide the
// compile time guarantee to be square and that the lower part of the matrix contains only fixed
// default elements. Finally, diagonal matrices provide the compile time guarantee to be square
// and that both the lower and upper part of the matrix contain only immutable default elements.
// These properties can be exploited to gain higher performance and/or to save memory. Within the
// \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized
// by the following class templates:
//
// Lower triangular matrices:
// - <b>\ref adaptors_triangular_matrices_lowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_unilowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b>
//
// Upper triangular matrices:
// - <b>\ref adaptors_triangular_matrices_uppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b>
//
// Diagonal matrices:
// - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b>
//
//
// \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix
// <hr>
//
// The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/LowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class LowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense lower matrix with static memory
blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense lower matrix based on HybridMatrix
blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense lower matrix based on DynamicMatrix
blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense lower matrix based on CustomMatrix
blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision lower matrix
blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E;
\endcode
// The storage order of a lower matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix
// <hr>
//
// The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements above the diagonal are 0 (lower unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 1 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower unitriangular matrices:
\code
// Definition of a 3x3 row-major dense unilower matrix with static memory
blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense unilower matrix based on HybridMatrix
blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense unilower matrix based on DynamicMatrix
blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision unilower matrix
blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a lower unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the unilower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix
// <hr>
//
// The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements above the diagonal are 0 (strictly lower triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 0 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly lower triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly lower matrix with static memory
blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix
blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix
blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly lower matrix
blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly lower triangular matrix is depending on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly lower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix
// <hr>
//
// The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & u_{2,2} & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & u_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper matrices:
\code
// Definition of a 3x3 row-major dense upper matrix with static memory
blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense upper matrix based on HybridMatrix
blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense upper matrix based on DynamicMatrix
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision upper matrix
blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix
// <hr>
//
// The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements below the diagonal are 0 (upper unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 1 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 1 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper unitriangular matrices:
\code
// Definition of a 3x3 row-major dense uniupper matrix with static memory
blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense uniupper matrix based on HybridMatrix
blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix
blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision uniupper matrix
blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the uniupper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix
// <hr>
//
// The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements below the diagonal are 0 (strictly upper triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 0 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 0 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly upper triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly upper matrix with static memory
blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix
blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix
blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly upper matrix
blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly upper triangular matrix is depending on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly upper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix
// <hr>
//
// The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all matrix elements above and below the diagonal
// are 0 (diagonal matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
0 & l_{1,1} & 0 & \cdots & 0 \\
0 & 0 & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/DiagonalMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class DiagonalMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible diagonal matrices:
\code
// Definition of a 3x3 row-major dense diagonal matrix with static memory
blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense diagonal matrix based on HybridMatrix
blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix
blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision diagonal matrix
blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a diagonal matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices
// <hr>
//
// A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the triangular matrix constraint:
//
// -# <b>\ref adaptors_triangular_matrices_square</b>
// -# <b>\ref adaptors_triangular_matrices_triangular</b>
// -# <b>\ref adaptors_triangular_matrices_initialization</b>
// -# <b>\ref adaptors_triangular_matrices_storage</b>
// -# <b>\ref adaptors_triangular_matrices_scaling</b>
//
// \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 lower dynamic matrix
LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 lower static matrix
LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced!
//
// This means that it is only allowed to modify elements in the lower part or the diagonal of
// a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the
// modification of diagonal elements. Also, triangular matrices can only be assigned matrices that
// don't violate their triangular property. The following example demonstrates this restriction
// by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >;
// Default constructed, row-major 3x3 lower compressed matrix
CompressedLower A( 3 );
// Initializing elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(2,0) = 2.0; // Initialization of the lower element (2,0)
A(1,2) = 9.0; // Throws an exception; invalid modification of upper element
// Inserting two more elements via the insert() function
A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0)
A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1)
A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element
// Appending an element via the append() function
A.reserve( 1, 3 ); // Reserving enough capacity in row 1
A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1)
A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part
// Access via a non-const iterator
CompressedLower::Iterator it = A.begin(1);
*it = 6.0; // Modifies the lower element (1,0)
++it;
*it = 9.0; // Modifies the diagonal element (1,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 2, 0 ); // Erasing the lower element (2,0)
// Construction from a lower dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-lower dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; lower matrix invariant would be violated!
\endcode
// The triangular property is also enforced during the construction of triangular custom matrices:
// In case the given array of elements does not represent the according triangular matrix type, a
// \c std::invalid_argument exception is thrown:
\code
using blaze::CustomMatrix;
using blaze::LowerMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >;
// Creating a 3x3 lower custom matrix from a properly initialized array
double array[9] = { 1.0, 0.0, 0.0,
2.0, 3.0, 0.0,
4.0, 5.0, 6.0 };
CustomLower A( array, 3UL ); // OK
// Attempt to create a second 3x3 lower custom matrix from an uninitialized array
std::unique_ptr<double[]> memory( new double[9UL] );
CustomLower B( memory.get(), 3UL ); // Throws an exception
\endcode
// Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...)
// on the triangular matrix. The following example demonstrates that modifying the elements of an
// entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements.
// Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
// Setup of the lower matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 0 3 0 0 )
// ( 4 0 5 0 )
//
LowerMatrix< DynamicMatrix<int> > A( 4 );
A(1,0) = 1;
A(1,1) = 2;
A(2,1) = 3;
A(3,0) = 4;
A(3,2) = 5;
// Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 9 9 9 0 )
// ( 4 0 5 0 )
//
row( A, 2 ) = 9;
// Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in
//
// ( 0 0 0 0 )
// A = ( 1 7 0 0 )
// ( 9 7 7 0 )
// ( 4 7 7 0 )
//
submatrix( A, 0, 1, 4, 2 ) = 7;
\endcode
// The next example demonstrates the (compound) assignment to rows/columns and submatrices of
// triangular matrices. Since only lower/upper and potentially diagonal elements may be modified
// the matrix to be assigned must be structured such that the triangular matrix invariant of the
// matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::LowerMatrix;
using blaze::rowVector;
// Setup of two default 4x4 lower matrices
LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of a 4-dimensional vector
//
// v = ( 1 2 3 0 )
//
DynamicVector<int,rowVector> v{ 1, 2, 3, 0 };
// OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant
//
// ( 0 0 0 0 )
// A1 = ( 0 0 0 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 2 ) = v; // OK
// Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element
// marked with X cannot be assigned and triggers an exception.
//
// ( 0 0 0 0 )
// A1 = ( 1 2 X 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 1 ) = v; // Assignment throws an exception!
// Setup of the 3x2 dynamic matrix
//
// ( 0 0 )
// B = ( 7 0 )
// ( 8 9 )
//
DynamicMatrix<int> B( 3UL, 2UL, 0 );
B(1,0) = 7;
B(2,0) = 8;
B(2,1) = 9;
// OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved
//
// ( 0 0 0 0 )
// A2 = ( 0 7 0 0 )
// ( 0 8 9 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be
// preserved! The elements marked with X cannot be assigned without violating the invariant!
//
// ( 0 0 0 0 )
// A2 = ( 0 7 X 0 )
// ( 0 8 8 X )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency during the creation of a dense lower or
// upper matrix this initialization is important since otherwise the lower/upper matrix property
// of dense lower and upper matrices would not be guaranteed:
\code
using blaze::DiagonalMatrix;
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
using blaze::columnMajor;
using blaze::rowMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// 5x5 row-major lower dynamic matrix with default initialized upper matrix
LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
// 7x7 column-major upper dynamic matrix with default initialized lower matrix
UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 );
// 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix
DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 );
\endcode
// \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements!
//
// All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable
// elements in the lower or upper part, respectively. Therefore dense triangular matrices don't
// provide any kind of memory reduction! There are two main reasons for this: First, storing also
// the zero elements guarantees maximum performance for many algorithms that perform vectorized
// operations on the triangular matrices, which is especially true for small dense matrices.
// Second, conceptually all triangular adaptors merely restrict the interface to the matrix type
// \c MT and do not change the data layout or the underlying matrix type.
//
// This property matters most for diagonal matrices. In order to achieve the perfect combination
// of performance and memory consumption for a diagonal matrix it is recommended to use dense
// matrices for small diagonal matrices and sparse matrices for large diagonal matrices:
\code
// Recommendation 1: use dense matrices for small diagonal matrices
using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >;
// Recommendation 2: use sparse matrices for large diagonal matrices
using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >;
\endcode
// \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled!
//
// Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible
// to self-scale such a matrix:
\code
using blaze::DynamicMatrix;
using blaze::UniLowerMatrix;
UniLowerMatrix< DynamicMatrix<int> > A( 4 );
A *= 2; // Compilation error; Scale operation is not available on an unilower matrix
A /= 2; // Compilation error; Scale operation is not available on an unilower matrix
A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix
A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix
A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix
\endcode
// \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A lower and upper triangular matrix can participate in numerical operations in any way any other
// dense or sparse matrix can participate. It can also be combined with any other dense or sparse
// vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix
// within arithmetic operations:
\code
using blaze::LowerMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a triangular matrix. In case the
// matrix to be assigned does not satisfy the invariants of the triangular matrix at compile
// time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular
// and strictly triangular matrix types can be used in the same way, but may pose some additional
// restrictions (see the according class documentations).
//
//
// \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices
// <hr>
//
// It is also possible to use triangular block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
// Definition of a 5x5 lower block matrix based on DynamicMatrix
LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Definition of a 7x7 upper block matrix based on CompressedMatrix
UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to
// manipulate elements in the upper part (lower triangular matrix) or the lower part (upper
// triangular matrix) of the matrix:
\code
const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } };
A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception
B.insert( 4, 2, C ); // Invalid insertion of the element (4,2); Results in an exception
\endcode
// Note that unitriangular matrices are restricted to numeric element types and therefore cannot
// be used for block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::UniLowerMatrix;
using blaze::UniUpperMatrix;
// Compilation error: lower unitriangular matrices are restricted to numeric element types
UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Compilation error: upper unitriangular matrices are restricted to numeric element types
UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_triangular_matrices_performance Performance Considerations
// <hr>
//
// The \b Blaze library tries to exploit the properties of lower and upper triangular matrices
// whenever and wherever possible. Therefore using triangular matrices instead of a general
// matrices can result in a considerable performance improvement. However, there are also
// situations when using a triangular matrix introduces some overhead. The following examples
// demonstrate several common situations where triangular matrices can positively or negatively
// impact performance.
//
// \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. The following example demonstrates this by
// means of a dense matrix/dense matrix multiplication with lower triangular matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// In comparison to a general matrix multiplication, the performance advantage is significant,
// especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix
// and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular,
// respectively. Note however that the performance advantage is most pronounced for dense matrices
// and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar performance improvement can be gained when using a triangular matrix in a matrix/vector
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
DynamicVector<double,columnVector> x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example, \b Blaze also exploits the structure of the matrix and approx. halves the
// runtime of the multiplication. Also in case of matrix/vector multiplications the performance
// improvement is most pronounced for dense matrices and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for
// read access), which introduces absolutely no performance penalty, using a triangular matrix
// on the left-hand side of an assignment (i.e. for write access) may introduce additional
// overhead when it is assigned a general matrix, which is not triangular at compile time:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
LowerMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the lower matrix; no performance penalty
C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead
C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead
\endcode
// When assigning a general (potentially not lower triangular) matrix to a lower matrix or a
// general (potentially not upper triangular) matrix to an upper matrix it is necessary to check
// whether the matrix is lower or upper at runtime in order to guarantee the triangular property
// of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as
// efficiently as possible, if it is not, an exception is thrown. In order to prevent this runtime
// overhead it is therefore generally advisable to assign lower or upper triangular matrices to
// other lower or upper triangular matrices.\n
// In this context it is especially noteworthy that the addition, subtraction, and multiplication
// of two triangular matrices of the same structure always results in another triangular matrix:
\code
LowerMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a lower matrix; no runtime overhead
C = A - B; // Results in a lower matrix; no runtime overhead
C = A * B; // Results in a lower matrix; no runtime overhead
\endcode
\code
UpperMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in an upper matrix; no runtime overhead
C = A - B; // Results in an upper matrix; no runtime overhead
C = A * B; // Results in an upper matrix; no runtime overhead
\endcode
// \n Previous: \ref adaptors_hermitian_matrices Next: \ref views
*/
//*************************************************************************************************
//**Views******************************************************************************************
/*!\page views Views
//
// \tableofcontents
//
//
// \section views_general General Concepts
// <hr>
//
// Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific
// row, column, or band of a matrix. As such, views act as a reference to specific elements of
// a vector or matrix. This reference is valid and can be used in every way as any other vector
// or matrix can be used as long as the referenced vector or matrix is not resized or entirely
// destroyed. Views also act as alias to the elements of the vector or matrix: Changes made to the
// elements (e.g. modifying values, inserting or erasing elements) via the view are immediately
// visible in the vector or matrix and changes made via the vector or matrix are immediately
// visible in the view.
//
// It is also possible to create nested views (compound views), such as for instance bands of
// submatrices or row selections on column selections. A compound view also acts as reference
// to specific elements of the underlying vector or matrix and is valid as long as the underlying,
// referenced vector or matrix is not resized or entirely destroyed.
//
// The \b Blaze library provides the following views on vectors and matrices:
//
// Vector views:
// - \ref views_subvectors
// - \ref views_element_selections
//
// Matrix views:
// - \ref views_submatrices
// - \ref views_rows
// - \ref views_row_selections
// - \ref views_columns
// - \ref views_column_selections
// - \ref views_bands
//
//
// \n \section views_examples Examples
\code
using blaze::DynamicMatrix;
using blaze::StaticVector;
// Setup of the 3x5 row-major matrix
DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 },
{ 0, 2, 5, -1, -1 },
{ 1, 0, 0, 2, 1 } };
// Setup of the 2-dimensional row vector
StaticVector<int,2UL,rowVector> vec{ 18, 19 };
// Assigning to the elements (1,2) and (1,3) via a subvector of a row
//
// ( 1 0 -2 3 0 )
// ( 0 2 18 19 -1 )
// ( 1 0 0 2 1 )
//
subvector( row( A, 1UL ), 2UL, 2UL ) = vec;
// Switching rows 0 and 2 of A
//
// ( 1 0 0 2 1 )
// ( 0 2 18 19 -1 )
// ( 1 0 -2 3 0 )
//
rows<0,2>( A ) = rows<2,0>( A );
// Warning: It is the programmer's responsibility to ensure the view does not outlive
// the viewed vector or matrix (dangling reference)!
auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } );
\endcode
// \n Previous: \ref adaptors_triangular_matrices Next: \ref views_subvectors
*/
//*************************************************************************************************
//**Subvectors*************************************************************************************
/*!\page views_subvectors Subvectors
//
// \tableofcontents
//
//
// Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors
// act as a reference to a specific range within a vector. This reference is valid and can be
// used in every way any other dense or sparse vector can be used as long as the vector containing
// the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the
// vector elements in the specified range: Changes made to the elements (e.g. modifying values,
// inserting or erasing elements) are immediately visible in the vector and changes made via the
// vector are immediately visible in the subvector.
//
//
// \n \section views_subvectors_setup Setup of Subvectors
// <hr>
//
// A view on a dense or sparse subvector can be created very conveniently via the \c subvector()
// function. It can be included via the header file
\code
#include <blaze/math/Subvector.h>
\endcode
// The first parameter specifies the offset of the subvector within the underlying dense or sparse
// vector, the second parameter specifies the size of the subvector. The two parameters can be
// specified either at compile time or at runtime:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments)
auto sv1 = subvector<4UL,12UL>( x );
// Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments)
auto sv2 = subvector( x, 8UL, 16UL );
\endcode
// The \c subvector() function returns an expression representing the subvector view. The type of
// this expression depends on the given subvector arguments, primarily the type of the vector and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using VectorType = blaze::DynamicVector<int>;
using SubvectorType = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. A subvector created
// from a row vector can be used as any other row vector, a subvector created from a column vector
// can be used as any other column vector. The view can also be used on both sides of an assignment:
// The subvector can either be used as an alias to grant write access to a specific subvector of a
// vector primitive on the left-hand side of an assignment or to grant read-access to a specific
// subvector of a vector primitive or expression on the right-hand side of an assignment. The
// following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9])
auto sv = subvector( x, 0UL, 10UL );
// Setting the first ten elements of x to the 2nd row of matrix A
sv = row( A, 2UL );
// Setting the second ten elements of x to y
subvector( x, 10UL, 10UL ) = y;
// Setting the 3rd row of A to a subvector of x
row( A, 3UL ) = subvector( x, 3UL, 10UL );
// Setting x to a subvector of the result of the addition between y and the 1st row of A
x = subvector( y + row( A, 1UL ), 2UL, 5UL );
\endcode
// \warning It is the programmer's responsibility to ensure the subvector does not outlive the
// viewed vector:
\code
// Creating a subvector on a temporary vector; results in a dangling reference!
auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } );
\endcode
// \n \section views_subvectors_element_access Element Access
// <hr>
//
// The elements of a subvector can be directly accessed via the subscript operator:
\code
blaze::DynamicVector<double,blaze::rowVector> v;
// ... Resizing and initialization
// Creating an 8-dimensional subvector, starting from index 4
auto sv = subvector( v, 4UL, 8UL );
// Setting the 1st element of the subvector, which corresponds to
// the element at index 5 in vector v
sv[1] = 2.0;
\endcode
// The numbering of the subvector elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the specified size of the subvector. Alternatively, the elements of a subvector can
// be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// subvectors an iterator to immutable elements is returned:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of vector v
auto sv = subvector( v, 16UL, 64UL );
// Traversing the elements via iterators to non-const elements
for( auto it=sv.begin(); it!=sv.end(); ++it ) {
*it = ...; // OK: Write access to the dense subvector value.
... = *it; // OK: Read access to the dense subvector value.
}
// Traversing the elements via iterators to const elements
for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense subvector value.
}
\endcode
\code
blaze::CompressedVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of vector v
auto sv = subvector( v, 16UL, 64UL );
// Traversing the elements via iterators to non-const elements
for( auto it=sv.begin(); it!=sv.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_subvectors_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse subvector can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256
auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v
// The subscript operator provides access to all possible elements of the sparse subvector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse subvector, the element is inserted into the
// subvector.
sv[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the subvector it is inserted into the subvector, if it is already contained
// in the subvector its value is modified.
sv.set( 45UL, -1.2 );
// An alternative for inserting elements into the subvector is the insert() function. However,
// it inserts the element only in case the element is not already contained in the subvector.
sv.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In
// case of subvectors, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index of the subvector and that the subvector's
// capacity is large enough to hold the new element. Note however that due to the nature of
// a subvector, which may be an alias to the middle of a sparse vector, the append() function
// does not work as efficiently for a subvector as it does for a vector.
sv.reserve( 10UL );
sv.append( 51UL, -2.1 );
\endcode
// \n \section views_subvectors_common_operations Common Operations
// <hr>
//
// A subvector view can be used like any other dense or sparse vector. This means that with
// only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used.
// For instance, the current number of elements can be obtained via the \c size() function, the
// current capacity via the \c capacity() function, and the number of non-zero elements via the
// \c nonZeros() function. However, since subvectors are references to a specific range of a
// vector, several operations are not possible, such as resizing and swapping. The following
// example shows this by means of a dense subvector view:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 42UL );
// ... Resizing and initialization
// Creating a view on the range [5..15] of vector v
auto sv = subvector( v, 5UL, 10UL );
sv.size(); // Returns the number of elements in the subvector
sv.capacity(); // Returns the capacity of the subvector
sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector
sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector
auto sv2 = subvector( v, 15UL, 10UL );
swap( sv, sv2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_subvectors_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse subvectors can be used in all arithmetic operations that any other dense
// or sparse vector can be used in. The following example gives an impression of the use of dense
// subvectors within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3;
blaze::CompressedVector<double,blaze::rowVector> s1, s2;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> A;
auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1
sv = d2; // Dense vector initialization of the range [0..9]
subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19]
d3 = sv + d2; // Dense vector/dense vector addition
s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition
d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication
subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6]
d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9]
d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9]
subvector( d1, 0UL , 10UL ) += d2; // Addition assignment
subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment
subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment
double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors
\endcode
// \n \section views_aligned_subvectors Aligned Subvectors
// <hr>
//
// Usually subvectors can be defined anywhere within a vector. They may start at any position and
// may have an arbitrary size (only restricted by the size of the underlying vector). However, in
// contrast to vectors themselves, which are always properly aligned in memory and therefore can
// provide maximum performance, this means that subvectors in general have to be considered to be
// unaligned. This can be made explicit by the \c blaze::unaligned flag:
\code
using blaze::unaligned;
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Identical creations of an unaligned subvector in the range [8..23]
auto sv1 = subvector ( x, 8UL, 16UL );
auto sv2 = subvector<unaligned>( x, 8UL, 16UL );
auto sv3 = subvector<8UL,16UL> ( x );
auto sv4 = subvector<unaligned,8UL,16UL>( x );
\endcode
// All of these calls to the \c subvector() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide
// full flexibility in the creation of subvectors, this might result in performance disadvantages
// in comparison to vector primitives (even in case the specified subvector could be aligned).
// Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a vector might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned subvectors. Aligned subvectors are identical to
// unaligned subvectors in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying vector. Aligned subvectors are created by
// explicitly specifying the \c blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned subvector in the range [8..23]
auto sv1 = subvector<aligned>( x, 8UL, 16UL );
auto sv2 = subvector<aligned,8UL,16UL>( x );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of the subvector must be aligned. The following source code gives some examples
// for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double
// values into a SIMD vector:
\code
using blaze::aligned;
blaze::DynamicVector<double,blaze::columnVector> d( 17UL );
// ... Resizing and initialization
// OK: Starts at the beginning, i.e. the first element is aligned
auto dsv1 = subvector<aligned>( d, 0UL, 13UL );
// OK: Start index is a multiple of 4, i.e. the first element is aligned
auto dsv2 = subvector<aligned>( d, 4UL, 7UL );
// OK: The start index is a multiple of 4 and the subvector includes the last element
auto dsv3 = subvector<aligned>( d, 8UL, 9UL );
// Error: Start index is not a multiple of 4, i.e. the first element is not aligned
auto dsv4 = subvector<aligned>( d, 5UL, 8UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense subvectors.
// In contrast, aligned sparse subvectors at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case
// the \c blaze::aligned flag is specified during setup, an aligned subvector is created:
\code
using blaze::aligned;
blaze::CompressedVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Creating an aligned subvector in the range [8..23]
auto sv1 = subvector<aligned>( x, 8UL, 16UL );
auto sv2 = subvector<aligned,8UL,16UL>( x );
\endcode
// \n Previous: \ref views Next: \ref views_element_selections
*/
//*************************************************************************************************
//**Element Selections*****************************************************************************
/*!\page views_element_selections Element Selections
//
// \tableofcontents
//
//
// Element selections provide views on arbitrary compositions of elements of dense and sparse
// vectors. These views act as a reference to the selected elements and represent them as another
// dense or sparse vector. This reference is valid and can be used in every way any other dense
// or sparse vector can be used as long as the vector containing the elements is not resized or
// entirely destroyed. The element selection also acts as an alias to the vector elements in the
// specified range: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the vector and changes made via the vector are immediately
// visible in the elements.
//
//
// \n \section views_element_selections_setup Setup of Element Selections
//
// An element selection can be created very conveniently via the \c elements() function. It can
// be included via the header file
\code
#include <blaze/math/Elements.h>
\endcode
// The indices of the elements to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicVector<double,blaze::rowVector> x;
// ... Resizing and initialization
// Selecting the elements 4, 6, 8, and 10 (compile time arguments)
auto e1 = elements<4UL,6UL,8UL,10UL>( x );
// Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto e2 = elements( x, { 3UL, 2UL, 1UL } );
auto e3 = elements( x, list );
// Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto e4 = elements( x, array );
auto e5 = elements( x, array.data(), array.size() );
// Selecting the element 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto e6 = elements( x, vector );
auto e7 = elements( x, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the elements of the underlying vector in any order. Also note
// that it is possible to use the same index multiple times.
//
// Alternatively it is possible to pass a callable such as a lambda or functor that produces the
// indices:
\code
blaze::DynamicVector<double,blaze::rowVector> x{ 0, 1, 2, 3, 4, 5, 6, 7, 8 };
// Selecting all even elements of the vector, i.e. selecting (0,2,4,6,8)
auto e1 = elements( x, []( size_t i ){ return i*2UL; }, 5UL );
// Selecting all odd elements of the vector, i.e. selecting (1,3,5,7)
auto e2 = elements( x, []( size_t i ){ return i*2UL+1UL; }, 4UL );
// Reversing the elements of the vector, i.e. selecting (8,7,6,5,4,3,2,1,0)
auto e3 = elements( x, [max=x.size()-1UL]( size_t i ){ return max-i; }, 9UL );
\endcode
// The \c elements() function returns an expression representing the view on the selected elements.
// The type of this expression depends on the given arguments, primarily the type of the vector and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using VectorType = blaze::DynamicVector<int>;
using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. An element selection
// created from a row vector can be used as any other row vector, an element selection created
// from a column vector can be used as any other column vector. The view can also be used on both
// sides of an assignment: It can either be used as an alias to grant write access to specific
// elements of a vector primitive on the left-hand side of an assignment or to grant read-access
// to specific elements of a vector primitive or expression on the right-hand side of an assignment.
// The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Selecting the elements 1, 3, 5, and 7
auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } );
// Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A
e = row( A, 2UL );
// Setting the elements 2, 4, 6, and 8 of x to y
elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y;
// Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x
row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between y and the 1st row of A
   x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } );
\endcode
// Please note that using an element selection, which refers to an index multiple times, on the
// left-hand side of an assignment leads to undefined behavior:
\code
blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 };
blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 };
auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times
e = b; // Undefined behavior
\endcode
// In this example both vectors have the same size, which results in a correct vector assignment,
// but the final value of the element at index 1 is unspecified.
//
// \warning It is the programmer's responsibility to ensure the element selection does not outlive
// the viewed vector:
\code
// Creating an element selection on a temporary vector; results in a dangling reference!
auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } );
\endcode
// \n \section views_element_selections_element_access Element Access
//
// The elements of an element selection can be directly accessed via the subscript operator:
\code
blaze::DynamicVector<double,blaze::rowVector> v;
// ... Resizing and initialization
// Selecting the elements 2, 4, 6, and 8
auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } );
// Setting the 1st element of the element selection, which corresponds to
// the element at index 4 in vector v
e[1] = 2.0;
\endcode
// The numbering of the selected elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of selected elements. Alternatively, the elements of an element selection
// can be traversed via iterators. Just as with vectors, in case of non-const element selections,
// \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of
// constant element selections an iterator to immutable elements is returned:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating an element selection including specific elements of dense vector v
auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } );
// Traversing the elements via iterators to non-const elements
for( auto it=e.begin(); it!=e.end(); ++it ) {
*it = ...; // OK: Write access to the dense vector value.
... = *it; // OK: Read access to the dense vector value.
}
// Traversing the elements via iterators to const elements
for( auto it=e.cbegin(); it!=e.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense vector value.
}
\endcode
\code
blaze::CompressedVector<int,blaze::rowVector> v( 256UL );
// ... Resizing and initialization
// Creating an element selection including specific elements of sparse vector v
auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } );
// Traversing the elements via iterators to non-const elements
for( auto it=e.begin(); it!=e.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=e.cbegin(); it!=e.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_element_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse element selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256
std::vector<size_t> indices;
// ... Selecting indices of the sparse vector
auto e = elements( v, indices );
// The subscript operator provides access to the selected elements of the sparse vector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse vector, the element is inserted.
e[42] = 2.0;
// The second operation for inserting elements via the element selection is the set() function.
// In case the element is not contained in the vector it is inserted into the vector, if it is
// already contained in the vector its value is modified.
e.set( 45UL, -1.2 );
// An alternative for inserting elements into the vector is the insert() function. However, it
// inserts the element only in case the element is not already contained in the vector.
e.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In case
// of element selections, append() also requires that the appended element's index is strictly
   // larger than the currently largest non-zero index of the selection and that the selection's
// capacity is large enough to hold the new element. Note however that due to the nature of an
// element selection, which is an alias to arbitrary elements of a sparse vector, the append()
// function does not work as efficiently for an element selection as it does for a vector.
e.reserve( 10UL );
e.append( 51UL, -2.1 );
\endcode
// \n \section views_element_selections_common_operations Common Operations
//
// An element selection can be used like any other dense or sparse vector. For instance, the
// number of selected elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since element selections are references to a specific range of a vector,
// several operations are not possible, such as resizing and swapping. The following example
// shows this by means of an element selection on a dense vector:
\code
blaze::DynamicVector<int,blaze::rowVector> v( 42UL );
// ... Resizing and initialization
// Selecting the elements 5 and 10
auto e = elements( v, { 5UL, 10UL } );
e.size(); // Returns the number of elements in the element selection
e.capacity(); // Returns the capacity of the element selection
e.nonZeros(); // Returns the number of non-zero elements contained in the element selection
e.resize( 84UL ); // Compilation error: Cannot resize an element selection
auto e2 = elements( v, { 15UL, 10UL } );
swap( e, e2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_element_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse element selections can be used in all arithmetic operations that any other
// dense or sparse vector can be used in. The following example gives an impression of the use of
// dense element selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse
// element selections with fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3;
blaze::CompressedVector<double,blaze::rowVector> s1, s2;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> A;
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
   auto e( elements( d1, indices1 ) );  // Selecting every third element of d1 in the range [0..21]
e = d2; // Dense vector assignment to the selected elements
elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements
d3 = e + d2; // Dense vector/dense vector addition
s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition
d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication
elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements
d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements
d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements
elements( d1, indices1 ) += d2; // Addition assignment
elements( d1, indices2 ) -= s2; // Subtraction assignment
elements( d1, indices3 ) *= e; // Multiplication assignment
double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors
\endcode
// \n Previous: \ref views_subvectors Next: \ref views_submatrices
*/
//*************************************************************************************************
//**Submatrices************************************************************************************
/*!\page views_submatrices Submatrices
//
// \tableofcontents
//
//
// Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors
// provide views on specific parts of vectors. As such, submatrices act as a reference to a
// specific block within a matrix. This reference is valid and can be used in every way any
// other dense or sparse matrix can be used as long as the matrix containing the submatrix is
// not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements
// in the specified block: Changes made to the elements (e.g. modifying values, inserting or
// erasing elements) are immediately visible in the matrix and changes made via the matrix are
// immediately visible in the submatrix.
//
//
// \n \section views_submatrices_setup Setup of Submatrices
// <hr>
//
// A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix()
// function. It can be included via the header file
\code
#include <blaze/math/Submatrix.h>
\endcode
// The first and second parameter specify the row and column of the first element of the submatrix.
// The third and fourth parameter specify the number of rows and columns, respectively. The four
// parameters can be specified either at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments)
auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A );
// Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments)
auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL );
\endcode
// The \c submatrix() function returns an expression representing the submatrix view. The type of
// this expression depends on the given submatrix arguments, primarily the type of the matrix and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from
// a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major
// matrix will be a column-major matrix. The view can also be used on both sides of an assignment:
// The submatrix can either be used as an alias to grant write access to a specific submatrix
// of a matrix primitive on the left-hand side of an assignment or to grant read-access to
// a specific submatrix of a matrix primitive or expression on the right-hand side of an
// assignment. The following example demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C;
// ... Resizing and initialization
// Creating a dense submatrix of size 8x4, starting in row 0 and column 2
auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL );
// Setting the submatrix of A to a 8x4 submatrix of B
sm = submatrix( B, 0UL, 0UL, 8UL, 4UL );
// Copying the sparse matrix C into another 8x4 submatrix of A
submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C;
// Assigning part of the result of a matrix addition to the first submatrix
sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL );
\endcode
// \warning It is the programmer's responsibility to ensure the submatrix does not outlive the
// viewed matrix:
\code
// Creating a submatrix on a temporary matrix; results in a dangling reference!
auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_submatrices_element_access Element Access
// <hr>
//
// The elements of a submatrix can be directly accessed with the function call operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a 8x8 submatrix, starting from position (4,4)
auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
// Setting the element (0,0) of the submatrix, which corresponds to
// the element at position (4,4) in matrix A
sm(0,0) = 2.0;
\endcode
// Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as
// with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator,
// which allows to manipulate the elements, in case of constant submatrices an iterator to
// immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of matrix A
auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
*it = ...; // OK: Write access to the dense submatrix value.
... = *it; // OK: Read access to the dense submatrix value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense submatrix value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of matrix A
auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=sm.begin(0); it!=sm.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_submatrices_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse submatrix can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A
// The function call operator provides access to all possible elements of the sparse submatrix,
// including the zero elements. In case the function call operator is used to access an element
// that is currently not stored in the sparse submatrix, the element is inserted into the
// submatrix.
sm(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the submatrix it is inserted into the submatrix, if it is already contained
// in the submatrix its value is modified.
sm.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the submatrix is the insert() function. However,
// it inserts the element only in case the element is not already contained in the submatrix.
sm.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of submatrices, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according row
// or column of the submatrix and that the according row's or column's capacity is large
// enough to hold the new element. Note however that due to the nature of a submatrix, which
// may be an alias to the middle of a sparse matrix, the append() function does not work as
// efficiently for a submatrix as it does for a matrix.
sm.reserve( 2UL, 10UL );
sm.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_submatrices_common_operations Common Operations
// <hr>
//
// A submatrix view can be used like any other dense or sparse matrix. This means that with only
// a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// submatrices are views on a specific submatrix of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
   // Creating a view on an 8x12 submatrix of matrix A
auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL );
sm.rows(); // Returns the number of rows of the submatrix
sm.columns(); // Returns the number of columns of the submatrix
sm.capacity(); // Returns the capacity of the submatrix
sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix
sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix
auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL );
swap( sm, sm2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_submatrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse submatrices can be used in all arithmetic operations that any other dense
// or sparse matrix can be used in. The following example gives an impression of the use of dense
// submatrices within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse matrices with
// fitting element types:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1
// starting from row 0 and column 0
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix
// starting in row 0 and column 8
sm = S1; // Sparse matrix initialization of the second 8x8 submatrix
D3 = sm + D2; // Dense matrix/dense matrix addition
S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction
D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1
   D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0;  // Scaling of a submatrix of D1
   D2 = 2.0 * sm;                                   // Scaling of a submatrix of D1
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment
submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment
a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_aligned_submatrices Aligned Submatrices
// <hr>
//
// Usually submatrices can be defined anywhere within a matrix. They may start at any position and
// may have an arbitrary extension (only restricted by the extension of the underlying matrix).
// However, in contrast to matrices themselves, which are always properly aligned in memory and
// therefore can provide maximum performance, this means that submatrices in general have to be
// considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag:
\code
using blaze::unaligned;
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0
auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL );
auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL );
auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A );
auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A );
\endcode
// All of these calls to the \c submatrix() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide
// full flexibility in the creation of submatrices, this might result in performance disadvantages
// in comparison to matrix primitives (even in case the specified submatrix could be aligned).
// Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a matrix might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned submatrices. Aligned submatrices are identical to
// unaligned submatrices in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying matrix. Aligned submatrices are created by
// explicitly specifying the \c blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of each row/column of the submatrix must be aligned. The following source code
// gives some examples for a double precision row-major dynamic matrix, assuming that padding is
// enabled and that AVX is available, which packs 4 \c double values into a SIMD vector:
\code
using blaze::aligned;
blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL );
// ... Resizing and initialization
// OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding)
auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL );
// OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding)
auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL );
// OK: First column is a multiple of 4 and the submatrix includes the last row and column
auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL );
// Error: First column is not a multiple of 4, i.e. the first element is not aligned
auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense submatrices.
// In contrast, aligned sparse submatrices at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case
// the \c blaze::aligned flag is specified during setup, an aligned submatrix is created:
\code
using blaze::aligned;
blaze::CompressedMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices
//
// Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of a 16x16 symmetric matrix
SymmetricMatrix< DynamicMatrix<int> > A( 16UL );
// Creating a dense submatrix of size 8x12, starting in row 2 and column 4
auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL );
\endcode
// It is important to note, however, that (compound) assignments to such submatrices have a
// special restriction: The symmetry of the underlying symmetric matrix must not be broken!
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \a std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n Previous: \ref views_element_selections Next: \ref views_rows
*/
//*************************************************************************************************
//**Rows*******************************************************************************************
/*!\page views_rows Rows
//
// \tableofcontents
//
//
// Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a
// reference to a specific row. This reference is valid and can be used in every way any other
// row vector can be used as long as the matrix containing the row is not resized or entirely
// destroyed. The row also acts as an alias to the row elements: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix
// and changes made via the matrix are immediately visible in the row.
//
//
// \n \section views_rows_setup Setup of Rows
// <hr>
//
// \image html row.png
// \image latex row.eps "Row view" width=250pt
//
// A reference to a dense or sparse row can be created very conveniently via the \c row() function.
// It can be included via the header file
\code
#include <blaze/math/Row.h>
\endcode
// The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows
// of the matrix, and can be specified both at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st row of matrix A (compile time index)
auto row1 = row<1UL>( A );
// Creating a reference to the 2nd row of matrix A (runtime index)
auto row2 = row( A, 2UL );
\endcode
// The \c row() function returns an expression representing the row view. The type of this
// expression depends on the given row arguments, primarily the type of the matrix and the compile
// time arguments. If the type is required, it can be determined via the \c decltype specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using RowType = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. The reference can also be used on
// both sides of an assignment: The row can either be used as an alias to grant write access to a
// specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access
// to a specific row of a matrix primitive or expression on the right-hand side of an assignment.
// The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C, D;
// ... Resizing and initialization
// Setting the 2nd row of matrix A to x
auto row2 = row( A, 2UL );
row2 = x;
// Setting the 3rd row of matrix B to y
row( B, 3UL ) = y;
// Setting x to the 4th row of the result of the matrix multiplication
x = row( A * B, 4UL );
// Setting y to the 2nd row of the result of the sparse matrix multiplication
y = row( C * D, 2UL );
\endcode
// \warning It is the programmer's responsibility to ensure the row does not outlive the viewed
// matrix:
\code
// Creating a row on a temporary matrix; results in a dangling reference!
auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_rows_element_access Element Access
// <hr>
//
// The elements of a row can be directly accessed with the subscript operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th row of matrix A
auto row4 = row( A, 4UL );
// Setting the 1st element of the dense row, which corresponds
// to the 1st element in the 4th row of matrix A
row4[1] = 2.0;
\endcode
// The numbering of the row elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of columns of the referenced matrix. Alternatively, the elements of a
// row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// rows an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
auto row31 = row( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=row31.begin(); it!=row31.end(); ++it ) {
   *it = ...;  // OK: Write access to the dense row value.
... = *it; // OK: Read access to the dense row value.
}
// Traversing the elements via iterators to const elements
for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense row value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
auto row31 = row( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=row31.begin(); it!=row31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_rows_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse row can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix
auto row0( row( A, 0UL ) ); // Reference to the 0th row of A
// The subscript operator provides access to all possible elements of the sparse row,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse row, the element is inserted into the row.
row0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the row it is inserted into the row, if it is already contained in
// the row its value is modified.
row0.set( 45UL, -1.2 );
// An alternative for inserting elements into the row is the insert() function. However,
// it inserts the element only in case the element is not already contained in the row.
row0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse row is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the row and that the row's capacity is large
// enough to hold the new element.
row0.reserve( 10UL );
row0.append( 51UL, -2.1 );
\endcode
// \n \section views_rows_common_operations Common Operations
// <hr>
//
// A row view can be used like any other row vector. This means that with only a few exceptions
// all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the
// current number of elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since rows are references to specific rows of a matrix, several operations
// are not possible on views, such as resizing and swapping. The following example shows this by
// means of a dense row view:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd row of matrix A
auto row2 = row( A, 2UL );
row2.size(); // Returns the number of elements in the row
row2.capacity(); // Returns the capacity of the row
row2.nonZeros(); // Returns the number of non-zero elements contained in the row
row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix
auto row3 = row( A, 3UL );
swap( row2, row3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_rows_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse rows can be used in all arithmetic operations that any other dense or
// sparse row vector can be used in. The following example gives an impression of the use of
// dense rows within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse rows with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::rowVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix
auto row0( row( A, 0UL ) ); // Reference to the 0th row of A
row0[0] = 0.0; // Manual initialization of the 0th row of A
row0[1] = 0.0;
row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A
row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A
row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A
b = row0 + a; // Dense vector/dense vector addition
b = c + row( A, 1UL ); // Sparse vector/dense vector addition
b = row0 * row( A, 2UL ); // Component-wise vector multiplication
row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row
b = row( A, 1UL ) * 2.0; // Scaling of the 1st row
b = 2.0 * row( A, 1UL ); // Scaling of the 1st row
row( A, 2UL ) += a; // Addition assignment
row( A, 2UL ) -= c; // Subtraction assignment
row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment
double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors
A = trans( c ) * row( A, 1UL ); // Outer product between two vectors
\endcode
// \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that row views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st row of a column-major matrix A
auto row1 = row( A, 1UL );
for( auto it=row1.begin(); it!=row1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a row view on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a row view on a matrix
// with row-major storage format. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th row of the multiplication between A and B ...
blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// the 15th row of the column-major matrix A with B.
blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B;
\endcode
// Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible,
// using a row-major storage order for matrix \c A would result in a more efficient evaluation.
//
// \n Previous: \ref views_submatrices Next: \ref views_row_selections
*/
//*************************************************************************************************
//**Row Selections*********************************************************************************
/*!\page views_row_selections Row Selections
//
// \tableofcontents
//
//
// Row selections provide views on arbitrary compositions of rows of dense and sparse matrices.
// These views act as a reference to the selected rows and represent them as another dense or
// sparse matrix. This reference is valid and can be used in every way any other dense or sparse
// matrix can be used as long as the matrix containing the rows is not resized or entirely
// destroyed. The row selection also acts as an alias to the matrix elements in the specified
// range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are
// immediately visible in the matrix and changes made via the matrix are immediately visible
// in the rows.
//
//
// \n \section views_row_selections_setup Setup of Row Selections
//
// A row selection can be created very conveniently via the \c rows() function. It can be included
// via the header file
\code
#include <blaze/math/Rows.h>
\endcode
// The indices of the rows to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Selecting the rows 4, 6, 8, and 10 (compile time arguments)
auto rs1 = rows<4UL,6UL,8UL,10UL>( A );
// Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto rs2 = rows( A, { 3UL, 2UL, 1UL } );
auto rs3 = rows( A, list );
// Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
   const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto rs4 = rows( A, array );
auto rs5 = rows( A, array.data(), array.size() );
   // Selecting the row 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto rs6 = rows( A, vector );
auto rs7 = rows( A, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the rows of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times.
//
// Alternatively it is possible to pass a callable such as a lambda or functor that produces the
// indices:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A( 9UL, 18UL );
// Selecting all even rows of the matrix, i.e. selecting the rows 0, 2, 4, 6, and 8
auto rs1 = rows( A, []( size_t i ){ return i*2UL; }, 5UL );
// Selecting all odd rows of the matrix, i.e. selecting the rows 1, 3, 5, and 7
auto rs2 = rows( A, []( size_t i ){ return i*2UL+1UL; }, 4UL );
// Reversing the rows of the matrix, i.e. selecting the rows 8, 7, 6, 5, 4, 3, 2, 1, and 0
auto rs3 = rows( A, [max=A.rows()-1UL]( size_t i ){ return max-i; }, 9UL );
\endcode
// The \c rows() function returns an expression representing the view on the selected rows. The
// type of this expression depends on the given arguments, primarily the type of the matrix and
// the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using RowsType = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a
// row selection will always be treated as a row-major matrix, regardless of the storage order of
// the matrix containing the rows. The view can also be used on both sides of an assignment: It
// can either be used as an alias to grant write access to specific rows of a matrix primitive
// on the left-hand side of an assignment or to grant read-access to specific rows of a matrix
// primitive or expression on the right-hand side of an assignment. The following example
// demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
blaze::DynamicMatrix<double,blaze::columnMajor> B;
blaze::CompressedMatrix<double,blaze::rowMajor> C;
// ... Resizing and initialization
// Selecting the rows 1, 3, 5, and 7 of A
auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } );
// Setting rows 1, 3, 5, and 7 of A to row 4 of B
rs = rows( B, { 4UL, 4UL, 4UL, 4UL } );
// Setting the rows 2, 4, 6, and 8 of A to C
rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C;
// Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C
submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C
B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } );
\endcode
// \warning It is the programmer's responsibility to ensure the row selection does not outlive the
// viewed matrix:
\code
// Creating a row selection on a temporary matrix; results in a dangling reference!
auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_row_selections_element_access Element Access
//
// The elements of a row selection can be directly accessed via the function call operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the first four rows of A in reverse order
auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } );
// Setting the element (0,0) of the row selection, which corresponds
// to the element at position (3,0) in matrix A
rs(0,0) = 2.0;
\endcode
// Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as
// with matrices, in case of non-const row selection, \c begin() and \c end() return an iterator,
// which allows to manipulate the elements, in case of constant row selection an iterator to
// immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a selection of rows of matrix A
auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=rs.begin(0); it!=rs.end(0); ++it ) {
*it = ...; // OK: Write access to the dense value.
... = *it; // OK: Read access to the dense value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a selection of rows of matrix A
auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th row via iterators to non-const elements
for( auto it=rs.begin(0); it!=rs.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_row_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse row selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A
// The function call operator provides access to all possible elements of the sparse row
// selection, including the zero elements. In case the function call operator is used to
// access an element that is currently not stored in the sparse row selection, the element
// is inserted into the row selection.
rs(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the row selection it is inserted into the row selection, if it is already
// contained in the row selection its value is modified.
rs.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the row selection is the insert() function.
// However, it inserts the element only in case the element is not already contained in the
// row selection.
rs.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of row selections, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according row
// of the row selection and that the according row's capacity is large enough to hold the new
// element. Note however that due to the nature of a row selection, which may be an alias to
// an arbitrary collection of rows, the append() function does not work as efficiently for
// a row selection as it does for a matrix.
rs.reserve( 2UL, 10UL );
rs.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_row_selections_common_operations Common Operations
//
// A view on specific rows of a matrix can be used like any other dense or sparse matrix. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// row selections are views on specific rows of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on the rows 8, 16, 24, and 32 of matrix A
auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } );
rs.rows(); // Returns the number of rows of the row selection
rs.columns(); // Returns the number of columns of the row selection
rs.capacity(); // Returns the capacity of the row selection
rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection
rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection
   auto rs2 = rows( A, { 9UL, 17UL, 25UL, 33UL } );
swap( rs, rs2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_row_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse row selections can be used in all arithmetic operations that any other
// dense or sparse matrix can be used in. The following example gives an impression of the use
// of dense row selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and
// sparse matrices with fitting element types:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
   auto rs = rows( D1, indices1 );  // Selecting every third row of D1 in the range [0..21]
rs = D2; // Dense matrix assignment to the selected rows
rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows
D3 = rs + D2; // Dense matrix/dense matrix addition
S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction
D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product
D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication
rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows
D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows
D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows
rows( D1, indices1 ) += D2; // Addition assignment
rows( D1, indices2 ) -= S1; // Subtraction assignment
rows( D1, indices3 ) %= rs; // Schur product assignment
a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices
//
// Especially noteworthy is that row selections can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st and 3rd row of a column-major matrix A
auto rs = rows( A, { 1UL, 3UL } );
// Traversing row 0 of the selection, which corresponds to the 1st row of matrix A
for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) {
// ...
}
\endcode
// However, please note that creating a row selection on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a row selection on a matrix
// with row-major storage format. This is due to the non-contiguous storage of the matrix elements.
// Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th, 30th, and 45th row of the multiplication between A and B ...
blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } );
// ... is essentially the same as the following computation, which multiplies
// the 15th, 30th, and 45th row of the column-major matrix A with B.
blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B;
\endcode
// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible,
// using a row-major storage order for matrix \c A would result in a more efficient evaluation.
//
// \n Previous: \ref views_rows Next: \ref views_columns
*/
//*************************************************************************************************
//**Columns****************************************************************************************
/*!\page views_columns Columns
//
// \tableofcontents
//
//
// Just as rows provide a view on a specific row of a matrix, columns provide views on a specific
// column of a dense or sparse matrix. As such, columns act as a reference to a specific column.
// This reference is valid and can be used in every way any other column vector can be used as long
// as the matrix containing the column is not resized or entirely destroyed. Changes made to the
// elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the
// matrix and changes made via the matrix are immediately visible in the column.
//
//
// \n \section views_colums_setup Setup of Columns
// <hr>
//
// \image html column.png
// \image latex column.eps "Column view" width=250pt
//
// A reference to a dense or sparse column can be created very conveniently via the \c column()
// function. It can be included via the header file
\code
#include <blaze/math/Column.h>
\endcode
// The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of
// columns of the matrix, and can be specified either at compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st column of matrix A (compile time index)
auto col1 = column<1UL>( A );
// Creating a reference to the 2nd column of matrix A (runtime index)
auto col2 = column( A, 2UL );
\endcode
// The \c column() function returns an expression representing the column view. The type of this
// expression depends on the given column arguments, primarily the type of the matrix and the
// compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using ColumnType = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other column vector, i.e. it can be assigned to, it
// can be copied from, and it can be used in arithmetic operations. The reference can also be used
// on both sides of an assignment: The column can either be used as an alias to grant write access
// to a specific column of a matrix primitive on the left-hand side of an assignment or to grant
// read-access to a specific column of a matrix primitive or expression on the right-hand side
// of an assignment. The following example demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::columnVector> x;
blaze::CompressedVector<double,blaze::columnVector> y;
blaze::DynamicMatrix<double,blaze::columnMajor> A, B;
blaze::CompressedMatrix<double,blaze::columnMajor> C, D;
// ... Resizing and initialization
// Setting the 1st column of matrix A to x
auto col1 = column( A, 1UL );
col1 = x;
// Setting the 4th column of matrix B to y
column( B, 4UL ) = y;
// Setting x to the 2nd column of the result of the matrix multiplication
x = column( A * B, 2UL );
// Setting y to the 2nd column of the result of the sparse matrix multiplication
y = column( C * D, 2UL );
\endcode
// \warning It is the programmer's responsibility to ensure the column does not outlive the
// viewed matrix:
\code
// Creating a column on a temporary matrix; results in a dangling reference!
auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_columns_element_access Element Access
// <hr>
//
// The elements of a column can be directly accessed with the subscript operator.
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th column of matrix A
auto col4 = column( A, 4UL );
// Setting the 1st element of the dense column, which corresponds
// to the 1st element in the 4th column of matrix A
col4[1] = 2.0;
\endcode
// The numbering of the column elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of rows of the referenced matrix. Alternatively, the elements of a column
// can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin()
// and \c end() return an iterator, which allows to manipulate the elements, in case of constant
// columns an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
auto col31 = column( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=col31.begin(); it!=col31.end(); ++it ) {
   *it = ...;  // OK: Write access to the dense column value.
... = *it; // OK: Read access to the dense column value.
}
// Traversing the elements via iterators to const elements
for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense column value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
auto col31 = column( A, 31UL );
// Traversing the elements via iterators to non-const elements
for( auto it=col31.begin(); it!=col31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_columns_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse column can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix
auto col0( column( A, 0UL ) ); // Reference to the 0th column of A
// The subscript operator provides access to all possible elements of the sparse column,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse column, the element is inserted into the column.
col0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the column it is inserted into the column, if it is already contained
// in the column its value is modified.
col0.set( 45UL, -1.2 );
// An alternative for inserting elements into the column is the insert() function. However,
// it inserts the element only in case the element is not already contained in the column.
col0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse column is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the column and that the column's capacity is
// large enough to hold the new element.
col0.reserve( 10UL );
col0.append( 51UL, -2.1 );
\endcode
// \n \section views_columns_common_operations Common Operations
// <hr>
//
// A column view can be used like any other column vector. This means that with only a few
// exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance,
// the current number of elements can be obtained via the \c size() function, the current capacity
// via the \c capacity() function, and the number of non-zero elements via the \c nonZeros()
// function. However, since columns are references to specific columns of a matrix, several
// operations are not possible on views, such as resizing and swapping. The following example
// shows this by means of a dense column view:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd column of matrix A
auto col2 = column( A, 2UL );
col2.size(); // Returns the number of elements in the column
col2.capacity(); // Returns the capacity of the column
col2.nonZeros(); // Returns the number of non-zero elements contained in the column
col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix
auto col3 = column( A, 3UL );
swap( col2, col3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_columns_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse columns can be used in all arithmetic operations that any other dense or
// sparse column vector can be used in. The following example gives an impression of the use of
// dense columns within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse columns with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix
auto col0( column( A, 0UL ) ); // Reference to the 0th column of A
col0[0] = 0.0; // Manual initialization of the 0th column of A
col0[1] = 0.0;
column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A
column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A
column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A
b = col0 + a; // Dense vector/dense vector addition
b = c + column( A, 1UL ); // Sparse vector/dense vector addition
b = col0 * column( A, 2UL ); // Component-wise vector multiplication
column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column
b = column( A, 1UL ) * 2.0; // Scaling of the 1st column
b = 2.0 * column( A, 1UL ); // Scaling of the 1st column
column( A, 2UL ) += a; // Addition assignment
column( A, 2UL ) -= c; // Subtraction assignment
column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment
double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors
A = column( A, 1UL ) * trans( c ); // Outer product between two vectors
\endcode
// \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that column views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st column of a row-major matrix A
auto col1 = column( A, 1UL );
for( auto it=col1.begin(); it!=col1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a column view on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column view on a matrix
// with column-major storage format. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two row-major matrices
blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th column of the multiplication between A and B ...
blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// A with the 15th column of the row-major matrix B.
blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL );
\endcode
// Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible,
// using a column-major storage order for matrix \c B would result in a more efficient evaluation.
//
// \n Previous: \ref views_row_selections Next: \ref views_column_selections
*/
//*************************************************************************************************
//**Column Selections******************************************************************************
/*!\page views_column_selections Column Selections
//
// \tableofcontents
//
//
// Column selections provide views on arbitrary compositions of columns of dense and sparse
// matrices. These views act as a reference to the selected columns and represent them as another
// dense or sparse matrix. This reference is valid and can be used in every way any other dense
// or sparse matrix can be used as long as the matrix containing the columns is not resized or
// entirely destroyed. The column selection also acts as an alias to the matrix elements in the
// specified range: Changes made to the columns (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the columns.
//
//
// \n \section views_column_selections_setup Setup of Column Selections
//
// A column selection can be created very conveniently via the \c columns() function. It can be
// included via the header file
\code
#include <blaze/math/Columns.h>
\endcode
// The indices of the columns to be selected can be specified either at compile time or at runtime
// (by means of an initializer list, array or vector):
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Selecting the columns 4, 6, 8, and 10 (compile time arguments)
auto cs1 = columns<4UL,6UL,8UL,10UL>( A );
// Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list)
const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL };
auto cs2 = columns( A, { 3UL, 2UL, 1UL } );
auto cs3 = columns( A, list );
// Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array)
const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL };
auto cs4 = columns( A, array );
auto cs5 = columns( A, array.data(), array.size() );
// Selecting the column 4 five times (runtime arguments via a std::vector)
const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL };
auto cs6 = columns( A, vector );
auto cs7 = columns( A, vector.data(), vector.size() );
\endcode
// Note that it is possible to alias the columns of the underlying matrix in any order. Also note
// that it is possible to use the same index multiple times.
//
// Alternatively it is possible to pass a callable such as a lambda or functor that produces the
// indices:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 18UL, 9UL );
// Selecting all even columns of the matrix, i.e. selecting the columns 0, 2, 4, 6, and 8
auto cs1 = columns( A, []( size_t i ){ return i*2UL; }, 5UL );
// Selecting all odd columns of the matrix, i.e. selecting the columns 1, 3, 5, and 7
auto cs2 = columns( A, []( size_t i ){ return i*2UL+1UL; }, 4UL );
// Reversing the columns of the matrix, i.e. selecting the columns 8, 7, 6, 5, 4, 3, 2, 1, and 0
auto cs3 = columns( A, [max=A.columns()-1UL]( size_t i ){ return max-i; }, 9UL );
\endcode
// The \c columns() function returns an expression representing the view on the selected columns.
// The type of this expression depends on the given arguments, primarily the type of the matrix
// and the compile time arguments. If the type is required, it can be determined via the \c decltype
// specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using ColumnsType = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) );
\endcode
// The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned
// to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a
// column selection will always be treated as a column-major matrix, regardless of the storage
// order of the matrix containing the columns. The view can also be used on both sides of an
// assignment: It can either be used as an alias to grant write access to specific columns of a
// matrix primitive on the left-hand side of an assignment or to grant read-access to specific
// columns of a matrix primitive or expression on the right-hand side of an assignment. The
// following example demonstrates this in detail:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
blaze::DynamicMatrix<double,blaze::rowMajor> B;
blaze::CompressedMatrix<double,blaze::columnMajor> C;
// ... Resizing and initialization
// Selecting the columns 1, 3, 5, and 7 of A
auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } );
// Setting columns 1, 3, 5, and 7 of A to column 4 of B
cs = columns( B, { 4UL, 4UL, 4UL, 4UL } );
// Setting the columns 2, 4, 6, and 8 of A to C
columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C;
// Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C
submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } );
// Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C
B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } );
\endcode
// \warning It is the programmer's responsibility to ensure the column selection does not outlive
// the viewed matrix:
\code
// Creating a column selection on a temporary matrix; results in a dangling reference!
auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_column_selections_element_access Element Access
//
// The elements of a column selection can be directly accessed via the function call operator:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
// Creating a view on the first four columns of A in reverse order
auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } );
// Setting the element (0,0) of the column selection, which corresponds
// to the element at position (0,3) in matrix A
cs(0,0) = 2.0;
\endcode
// Alternatively, the elements of a column selection can be traversed via (const) iterators.
// Just as with matrices, in case of non-const column selection, \c begin() and \c end() return
// an iterator, which allows to manipulate the elements, in case of constant column selection an
// iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL );
// ... Resizing and initialization
// Creating a reference to a selection of columns of matrix A
auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th column via iterators to non-const elements
for( auto it=cs.begin(0); it!=cs.end(0); ++it ) {
*it = ...; // OK: Write access to the dense value.
... = *it; // OK: Read access to the dense value.
}
// Traversing the elements of the 1st column via iterators to const elements
for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL );
// ... Resizing and initialization
// Creating a reference to a selection of columns of matrix A
auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } );
// Traversing the elements of the 0th column via iterators to non-const elements
for( auto it=cs.begin(0); it!=cs.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st column via iterators to const elements
for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_column_selections_element_insertion Element Insertion
//
// Inserting/accessing elements in a sparse column selection can be done by several alternative
// functions. The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256
auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A
// The function call operator provides access to all possible elements of the sparse column
// selection, including the zero elements. In case the function call operator is used to
// access an element that is currently not stored in the sparse column selection, the element
// is inserted into the column selection.
cs(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the column selection it is inserted into the column selection, if it is
// already contained in the column selection its value is modified.
cs.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the column selection is the insert() function.
// However, it inserts the element only in case the element is not already contained in the
// column selection.
cs.insert( 2UL, 6UL, 3.7 );
// Just as in the case of sparse matrices, elements can also be inserted via the append()
// function. In case of column selections, append() also requires that the appended element's
// index is strictly larger than the currently largest non-zero index in the according column
// of the column selection and that the according column's capacity is large enough to hold the
// new element. Note however that due to the nature of a column selection, which may be an alias
// to an arbitrary collection of columns, the append() function does not work as efficiently
// for a column selection as it does for a matrix.
cs.reserve( 2UL, 10UL );
cs.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_column_selections_common_operations Common Operations
//
// A view on specific columns of a matrix can be used like any other dense or sparse matrix. For
// instance, the current size of the matrix, i.e. the number of rows or columns can be obtained
// via the \c rows() and \c columns() functions, the current total capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// column selections are views on specific columns of a matrix, several operations are not possible,
// such as resizing and swapping:
\code
blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a view on the columns 8, 16, 24, and 32 of matrix A
auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } );
cs.rows(); // Returns the number of rows of the column selection
cs.columns(); // Returns the number of columns of the column selection
cs.capacity(); // Returns the capacity of the column selection
cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection
cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection
auto cs2 = columns( A, { 9UL, 17UL, 25UL, 33UL } );
swap( cs, cs2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_column_selections_arithmetic_operations Arithmetic Operations
//
// Both dense and sparse column selections can be used in all arithmetic operations that any other
// dense or sparse matrix can be used in. The following example gives an impression of the use of
// dense column selections within arithmetic operations. All operations (addition, subtraction,
// multiplication, scaling, ...) can be performed on all possible combinations of dense and
// sparse matrices with fitting element types:
\code
blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3;
blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2;
blaze::CompressedVector<double,blaze::columnVector> a, b;
// ... Resizing and initialization
std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL };
std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL };
std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL };
auto cs = columns( D1, indices1 ); // Selecting every third column of D1 in the range [0..21]
cs = D2; // Dense matrix assignment to the selected columns
columns( D1, indices2 ) = S1; // Sparse matrix assignment to the selected columns
D3 = cs + D2; // Dense matrix/dense matrix addition
S2 = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction
D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur product
D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication
columns( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of columns
D2 = columns( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of columns
D2 = 2.0 * columns( D1, indices3 ); // Scaling of the elements in the third selection of columns
columns( D1, indices1 ) += D2; // Addition assignment
columns( D1, indices2 ) -= S1; // Subtraction assignment
columns( D1, indices3 ) %= cs; // Schur product assignment
a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix
//
// Especially noteworthy is that column selections can be created for both row-major and
// column-major matrices. Whereas the interface of a row-major matrix only allows to traverse a
// row directly and the interface of a column-major matrix only allows to traverse a column, via
// views it is possible to traverse a row of a column-major matrix or a column of a row-major
// matrix. For instance:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL );
// ... Resizing and initialization
// Creating a reference to the 1st and 3rd column of a row-major matrix A
auto cs = columns( A, { 1UL, 3UL } );
// Traversing column 0 of the selection, which corresponds to the 1st column of matrix A
for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) {
// ...
}
\endcode
// However, please note that creating a column selection on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a column selection on a
// matrix with column-major storage format. This is due to the non-contiguous storage of the
// matrix elements. Therefore care has to be taken in the choice of the most suitable storage
// order:
\code
// Setup of two row-major matrices
blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL );
blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th, 30th, and 45th column of the multiplication between A and B ...
blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } );
// ... is essentially the same as the following computation, which multiplies
// A with the 15th, 30th, and 45th column of the row-major matrix B.
blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } );
\endcode
// Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible,
// using a column-major storage order for matrix \c B would result in a more efficient evaluation.
//
// \n Previous: \ref views_columns Next: \ref views_bands
*/
//*************************************************************************************************
//**Bands******************************************************************************************
/*!\page views_bands Bands
//
// \tableofcontents
//
//
// Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the
// subdiagonal, ...). As such, bands act as a reference to a specific band. This reference
// is valid and can be used in every way any other vector can be used as long as the matrix
// containing the band is not resized or entirely destroyed. The band also acts as an alias to
// the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing
// elements) are immediately visible in the matrix and changes made via the matrix are immediately
// visible in the band.
//
//
// \n \section views_bands_setup Setup of Bands
// <hr>
//
// \image html band.png
// \image latex band.eps "Band view" width=250pt
//
// A reference to a dense or sparse band can be created very conveniently via the \c band()
// function. It can be included via the header file
\code
#include <blaze/math/Band.h>
\endcode
// The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the
// total number of rows and \c N is the total number of columns, and can be specified both at
// compile time or at runtime:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the 1st lower band of matrix A (compile time index)
auto band1 = band<-1L>( A );
// Creating a reference to the 2nd upper band of matrix A (runtime index)
auto band2 = band( A, 2L );
\endcode
// In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view
// on the diagonal of a dense or sparse matrix. It has the same effect as calling the \c band()
// function with a compile time index of 0:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a reference to the diagonal of matrix A via the band() and diagonal() functions
auto diag1 = band<0L>( A );
auto diag2 = diagonal( A );
static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" );
\endcode
// Both the \c band() and the \c diagonal() function return an expression representing the band
// view. The type of this expression depends on the given arguments, primarily the type of the
// matrix and the compile time arguments. If the type is required, it can be determined via
// \c decltype specifier:
\code
using MatrixType = blaze::DynamicMatrix<int>;
using BandType = decltype( blaze::band<1L>( std::declval<MatrixType>() ) );
using DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) );
\endcode
// This resulting view can be treated as any other vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. By default, bands are considered
// column vectors, but this setting can be changed via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch
// (see \ref transpose_flag). The reference can also be used on both sides of an assignment: The
// band can either be used as an alias to grant write access to a specific band of a matrix
// primitive on the left-hand side of an assignment or to grant read-access to a specific band of
// a matrix primitive or expression on the right-hand side of an assignment. The following example
// demonstrates this in detail:
\code
blaze::DynamicVector<double,blaze::rowVector> x;
blaze::CompressedVector<double,blaze::rowVector> y;
blaze::DynamicMatrix<double,blaze::rowMajor> A, B;
blaze::CompressedMatrix<double,blaze::rowMajor> C, D;
// ... Resizing and initialization
// Setting the 2nd upper band of matrix A to x
auto band2 = band( A, 2L );
band2 = x;
// Setting the 3rd upper band of matrix B to y
band( B, 3L ) = y;
// Setting x to the 2nd lower band of the result of the matrix multiplication
x = band( A * B, -2L );
// Setting y to the 2nd upper band of the result of the sparse matrix multiplication
y = band( C * D, 2L );
\endcode
// \warning It is the programmer's responsibility to ensure the band does not outlive the viewed
// matrix:
\code
// Creating a band on a temporary matrix; results in a dangling reference!
auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } );
\endcode
// \n \section views_bands_element_access Element Access
// <hr>
//
// The elements of a band can be directly accessed with the subscript operator:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
// Creating a view on the 4th upper band of matrix A
auto band4 = band( A, 4L );
// Setting the 1st element of the dense band, which corresponds
// to the 1st element in the 4th upper band of matrix A
band4[1] = 2.0;
\endcode
// The numbering of the band elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of elements of the referenced band. Alternatively, the elements of a band
// can be traversed via iterators. Just as with vectors, in case of non-const band, \c begin() and
// \c end() return an iterator, which allows to manipulate the elements, in case of constant bands
// an iterator to immutable elements is returned:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 5th upper band of matrix A
auto band5 = band( A, 5L );
// Traversing the elements via iterators to non-const elements
for( auto it=band5.begin(); it!=band5.end(); ++it ) {
*it = ...; // OK: Write access to the dense band value.
... = *it; // OK: Read access to the dense band value.
}
// Traversing the elements via iterators to const elements
for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = *it; // OK: Read access to the dense band value.
}
\endcode
\code
blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 5th band of matrix A
auto band5 = band( A, 5L );
// Traversing the elements via iterators to non-const elements
for( auto it=band5.begin(); it!=band5.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements via iterators to const elements
for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_bands_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse band can be done by several alternative functions.
// The following example demonstrates all options:
\code
blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix
auto diag( band( A, 0L ) ); // Reference to the diagonal of A
// The subscript operator provides access to all possible elements of the sparse band,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse band, the element is inserted into the band.
diag[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the band it is inserted into the band, if it is already contained in
// the band its value is modified.
diag.set( 45UL, -1.2 );
// An alternative for inserting elements into the band is the insert() function. However,
// it inserts the element only in case the element is not already contained in the band.
diag.insert( 50UL, 3.7 );
\endcode
// \n \section views_bands_common_operations Common Operations
// <hr>
//
// A band view can be used like any other column vector. This means that with only a few
// exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance,
// the current number of band elements can be obtained via the \c size() function, the current
// capacity via the \c capacity() function, and the number of non-zero elements via the
// \c nonZeros() function. However, since bands are references to specific bands of a matrix,
// several operations are not possible, such as resizing and swapping. The following example
// shows this by means of a dense band view:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd upper band of matrix A
auto band2 = band( A, 2L );
band2.size(); // Returns the number of elements in the band
band2.capacity(); // Returns the capacity of the band
band2.nonZeros(); // Returns the number of non-zero elements contained in the band
band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix
auto band3 = band( A, 3L );
swap( band2, band3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_bands_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse bands can be used in all arithmetic operations that any other dense or
// sparse vector can be used in. The following example gives an impression of the use of dense
// bands within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse bands with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix
auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A
auto diag ( band( A, 0L ) ); // Reference to the diagonal of A
band1[0] = 0.0; // Manual initialization of the 1st upper band of A
diag = 1.0; // Homogeneous initialization of the diagonal of A
band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A
band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A
b = diag + a; // Dense vector/dense vector addition
b = c + band( A, -1L ); // Sparse vector/dense vector addition
b = diag * band( A, -2L ); // Component-wise vector multiplication
band( A, -1L ) *= 2.0; // In-place scaling of the 1st lower band
b = band( A, -1L ) * 2.0; // Scaling of the 1st lower band
b = 2.0 * band( A, -1L ); // Scaling of the 1st lower band
band( A, -2L ) += a; // Addition assignment
band( A, -2L ) -= c; // Subtraction assignment
band( A, -2L ) *= band( A, 0L ); // Multiplication assignment
double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors
A = band( A, -1L ) * trans( c ); // Outer product between two vectors
\endcode
// \n Previous: \ref views_column_selections Next: \ref arithmetic_operations
*/
//*************************************************************************************************
//**Arithmetic Operations**************************************************************************
/*!\page arithmetic_operations Arithmetic Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following arithmetic operations for vectors and matrices:
//
// <ul>
// <li> \ref addition
// <ul>
// <li> \ref vector_vector_addition </li>
// <li> \ref matrix_matrix_addition </li>
// <li> \ref scalar_addition </li>
// </ul>
// </li>
// <li> \ref subtraction
// <ul>
// <li> \ref vector_vector_subtraction </li>
// <li> \ref matrix_matrix_subtraction </li>
// <li> \ref scalar_subtraction </li>
// </ul>
// </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// <li> \ref vector_kronecker_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication
// <ul>
// <li> \ref schur_product </li>
// <li> \ref matrix_product </li>
// <li> \ref matrix_kronecker_product </li>
// </ul>
// </li>
// </ul>
//
// \n Previous: \ref views_bands Next: \ref addition
*/
//*************************************************************************************************
//**Addition***************************************************************************************
/*!\page addition Addition
//
// \n \section vector_vector_addition Vector/Vector Addition
// <hr>
//
// The addition of vectors is as intuitive as the addition of scalar values. For the addition of
// any two vectors the addition operator (i.e. \c operator+()) can be used. It even enables the
// addition of dense and sparse vectors:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 + v2; // Addition of a dense and a sparse column vector of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to add vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 + v2; // Compilation error: Cannot add a column vector and a row vector
v1 + trans( v2 ); // OK: Addition of two column vectors
\endcode
// Also note that the addition of two vectors with the same element type is favorable due to
// possible vectorization of the operation:
\code
blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 + v2; // Vectorized addition of two double precision vectors
\endcode
// \n \section matrix_matrix_addition Matrix/Matrix Addition
// <hr>
//
// For the addition of any two matrices the addition operator (i.e. \c operator+()) can be used.
// It even enables the addition of dense and sparse matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::CompressedMatrix<size_t,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<float,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 + M2; // Addition of a sparse column-major and a dense row-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to add row-major and column-major matrices.
// Note however that in favor of performance the addition of two matrices with the same storage
// order is favorable. The same argument holds for the element type: In case two matrices with
// the same element type are added, the performance can be much higher due to vectorization of
// the operation.
\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices
\endcode
// \n \section scalar_addition Scalar Addition
// <hr>
//
// For convenience it is also possible to add a scalar value to a dense vector or dense matrix,
// which has the same effect as adding a uniform vector or matrix. In \b Blaze it is possible to
// use all built-in/fundamental data types except bool as scalar values. Additionally, it is
// possible to use \c std::complex values with the same built-in data types as element type.
// Examples:
\code
blaze::StaticVector<int,6UL> v1{ 3, 2, 5, -4, 1, 6 };
blaze::DynamicVector<int> v2 = v1 + 2; // Results in { 5, 4, 7, -2, 3, 8 }
blaze::CompressedVector<int> v3 = 3 + v1; // Results in { 6, 5, 8, -1, 4, 9 }
\endcode
\code
blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 },
{ -4, 1, 6 } };
blaze::DynamicMatrix<int> M2 = M1 + 2; // Results in { { 5, 4, 7 }, { -2, 3, 8 } }
blaze::CompressedMatrix<int> M3 = 3 + M1; // Results in { { 6, 5, 8 }, { -1, 4, 9 } }
\endcode
// \n Previous: \ref arithmetic_operations Next: \ref subtraction
*/
//*************************************************************************************************
//**Subtraction************************************************************************************
/*!\page subtraction Subtraction
//
// \n \section vector_vector_subtraction Vector/Vector Subtraction
// <hr>
//
// The subtraction of vectors works exactly as intuitive as the addition, but with the subtraction
// operator (i.e. \c operator-()). It also enables the subtraction of dense and sparse vectors:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
v3 = v1 - v2; // Subtraction of a dense and a sparse column vector of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector
v1 - trans( v2 ); // OK: Subtraction of two column vectors
\endcode
// Also note that the subtraction of two vectors with the same element type is favorable due to
// possible vectorization of the operation:
\code
blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 - v2; // Vectorized subtraction of two double precision vectors
\endcode
// \n \section matrix_matrix_subtraction Matrix/Matrix Subtraction
// <hr>
//
// For the subtraction of any two matrices the subtraction operator (i.e. \c operator-()) can be
// used. It even enables the subtraction of dense and sparse matrices:
\code
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to subtract row-major and column-major
// matrices. Note however that in favor of performance the subtraction of two matrices with the
// same storage order is favorable. The same argument holds for the element type: In case two
// matrices with the same element type are subtracted, the performance can be much higher due
// to vectorization of the operation.
\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices
\endcode
// \n \section scalar_subtraction Scalar Subtraction
// <hr>
//
// For convenience it is also possible to subtract a scalar value from a dense vector or dense
// matrix, which has the same effect as subtracting a uniform vector or matrix. In \b Blaze it is
// possible to use all built-in/fundamental data types except bool as scalar values. Additionally,
// it is possible to use \c std::complex values with the same built-in data types as element type.
// Examples:
\code
blaze::StaticVector<int,6UL> v1{ 3, 2, 5, -4, 1, 6 };
blaze::DynamicVector<int> v2 = v1 - 2; // Results in { 1, 0, 3, -6, -1, 4 }
blaze::CompressedVector<int> v3 = 3 - v1; // Results in { 0, 1, -2, 7, 2, -3 }
\endcode
\code
blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 },
{ -4, 1, 6 } };
blaze::DynamicMatrix<int> M2 = M1 - 2; // Results in { { 1, 0, 3 }, { -6, -1, 4 } }
blaze::CompressedMatrix<int> M3 = 3 - M1; // Results in { { 0, 1, -2 }, { 7, 2, -3 } }
\endcode
// \n Previous: \ref addition Next: \ref scalar_multiplication
*/
//*************************************************************************************************
//**Scalar Multiplication**************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of vector or a matrix with a scalar value.
// Alternatively it is also possible to divide a vector or a matrix by a scalar value. In \b Blaze
// it is possible to use all built-in/fundamental data types except bool as scalar values.
// Additionally, it is possible to use \c std::complex values with the same built-in data types
// as element type.
\code
blaze::StaticVector<int,3UL> v1{ 1, 2, 3 };
blaze::DynamicVector<double> v2 = v1 * 1.2; // Scalar multiplication
blaze::CompressedVector<float> v3 = -0.3F * v1; // Scalar multiplication
blaze::DynamicVector<double> v4 = v1 / 1.2; // Scalar division
blaze::CompressedVector<float> v5 = 12.0F / v1; // Scalar division (only dense vectors)
\endcode
\code
blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
blaze::DynamicMatrix<double> M2 = M1 * 1.2; // Scalar multiplication
blaze::CompressedMatrix<float> M3 = -0.3F * M1; // Scalar multiplication
blaze::DynamicMatrix<double> M4 = M1 / 1.2; // Scalar division
blaze::CompressedMatrix<float> M5 = 12.0F / M1; // Scalar division (only dense matrices)
\endcode
// Vectors and matrices cannot be used as scalar values for scalar multiplications or divisions
// (see the following example). However, each vector and matrix provides the \c scale() function,
// which can be used to scale a vector or matrix element-wise with arbitrary scalar data types:
\code
blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1;
blaze::StaticMatrix<int,3UL,3UL> scalar;
M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication
M1.scale( scalar ); // Scalar multiplication
\endcode
// \n Previous: \ref subtraction Next: \ref componentwise_multiplication
*/
//*************************************************************************************************
//**Vector/Vector Multiplication*******************************************************************
/*!\page vector_vector_multiplication Vector/Vector Multiplication
//
// \n \section componentwise_multiplication Componentwise Multiplication
// <hr>
//
// Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or
// blaze::rowVector) via the multiplication operator results in a componentwise multiplication
// of the two vectors:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and
// a dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row
// vectors. The result is a dense row vector.
\endcode
// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
int result = v1 * v2; // Results in the value 15
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
int result = v1 * trans( v2 ); // Also results in the value 15
\endcode
// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
// All alternatives for the inner product between a column vector and a row vector
int result1 = trans( v1 ) * trans( v2 );
int result2 = inner( v1, v2 );
int result3 = dot( v1, v2 );
int result4 = (v1,v2);
\endcode
// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 };
StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;
\endcode
// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors
\endcode
// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as
\f[
\left(\begin{array}{*{1}{c}}
c_0 \\
c_1 \\
c_2 \\
\end{array}\right)
=
\left(\begin{array}{*{1}{c}}
a_1 b_2 - a_2 b_1 \\
a_2 b_0 - a_0 b_2 \\
a_0 b_1 - a_1 b_0 \\
\end{array}\right).
\f]
// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%)
// can be used in case infix notation is required:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) );
blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 );
\endcode
// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
//
//
// \n \section vector_kronecker_product Kronecker Product
// <hr>
//
// The Kronecker product of two vectors with the same transpose flag can be computed via the
// \a kron() function:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
DynamicVector<double> v1( 28UL );
CompressedVector<float> v2( 17UL );
// ... Initialization of the vectors
CompressedVector<double> v3 = kron( v1, v2 );
\endcode
// Both dense and sparse vectors can be used for a Kronecker product. It is possible to multiply
// two vectors with different element type, as long as the element types themselves can be
// multiplied.
//
// \n Previous: \ref scalar_multiplication Next: \ref vector_vector_division
*/
//*************************************************************************************************
//**Vector/Vector Division*************************************************************************
/*!\page vector_vector_division Vector/Vector Division
//
// \n \section componentwise_division Componentwise Division
// <hr>
//
// Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector
// or blaze::rowVector) via the division operator results in a componentwise division:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a
// dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row
// vectors. The result is a dense row vector.
\endcode
// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
// \n Previous: \ref vector_vector_multiplication Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************
//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::DynamicMatrix;
DynamicMatrix<int> M1( 39UL, 12UL );
StaticVector<int,12UL,columnVector> v1;
// ... Initialization of the matrix and the vector
DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication
DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication
\endcode
// Note that the storage order of the matrix poses no restrictions on the operation. Also note,
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_division Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************
//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// \n \section schur_product Componentwise Multiplication / Schur Product
// <hr>
//
// Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns)
// via the modulo operator results in a componentwise multiplication (Schur product) of the two
// matrices:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 28UL, 35UL );
CompressedMatrix<float> M2( 28UL, 35UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 % M2;
\endcode
// Both dense and sparse matrices can be used for a Schur product. The storage order of the two
// matrices poses no restrictions on the operation, all variations are possible. It is also
// possible to multiply two matrices with different element type, as long as the element types
// themselves can be multiplied.
//
//
// \n \section matrix_product Matrix Product
// <hr>
//
// The matrix/matrix product can be formulated exactly as in mathematical textbooks:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 45UL, 85UL );
CompressedMatrix<float> M2( 85UL, 37UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 * M2;
\endcode
// The storage order of the two matrices poses no restrictions on the operation, all variations
// are possible. It is also possible to multiply two matrices with different element type, as
// long as the element types themselves can be multiplied and added. Note however that the
// highest performance for a multiplication between two matrices can be expected for two
// matrices with the same scalar element type.
//
// In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper
// triangular, or diagonal, the computation can be optimized by explicitly declaring the
// multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by
// means of the \ref matrix_operations_declaration_operations :
\code
using blaze::DynamicMatrix;
DynamicMatrix<double> M1, M2, M3;
// ... Initialization of the square matrices
M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric
M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian
M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular
M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular
M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal
\endcode
// Using a declaration operation on a multiplication expression can speed up the computation
// by a factor of 2. Note however that the caller of the according declaration operation takes
// full responsibility for the correctness of the declaration. Falsely declaring a multiplication
// as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined
// behavior!
//
//
// \n \section matrix_kronecker_product Kronecker Product
// <hr>
//
// The Kronecker product of two matrices can be computed via the \a kron() function:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 28UL, 35UL );
CompressedMatrix<float> M2( 17UL, 11UL );
// ... Initialization of the matrices
CompressedMatrix<double> M3 = kron( M1, M2 );
\endcode
// Both dense and sparse matrices can be used for a Kronecker product. The storage order of the
// two matrices poses no restrictions on the operation, all variations are possible. It is also
// possible to multiply two matrices with different element type, as long as the element types
// themselves can be multiplied.
//
// \n Previous: \ref matrix_vector_multiplication Next: \ref bitwise_operations
*/
//*************************************************************************************************
//**Bitwise Operations*****************************************************************************
/*!\page bitwise_operations Bitwise Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following bitwise operations for vectors and matrices:
//
// <ul>
// <li> \ref bitwise_shift
// <ul>
// <li> \ref vector_vector_shift </li>
// <li> \ref matrix_matrix_shift </li>
// <li> \ref scalar_shift </li>
// </ul>
// </li>
// <li> \ref bitwise_and
// <ul>
// <li> \ref vector_vector_bitand </li>
// <li> \ref matrix_matrix_bitand </li>
// <li> \ref scalar_bitand </li>
// </ul>
// </li>
// <li> \ref bitwise_or
// <ul>
// <li> \ref vector_vector_bitor </li>
// <li> \ref matrix_matrix_bitor </li>
// <li> \ref scalar_bitor </li>
// </ul>
// </li>
// <li> \ref bitwise_xor
// <ul>
// <li> \ref vector_vector_bitxor </li>
// <li> \ref matrix_matrix_bitxor </li>
// <li> \ref scalar_bitxor </li>
// </ul>
// </li>
// </ul>
//
// \n Previous: \ref matrix_matrix_multiplication Next: \ref bitwise_shift
*/
//*************************************************************************************************
//**Bitwise Shift**********************************************************************************
/*!\page bitwise_shift Bitwise Shift
//
// \n \section vector_vector_shift Vector/Vector Shift
// <hr>
//
// Via the left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>())
// it is possible to perform an elementwise shift of a dense vector:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 << v2; // Elementwise left-shift of a dense column vector
v3 = v1 >> v2; // Elementwise right-shift of a dense column vector
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to shift vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 << v2; // Compilation error: Cannot shift a column vector by a row vector
v1 << trans( v2 ); // OK: Shifting a column vector by another column vector
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but
// shifting two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 << v2; // Vectorized left-shift of an unsigned int vector
\endcode
// \n \section matrix_matrix_shift Matrix/Matrix Shift
// <hr>
//
// The left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>())
// can also be used to perform an elementwise shift of a dense matrix:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 << M2; // Elementwise left-shift of a dense column-major matrix
M3 = M1 >> M2; // Elementwise right-shift of a dense column-major matrix
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 << M2; // Vectorized left-shift of an unsigned int matrix
\endcode
// \n \section scalar_shift Scalar Shift
// <hr>
//
// It is also possible to uniformly shift all elements of a dense vector or dense matrix by means
// of a scalar, which has the same effect as shifting by means of a uniform vector or matrix (see
// \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). In \b Blaze it is
// possible to use all built-in/fundamental data types except bool as scalar values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3, 2, 5, 4, 1, 6 };
// Uniform left-shift by one bit of all elements of v1; Results in
//
// ( 6, 4, 10, 8, 2, 12 )
//
blaze::DynamicVector<int> v2( v1 << 1U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3, 2, 5 },
{ 4, 1, 6 } };
// Uniform left-shift by one bit of all elements of M1; Results in
//
// ( 6, 4, 10 )
// ( 8, 2, 12 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 << 1U );
\endcode
// \n Previous: \ref bitwise_operations Next: \ref bitwise_and
*/
//*************************************************************************************************
//**Bitwise AND************************************************************************************
/*!\page bitwise_and Bitwise AND
//
// \n \section vector_vector_bitand Vector/Vector Bitwise AND
// <hr>
//
// Via the bitwise AND operator (i.e. operator&()) it is possible to perform an elementwise
// bitwise AND with dense vectors:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 & v2; // Elementwise bitwise AND of two dense column vectors of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 & v2; // Compilation error: Cannot AND a column vector and a row vector
v1 & trans( v2 ); // OK: Bitwise AND of two column vectors
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but a
// bitwise AND of two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 & v2; // Vectorized bitwise AND of an unsigned int vector
\endcode
// \n \section matrix_matrix_bitand Matrix/Matrix Bitwise AND
// <hr>
//
// The bitwise AND operator (i.e. operator&()) can also be used to perform an elementwise bitwise
// AND with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 & M2; // Elementwise bitwise AND of two dense matrices of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 & M2; // Vectorized bitwise AND of two row-major, unsigned int dense matrices
\endcode
// \n \section scalar_bitand Scalar Bitwise AND
// <hr>
//
// It is also possible to perform a bitwise AND between a dense vector or dense matrix and a
// scalar value, which has the same effect as performing a bitwise AND by means of a uniform
// vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix).
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U };
// Perform a bitwise AND with all elements of v1; Results in
//
// ( 3, 2, 1, 0, 1, 2 )
//
blaze::DynamicVector<int> v2( v1 & 3U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U },
{ 4U, 1U, 6U } };
// Perform a bitwise AND with all elements of M1; Results in
//
// ( 3, 2, 1 )
// ( 0, 1, 2 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 & 3U );
\endcode
// \n Previous: \ref bitwise_shift Next: \ref bitwise_or
*/
//*************************************************************************************************
//**Bitwise OR*************************************************************************************
/*!\page bitwise_or Bitwise OR
//
// \n \section vector_vector_bitor Vector/Vector Bitwise OR
// <hr>
//
// Via the bitwise OR operator (i.e. operator|()) it is possible to perform an elementwise
// bitwise OR with dense vectors:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 | v2; // Elementwise bitwise OR of two dense column vectors of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 | v2; // Compilation error: Cannot OR a column vector and a row vector
v1 | trans( v2 ); // OK: Bitwise OR of two column vectors
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but a
// bitwise OR of two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 | v2; // Vectorized bitwise OR of an unsigned int vector
\endcode
// \n \section matrix_matrix_bitor Matrix/Matrix Bitwise OR
// <hr>
//
// The bitwise OR operator (i.e. operator|()) can also be used to perform an elementwise bitwise
// OR with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 | M2; // Elementwise bitwise OR of two dense matrices of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 | M2; // Vectorized bitwise OR of two row-major, unsigned int dense matrices
\endcode
// \n \section scalar_bitor Scalar Bitwise OR
// <hr>
//
// It is also possible to perform a bitwise OR between a dense vector or dense matrix and a
// scalar value, which has the same effect as performing a bitwise OR by means of a uniform
// vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix).
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U };
// Perform a bitwise OR with all elements of v1; Results in
//
//    ( 3, 3, 7, 7, 3, 7 )
//
blaze::DynamicVector<int> v2( v1 | 3U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U },
{ 4U, 1U, 6U } };
// Perform a bitwise OR with all elements of M1; Results in
//
// ( 3, 3, 7 )
//    ( 7, 3, 7 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 | 3U );
\endcode
// \n Previous: \ref bitwise_and Next: \ref bitwise_xor
*/
//*************************************************************************************************
//**Bitwise XOR************************************************************************************
/*!\page bitwise_xor Bitwise XOR
//
// \n \section vector_vector_bitxor Vector/Vector Bitwise XOR
// <hr>
//
// Via the bitwise XOR operator (i.e. operator^()) it is possible to perform an elementwise
// bitwise XOR with dense vectors:
\code
blaze::DynamicVector<unsigned int> v1( 5UL ), v3;
blaze::DynamicVector<unsigned short> v2( 5UL );
// ... Initializing the vectors
v3 = v1 ^ v2; // Elementwise bitwise XOR of two dense column vectors of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 ^ v2; // Compilation error: Cannot XOR a column vector and a row vector
v1 ^ trans( v2 ); // OK: Bitwise XOR of two column vectors
\endcode
// Furthermore, it is possible to use different element types in the two vector operands, but a
// bitwise XOR of two vectors with the same element type is favorable due to possible vectorization
// of the operation:
\code
blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 ^ v2; // Vectorized bitwise XOR of an unsigned int vector
\endcode
// \n \section matrix_matrix_bitxor Matrix/Matrix Bitwise XOR
// <hr>
//
// The bitwise XOR operator (i.e. operator^()) can also be used to perform an elementwise bitwise
// XOR with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 ^ M2; // Elementwise bitwise XOR of two dense matrices of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable. The same argument holds for the element type: While it is
// possible to use matrices with different element type, using two matrices with the same element
// type potentially leads to better performance due to vectorization of the operation.
\code
blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 ^ M2; // Vectorized bitwise XOR of two row-major, unsigned int dense matrices
\endcode
// \n \section scalar_bitxor Scalar Bitwise XOR
// <hr>
//
// It is also possible to perform a bitwise XOR between a dense vector or dense matrix and a
// scalar value, which has the same effect as performing a bitwise XOR by means of a uniform
// vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix).
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Examples:
\code
blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U };
// Perform a bitwise XOR with all elements of v1; Results in
//
// ( 0, 1, 6, 7, 2, 5 )
//
blaze::DynamicVector<int> v2( v1 ^ 3U );
\endcode
\code
blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U },
{ 4U, 1U, 6U } };
// Perform a bitwise XOR with all elements of M1; Results in
//
// ( 0, 1, 6 )
// ( 7, 2, 5 )
//
blaze::DynamicMatrix<unsigned int> M2( M1 ^ 3U );
\endcode
// \n Previous: \ref bitwise_or Next: \ref logical_operations
*/
//*************************************************************************************************
//**Logical Operations*****************************************************************************
/*!\page logical_operations Logical Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following logical operations for vectors and matrices:
//
// <ul>
// <li> \ref logical_not
// <ul>
// <li> \ref vector_vector_not </li>
// <li> \ref matrix_matrix_not </li>
// </ul>
// </li>
// <li> \ref logical_and
// <ul>
// <li> \ref vector_vector_and </li>
// <li> \ref matrix_matrix_and </li>
// </ul>
// </li>
// <li> \ref logical_or
// <ul>
// <li> \ref vector_vector_or </li>
// <li> \ref matrix_matrix_or </li>
// </ul>
// </li>
// </ul>
//
// \n Previous: \ref bitwise_xor Next: \ref logical_not
*/
//*************************************************************************************************
//**Logical NOT************************************************************************************
/*!\page logical_not Logical NOT
//
// \n \section vector_vector_not Vector/Vector Logical NOT
// <hr>
//
// Via the logical NOT operator (i.e. operator!()) it is possible to compute an elementwise
// logical NOT of a dense vector:
\code
blaze::DynamicVector<bool> v1( 5UL ), v2;
// ... Initializing the vectors
v2 = !v1; // Elementwise logical NOT of a dense column vector
\endcode
// \n \section matrix_matrix_not Matrix/Matrix Logical NOT
// <hr>
//
// The logical NOT operator (i.e. operator!()) can also be used to compute an elementwise logical
// NOT with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<bool,rowMajor> M1( 7UL, 3UL ), M2;
// ... Initializing the matrices
M2 = !M1; // Elementwise logical NOT of a dense row-major matrix
\endcode
// \n Previous: \ref logical_operations Next: \ref logical_and
*/
//*************************************************************************************************
//**Logical AND************************************************************************************
/*!\page logical_and Logical AND
//
// \n \section vector_vector_and Vector/Vector Logical AND
// <hr>
//
// Via the logical AND operator (i.e. operator&&()) it is possible to compute an elementwise
// logical AND with dense vectors:
\code
blaze::DynamicVector<bool> v1( 5UL ), v3;
blaze::DynamicVector<bool> v2( 5UL );
// ... Initializing the vectors
v3 = v1 && v2; // Elementwise logical AND of two dense column vectors
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<bool,columnVector> v1( 5UL );
blaze::DynamicVector<bool,rowVector> v2( 5UL );
v1 && v2; // Compilation error: Cannot AND a column vector and a row vector
v1 && trans( v2 ); // OK: Logical AND of two column vectors
\endcode
// \n \section matrix_matrix_and Matrix/Matrix Logical AND
// <hr>
//
// The logical AND operator (i.e. operator&&()) can also be used to compute an elementwise logical
// AND with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 && M2; // Elementwise logical AND of two dense matrices
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable.
//
// \n Previous: \ref logical_not Next: \ref logical_or
*/
//*************************************************************************************************
//**Logical OR*************************************************************************************
/*!\page logical_or Logical OR
//
// \n \section vector_vector_or Vector/Vector Logical OR
// <hr>
//
// Via the logical OR operator (i.e. operator||()) it is possible to perform an elementwise
// logical OR with dense vectors:
\code
blaze::DynamicVector<bool> v1( 5UL ), v3;
blaze::DynamicVector<bool> v2( 5UL );
// ... Initializing the vectors
v3 = v1 || v2; // Elementwise logical OR of two dense column vectors
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that it is only possible to use vectors with
// the same transpose flag:
\code
using blaze::columnVector;
using blaze::rowVector;
blaze::DynamicVector<unsigned int,columnVector> v1( 5UL );
blaze::DynamicVector<unsigned int,rowVector> v2( 5UL );
v1 || v2; // Compilation error: Cannot OR a column vector and a row vector
v1 || trans( v2 ); // OK: Logical OR of two column vectors
\endcode
// \n \section matrix_matrix_or Matrix/Matrix Logical OR
// <hr>
//
// The logical OR operator (i.e. operator||()) can also be used to perform an elementwise logical
// OR with dense matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL );
blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 || M2; // Elementwise logical OR of two dense matrices
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. It is possible to use any combination of row-major and
// column-major matrices. Note however that in favor of performance using two matrices with the
// same storage order is favorable.
//
// \n Previous: \ref logical_and Next: \ref shared_memory_parallelization
*/
//*************************************************************************************************
//**Shared Memory Parallelization******************************************************************
/*!\page shared_memory_parallelization Shared Memory Parallelization
//
// For all possible operations \b Blaze tries to achieve maximum performance on a single CPU
// core. However, today's CPUs are not single core anymore, but provide several (homogeneous
// or heterogeneous) compute cores. In order to fully exploit the performance potential of a
// multicore CPU, computations have to be parallelized across all available cores of a CPU.
// For this purpose, \b Blaze provides four different shared memory parallelization techniques:
//
// - \ref hpx_parallelization
// - \ref cpp_threads_parallelization
// - \ref boost_threads_parallelization
// - \ref openmp_parallelization
//
// When any of the shared memory parallelization techniques is activated, all arithmetic
// operations on dense vectors and matrices (including additions, subtractions, multiplications,
// divisions, and all componentwise arithmetic operations) and most operations on sparse vectors
// and matrices are automatically run in parallel. However, in addition, \b Blaze provides means
// to enforce the serial execution of specific operations:
//
// - \ref serial_execution
//
// \n Previous: \ref logical_or Next: \ref hpx_parallelization
*/
//*************************************************************************************************
//**HPX Parallelization****************************************************************************
/*!\page hpx_parallelization HPX Parallelization
//
// \tableofcontents
//
//
// The first shared memory parallelization provided with \b Blaze is based on
// <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>.
//
//
// \n \section hpx_setup HPX Setup
// <hr>
//
// In order to enable the HPX-based parallelization, the following steps have to be taken: First,
// the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_HPX_THREADS ...
\endcode
// Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked.
// And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see
// the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a>
// for further details). These three actions will cause the \b Blaze library to automatically try
// to run all operations in parallel with the specified number of HPX threads.
//
// Note that the HPX-based parallelization has priority over the OpenMP-based, C++11 thread-based,
// and Boost thread-based parallelizations, i.e. is preferred in case multiple parallelizations
// are enabled in combination with the HPX thread parallelization.
//
// The number of threads used by the HPX backend has to be specified via the command line:
\code
... --hpx:threads 4 ...
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of HPX threads, the function will return the actual number of threads used by
// the HPX subsystem.
//
//
// \n \section hpx_configuration HPX Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given
// operation is large enough and exceeds a certain threshold the operation is executed in parallel.
// All thresholds related to the HPX-based parallelization are contained within the configuration
// file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the HPX-based parallelization.
//
// \n Previous: \ref shared_memory_parallelization Next: \ref cpp_threads_parallelization
*/
//*************************************************************************************************
//**C++11 Thread Parallelization*******************************************************************
/*!\page cpp_threads_parallelization C++11 Thread Parallelization
//
// \tableofcontents
//
//
// In addition to the HPX-based shared memory parallelization, starting with \b Blaze 2.1,
// \b Blaze also provides a shared memory parallelization based on C++11 threads.
//
//
// \n \section cpp_threads_setup C++11 Thread Setup
// <hr>
//
// In order to enable the C++11 thread-based parallelization, first the according C++11-specific
// compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument
// has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the
// compiler flags have to be extended by
\code
... -std=c++11 -DBLAZE_USE_CPP_THREADS ...
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of C++11 threads. Note that in case both HPX and C++11
// threads are enabled on the command line, the HPX-based parallelization has priority and is
// preferred.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of C++11 threads, the function will return the previously specified number of
// threads.
//
//
// \n \section cpp_threads_configuration C++11 Thread Configuration
// <hr>
//
// As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an
// operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for
// the overall performance, the operation is executed serially. One of the main reasons for not
// executing an operation in parallel is the size of the operands. For instance, a vector addition
// is only executed in parallel if the size of both vector operands exceeds a certain threshold.
// Otherwise, the performance could seriously decrease due to the overhead caused by the thread
// setup. However, in order to be able to adjust the \b Blaze library to a specific system, it
// is possible to configure these thresholds manually. All thresholds are contained within the
// configuration file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the C++11 thread parallelization.
//
//
// \n \section cpp_threads_known_issues Known Issues
// <hr>
//
// There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang
// if their destructor is executed after the \c main() function:
//
// http://connect.microsoft.com/VisualStudio/feedback/details/747145
//
// Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug.
// In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function,
// which can be used to manually destroy all threads at the end of the \c main() function:
\code
int main()
{
// ... Using the C++11 thread parallelization of Blaze
shutDownThreads();
}
\endcode
// Please note that this function may only be used at the end of the \c main() function. After
// this function no further computation may be executed! Also note that this function has an
// effect for Visual Studio compilers only and doesn't need to be used with any other compiler.
//
// \n Previous: \ref hpx_parallelization Next: \ref boost_threads_parallelization
*/
//*************************************************************************************************
//**Boost Thread Parallelization*******************************************************************
/*!\page boost_threads_parallelization Boost Thread Parallelization
//
// \tableofcontents
//
//
// The third available shared memory parallelization provided with \b Blaze is based
// on <a href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost threads</a>.
//
//
// \n \section boost_threads_setup Boost Thread Setup
// <hr>
//
// In order to enable the Boost thread-based parallelization, two steps have to be taken: First,
// the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_BOOST_THREADS ...
\endcode
// Second, the according Boost libraries have to be linked. These two simple actions will cause
// the \b Blaze library to automatically try to run all operations in parallel with the specified
// number of Boost threads. Note that the HPX-based and C++11 thread-based parallelizations have
// priority, i.e. are preferred in case either is enabled in combination with the Boost thread
// parallelization.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of Boost threads, the function will return the previously specified number of
// threads.
//
//
// \n \section boost_threads_configuration Boost Thread Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization).
// All thresholds related to the Boost thread parallelization are also contained within the
// configuration file <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the Boost thread parallelization.
//
// \n Previous: \ref cpp_threads_parallelization Next: \ref openmp_parallelization
*/
//*************************************************************************************************
//**OpenMP Parallelization*************************************************************************
/*!\page openmp_parallelization OpenMP Parallelization
//
// \tableofcontents
//
//
// The fourth and final shared memory parallelization provided with \b Blaze is based on
// <a href="https://www.openmp.org">OpenMP</a>.
//
//
// \n \section openmp_setup OpenMP Setup
// <hr>
//
// To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify
// the use of OpenMP on the command line:
\code
-fopenmp // GNU/Clang C++ compiler
-openmp // Intel C++ compiler
/openmp // Visual Studio
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of threads. Note however that the HPX-based, the C++11
// thread-based, and the Boost thread-based parallelizations have priority, i.e. are preferred in
// case either is enabled in combination with the OpenMP thread parallelization.
//
// As common for OpenMP, the number of threads can be specified either via an environment variable
\code
export OMP_NUM_THREADS=4 // Unix systems
set OMP_NUM_THREADS=4 // Windows systems
\endcode
// or via an explicit call to the \c omp_set_num_threads() function:
\code
omp_set_num_threads( 4 );
\endcode
// Alternatively, the number of threads can also be specified via the \c setNumThreads() function
// provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of OpenMP, the function returns the maximum number of threads OpenMP will use
// within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function.
//
//
// \n \section openmp_configuration OpenMP Configuration
// <hr>
//
// Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze
// deems the parallel execution as counterproductive for the overall performance, the operation
// is executed serially. One of the main reasons for not executing an operation in parallel is
// the size of the operands. For instance, a vector addition is only executed in parallel if the
// size of both vector operands exceeds a certain threshold. Otherwise, the performance could
// seriously decrease due to the overhead caused by the thread setup. However, in order to be
// able to adjust the \b Blaze library to a specific system, it is possible to configure these
// thresholds manually. All shared memory thresholds are contained within the configuration file
// <tt><blaze/config/Thresholds.h></tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique (see also \ref cpp_threads_parallelization and
// \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum
// performance for all possible situations and configurations. They merely provide a reasonable
// standard for the current CPU generation.
//
//
// \n \section openmp_first_touch First Touch Policy
// <hr>
//
// So far the \b Blaze library does not (yet) automatically initialize dynamic memory according
// to the first touch principle. Consider for instance the following vector triad example:
\code
using blaze::columnVector;
const size_t N( 1000000UL );
blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );
// Initialization of the vectors b, c, and d
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// Performing a vector triad
a = b + c * d;
\endcode
// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:
\code
// ...
// Initialization of the vectors b, c, and d
#pragma omp parallel for
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// ...
\endcode
// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP, threads are explicitly spawned via an OpenMP parallel directive:
\code
// Serial region, executed by a single thread
#pragma omp parallel
{
// Parallel region, executed by the specified number of threads
}
// Serial region, executed by a single thread
\endcode
// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
#pragma omp parallel
{
y1 = A * x;
y2 = B * x;
}
\endcode
// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads. Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = A * x;
#pragma omp section
y2 = B * x;
}
\endcode
// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = serial( A * x );
#pragma omp section
y2 = serial( B * x );
}
\endcode
// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref boost_threads_parallelization Next: \ref serial_execution
*/
//*************************************************************************************************
//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:
\code
blaze::DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
C = serial( A + B );
\endcode
// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:
\code
using blaze::rowMajor;
using blaze::columnVector;
blaze::DynamicMatrix<double,rowMajor> A;
blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;
// ... Resizing and initialization
// Parallel execution
// If possible and beneficial for performance the following operation is executed in parallel.
x = A * b;
// Serial execution
// All operations executed within the serial section are guaranteed to be executed in
// serial (even if a parallel execution would be possible and/or beneficial).
BLAZE_SERIAL_SECTION
{
y = A * c;
z = A * d;
}
// Parallel execution continued
// ...
\endcode
// Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial.
// Outside the scope of the serial section, all operations are run in parallel (if beneficial for
// the performance).
//
// Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution.
// The use of the serial section within several concurrent threads will result in undefined behavior!
//
//
// \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution
// <hr>
//
// The third option is the general deactivation of the parallel execution (even in case OpenMP is
// enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION
// switch in the <tt>./blaze/config/SMP.h</tt> configuration file:
\code
#define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1
\endcode
// In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory
// parallelization is deactivated altogether.
//
// \n Previous: \ref openmp_parallelization Next: \ref serialization
*/
//*************************************************************************************************
//**Serialization**********************************************************************************
/*!\page serialization Serialization
//
// Sometimes it is necessary to store vector and/or matrices on disk, for instance for storing
// results or for sharing specific setups with other people. The \b Blaze math serialization
// module provides the according functionality to create platform independent, portable, binary
// representations of vectors and matrices that can be used to store the \b Blaze data structures
// without loss of precision and to reliably transfer them from one machine to another.
//
// The following two pages explain how to serialize vectors and matrices:
//
// - \ref vector_serialization
// - \ref matrix_serialization
//
// \n Previous: \ref serial_execution Next: \ref vector_serialization
*/
//*************************************************************************************************
//**Vector Serialization***************************************************************************
/*!\page vector_serialization Vector Serialization
//
// The following example demonstrates the (de-)serialization of dense and sparse vectors:
\code
using blaze::columnVector;
using blaze::rowVector;
// Serialization of both vectors
{
blaze::StaticVector<double,5UL,rowVector> d;
blaze::CompressedVector<int,columnVector> s;
// ... Resizing and initialization
// Creating an archive that writes into the file "vectors.blaze"
blaze::Archive<std::ofstream> archive( "vectors.blaze" );
// Serialization of both vectors into the same archive. Note that d lies before s!
archive << d << s;
}
// Reconstitution of both vectors
{
blaze::DynamicVector<double,rowVector> d1;
blaze::DynamicVector<int,rowVector> d2;
// Creating an archive that reads from the file "vectors.blaze"
blaze::Archive<std::ifstream> archive( "vectors.blaze" );
// Reconstituting the former d vector into d1. Note that it is possible to reconstitute
// the vector into a different kind of vector (StaticVector -> DynamicVector), but that
// the type of elements has to be the same.
archive >> d1;
// Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
// a sparse vector as a dense vector (also the reverse is possible) and that a column vector
// can be reconstituted as row vector (and vice versa). Note however that also in this case
// the type of elements is the same!
archive >> d2;
}
\endcode
// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:
\code
// Serialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// ... Resizing and initialization
// Creating an archive that writes into the file "vector.blaze"
blaze::Archive<std::ofstream> archive( "vector.blaze" );
// Serialization of the vector into the archive
archive << vec;
}
// Deserialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// Creating an archive that reads from the file "vector.blaze"
blaze::Archive<std::ifstream> archive( "vector.blaze" );
// Reconstitution of the vector from the archive
archive >> vec;
}
\endcode
// As the examples demonstrate, the vector serialization offers enormous flexibility. However,
// several actions result in errors:
//
// - vectors cannot be reconstituted as matrices (and vice versa)
// - the element type of the serialized and reconstituted vector must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref serialization Next: \ref matrix_serialization
*/
//*************************************************************************************************
//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
// Serialization of both matrices
{
blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
blaze::CompressedMatrix<int,columnMajor> S;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrices.blaze"
blaze::Archive<std::ofstream> archive( "matrices.blaze" );
// Serialization of both matrices into the same archive. Note that D lies before S!
archive << D << S;
}
// Reconstitution of both matrices
{
blaze::DynamicMatrix<double,rowMajor> D1;
blaze::DynamicMatrix<int,rowMajor> D2;
// Creating an archive that reads from the file "matrices.blaze"
blaze::Archive<std::ifstream> archive( "matrices.blaze" );
// Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
// the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
// the type of elements has to be the same.
archive >> D1;
// Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
// a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
// matrix can be reconstituted as row-major matrix (and vice versa). Note however that also
// in this case the type of elements is the same!
archive >> D2;
}
\endcode
// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:
\code
// Serialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrix.blaze"
blaze::Archive<std::ofstream> archive( "matrix.blaze" );
// Serialization of the matrix into the archive
archive << mat;
}
// Deserialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// Creating an archive that reads from the file "matrix.blaze"
blaze::Archive<std::ifstream> archive( "matrix.blaze" );
// Reconstitution of the matrix from the archive
archive >> mat;
}
\endcode
// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
// - matrices cannot be reconstituted as vectors (and vice versa)
// - the element type of the serialized and reconstituted matrix must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
// of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref vector_serialization Next: \ref customization \n
*/
//*************************************************************************************************
//**Customization**********************************************************************************
/*!\page customization Customization
//
// Although \b Blaze tries to work out of the box for every possible setting, still it may be
// necessary to adapt the library to specific requirements. The following three pages explain
// how to customize the \b Blaze library to your own needs:
//
// - \ref configuration_files
// - \ref vector_and_matrix_customization
// - \ref error_reporting_customization
//
// \n Previous: \ref matrix_serialization Next: \ref configuration_files
*/
//*************************************************************************************************
//**Configuration Files****************************************************************************
/*!\page configuration_files Configuration Files
//
// \tableofcontents
//
//
// Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose
// \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory,
// which provide ample opportunity to customize internal settings, behavior, and thresholds.
// This chapter explains the most important of these configuration files. For a complete
// overview of all customization opportunities, please go to the configuration files in the
// <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation.
//
//
// \n \section transpose_flag Default Vector Storage
// <hr>
//
// The \b Blaze default is that all vectors are created as column vectors (if not specified
// explicitly):
\code
blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector
\endcode
// The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default
// vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library.
// The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro:
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
\endcode
// Alternatively the default transpose flag can be specified via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector.
//
//
// \n \section storage_order Default Matrix Storage
// <hr>
//
// Matrices are by default created as row-major matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix
\endcode
// The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default
// matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order
// for all matrices of the \b Blaze library can be specified.
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
\endcode
// Alternatively the default storage order can be specified via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor.
//
//
// \n \section blas_mode BLAS Mode
// <hr>
//
// In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can
// be configured to use a BLAS library. Via the following compilation switch in the configuration
// file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled:
\code
#define BLAZE_BLAS_MODE 1
\endcode
// In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL
// switch should be activated to prevent \b Blaze from parallelizing on its own:
\code
#define BLAZE_BLAS_IS_PARALLEL 1
\endcode
// Additionally, it is possible to specify the name of the BLAS include file via the
// \c BLAZE_BLAS_INCLUDE_FILE switch. The default setting is <tt><cblas.h></tt>:
\code
#define BLAZE_BLAS_INCLUDE_FILE <cblas.h>
\endcode
// Alternatively, all settings can be specified via command line or by defining the symbols
// manually before including any \b Blaze header file:
\code
#define BLAZE_BLAS_MODE 1
#define BLAZE_BLAS_IS_PARALLEL 1
#define BLAZE_BLAS_INCLUDE_FILE <cblas.h>
#include <blaze/Blaze.h>
\endcode
// In case no BLAS library is available, \b Blaze will still work and will not be reduced in
// functionality, but performance may be limited.
//
//
// \n \section cache_size Cache Size
// <hr>
//
// The optimization of several \b Blaze compute kernels depends on the cache size of the target
// architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal
// speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value in the
// <tt>./blaze/config/CacheSize.h</tt> configuration file:
\code
#define BLAZE_CACHE_SIZE 3145728UL
\endcode
// The cache size can also be specified via command line or by defining this symbol manually
// before including any \b Blaze header file:
\code
#define BLAZE_CACHE_SIZE 3145728UL
#include <blaze/Blaze.h>
\endcode
// \n \section vectorization Vectorization
// <hr>
//
// In order to achieve maximum performance and to exploit the compute power of a target platform
// the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or
// AVX-512 intrinsics, depending on which instruction set is available. However, it is possible
// to disable the vectorization entirely by the compile time switch in the configuration file
// <tt>./blaze/config/Vectorization.h</tt>:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is
// disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for
// the operations. Note that deactivating the vectorization may pose a severe performance
// limitation for a large number of operations!
//
//
// \n \section thresholds Thresholds
// <hr>
//
// For many computations \b Blaze distinguishes between small and large vectors and matrices.
// This separation is especially important for the parallel execution of computations, since
// the use of several threads only pays off for sufficiently large vectors and matrices.
// Additionally, it also enables \b Blaze to select kernels that are optimized for a specific
// size.
//
// In order to distinguish between small and large data structures \b Blaze provides several
// thresholds that can be adapted to the characteristics of the target platform. For instance,
// the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom
// \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels
// for large multiplications. All thresholds, including the thresholds for the OpenMP- and
// thread-based parallelization, are contained within the configuration file
// <tt><blaze/config/Thresholds.h></tt>.
//
//
// \n \section padding Padding
// <hr>
//
// By default the \b Blaze library uses padding for all dense vectors and matrices in order to
// achieve maximum performance in all operations. Due to padding, the proper alignment of data
// elements can be guaranteed and the need for remainder loops is minimized. However, on the
// downside padding introduces an additional memory overhead, which can be large depending on
// the used data type.
//
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate padding:
\code
#define BLAZE_USE_PADDING 1
\endcode
// Alternatively it is possible to (de-)activate padding via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_USE_PADDING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if
// it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce
// the performance of all dense vector and matrix operations!
//
//
// \n \section streaming Streaming (Non-Temporal Stores)
// <hr>
//
// For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide
// a significant performance advantage of about 20%. However, this advantage is only in effect in
// case the memory bandwidth of the target architecture is maxed out. If the target architecture's
// memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance
// instead of increasing it.
//
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate streaming:
\code
#define BLAZE_USE_STREAMING 1
\endcode
// Alternatively streaming can be (de-)activated via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
#define BLAZE_USE_STREAMING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is
// disabled. It is recommended to consult the target architecture's white papers to decide whether
// streaming is beneficial or hurtful for performance.
//
//
// \n Previous: \ref customization Next: \ref vector_and_matrix_customization \n
*/
//*************************************************************************************************
//**Customization of Vectors and Matrices**********************************************************
/*!\page vector_and_matrix_customization Customization of Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section custom_data_members Custom Data Members
// <hr>
//
// So far the \b Blaze library does not provide a lot of flexibility to customize the data
// members of existing \ref vector_types and \ref matrix_types. However, to some extent it is
// possible to customize vectors and matrices by inheritance. The following example gives an
// impression on how to create a simple variation of \ref matrix_types_custom_matrix, which
// automatically takes care of acquiring and releasing custom memory.
\code
template< typename Type // Data type of the matrix
, bool SO = defaultStorageOrder > // Storage order
class MyCustomMatrix
: public CustomMatrix< Type, unaligned, unpadded, SO >
{
public:
explicit inline MyCustomMatrix( size_t m, size_t n )
: CustomMatrix<Type,unaligned,unpadded,SO>()
, array_( new Type[m*n] )
{
this->reset( array_.get(), m, n );
}
private:
std::unique_ptr<Type[]> array_;
};
\endcode
// Please note that this is a simplified example with the intent to show the general approach.
// The number of constructors, the memory acquisition, and the kind of memory management can of
// course be adapted to specific requirements. Also, please note that since none of the \b Blaze
// vectors and matrices have virtual destructors, polymorphic destruction cannot be used.
//
//
// \n \section custom_operations Custom Operations
// <hr>
//
// There are two approaches to extend \b Blaze with custom operations. First, the \c map()
// functions provide the possibility to execute componentwise custom operations on vectors and
// matrices. Second, it is possible to add customized free functions.
//
// \n \subsection custom_operations_map The map() Functions
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on vectors and matrices. The unary \c map() function can be used to apply a custom
// operation on each single element of a dense vector or matrix or each non-zero element of a
// sparse vector or matrix. For instance, the following example demonstrates a custom square
// root computation on a dense matrix:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense vectors or two dense matrices. The following example demonstrates the merging of
// two matrices of double precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
// ( ( 2.1, 0.3) (-4.2, 1.4) )
// ( ( 1.0, 2.9) ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// These examples demonstrate the most convenient way of defining a unary custom operation by
// passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom
// functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
};
B = map( A, Sqrt() );
\endcode
// In order for the functor to work in a call to \c map() it must define a function call operator,
// which accepts arguments of the type of the according vector or matrix elements.
//
// Although the operation is automatically parallelized depending on the size of the vector or
// matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load()
// function can be added to the functor, which handles the vectorized computation. Depending on
// the data type this function is passed one of the following \b Blaze SIMD data types:
//
// <ul>
// <li>SIMD data types for fundamental data types
// <ul>
// <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li>
// <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li>
// <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li>
// <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li>
// <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li>
// <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li>
// </ul>
// </li>
// <li>SIMD data types for complex data types
// <ul>
// <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision floating point data</li>
// <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li>
// </ul>
// </li>
// </ul>
//
// All SIMD types provide the \c value data member for a direct access to the underlying intrinsic
// data element. In the following example, this intrinsic element is passed to the AVX function
// \c _mm256_sqrt_pd():
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
SIMDdouble load( const SIMDdouble& a ) const
{
return _mm256_sqrt_pd( a.value );
}
};
\endcode
// In this example, whenever vectorization is generally applicable, the \c load() function is
// called instead of the function call operator for as long as the number of remaining elements
// is larger-or-equal to the width of the packed SIMD type. In all other cases (which also
// includes peel-off and remainder loops) the scalar operation is used.
//
// Please note that this example has two drawbacks: First, it will only compile in case the
// intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the
// availability of AVX is not taken into account. The first drawback can be alleviated by making
// the \c load() function a function template. The second drawback can be dealt with by adding a
// \c simdEnabled() function template to the functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
template< typename T >
T load( const T& a ) const
{
return _mm256_sqrt_pd( a.value );
}
template< typename T >
static constexpr bool simdEnabled() {
#if defined(__AVX__)
return true;
#else
return false;
#endif
}
};
\endcode
// The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether
// or not vectorization is available for the given data type \c T. In case the function returns
// \c true, the \c load() function is used for a vectorized evaluation, in case the function
// returns \c false, \c load() is neither called nor instantiated.
//
// By default the \c map() function uses peel-off and remainder loops if the number of elements is
// not a multiple of the width of the packed SIMD type. However, all dense vector and matrix types
// in \b Blaze provide padding as an optimization. In case the custom operation preserves the
// value zero of the padding elements, it is possible to omit the peel-off and remainder loops,
// include the padding elements in the computation and by that increase performance. For that
// purpose the \c paddingEnabled() function can be added to the functor:
\code
struct Sqrt
{
// ...
static constexpr bool paddingEnabled() { return true; }
};
\endcode
// Also the \c paddingEnabled() function must be a \c static, \c constexpr function and must
// return whether padding elements can be used in the custom operation. In case the function
// returns \c true, the padding elements are used during a vectorized operation, in case the
// function returns \c false, the padding elements are not used.
//
// Note that this is a simplified example that is only working when used for dense vectors and
// matrices with double precision floating point elements. The following code shows the complete
// implementation of the according functor that is used within the \b Blaze library. The \b Blaze
// \c Sqrt functor is working for all data types that are providing a square root operation:
\code
namespace blaze {
struct Sqrt
{
template< typename T >
BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const
{
return sqrt( a );
}
template< typename T >
static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; }
static constexpr bool paddingEnabled() { return true; }
template< typename T >
BLAZE_ALWAYS_INLINE auto load( const T& a ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T );
return sqrt( a );
}
};
} // namespace blaze
\endcode
// The same approach can be taken for binary custom operations. The following code demonstrates
// the \c Min functor of the \b Blaze library, which is working for all data types that provide
// a \c min() operation:
\code
struct Min
{
explicit inline Min()
{}
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const
{
return min( a, b );
}
template< typename T1, typename T2 >
static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; }
static constexpr bool paddingEnabled() { return true; }
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 );
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 );
return min( a, b );
}
};
\endcode
// For more information on the available \b Blaze SIMD data types and functions, please see the
// SIMD module in the complete \b Blaze documentation.
//
// \n \subsection custom_operations_free_functions Free Functions
//
// In order to extend \b Blaze with new functionality it is possible to add free functions. Free
// functions can be used either as wrappers around calls to the map() function or to implement
// general, non-componentwise operations. The following two examples will demonstrate both ideas.
//
// The first example shows the \c setToZero() function, which resets a sparse matrix to zero
// without affecting the sparsity pattern. It is implemented as a convenience wrapper around
// the map() function:
\code
template< typename MT // Type of the sparse matrix
, bool SO > // Storage order
void setToZero( blaze::SparseMatrix<MT,SO>& mat )
{
(~mat) = blaze::map( ~mat, []( const auto& value ){ return decltype(value){}; } );
}
\endcode
// The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and
// provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the
// <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a>
// it also enables a conversion back to the actual type. This downcast is performed via the tilde
// operator (i.e. \c operator~()). The template parameter \c SO represents the storage order
// (blaze::rowMajor or blaze::columnMajor) of the matrix.
//
// The second example shows the \c countZeros() function, which counts the number of values, which
// are exactly zero, in a dense, row-major matrix:
\code
template< typename MT >
size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat )
{
const size_t M( (~mat).rows() );
const size_t N( (~mat).columns() );
size_t count( 0UL );
for( size_t i=0UL; i<M; ++i ) {
for( size_t j=0UL; j<N; ++j ) {
if( blaze::isDefault<strict>( (~mat)(i,j) ) )
++count;
}
}
return count;
}
\endcode
// The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again,
// it is possible to perform the conversion to the actual type via the tilde operator.
//
// The following two listings show the declarations of all vector and matrix base classes, which
// can be used for custom free functions:
\code
template< typename VT // Concrete type of the dense or sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class Vector;
template< typename VT // Concrete type of the dense vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class DenseVector;
template< typename VT // Concrete type of the sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class SparseVector;
\endcode
\code
template< typename MT // Concrete type of the dense or sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class Matrix;
template< typename MT // Concrete type of the dense matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class DenseMatrix;
template< typename MT // Concrete type of the sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class SparseMatrix;
\endcode
// \n \section custom_data_types Custom Data Types
// <hr>
//
// The \b Blaze library tries hard to make the use of custom data types as convenient, easy and
// intuitive as possible. However, unfortunately it is not possible to meet the requirements of
// all possible data types. Thus it might be necessary to provide \b Blaze with some additional
// information about the data type. The following sections give an overview of the necessary steps
// to enable the use of the hypothetical custom data type \c custom::double_t for vector and
// matrix operations. For example:
\code
blaze::DynamicVector<custom::double_t> a, b, c;
// ... Resizing and initialization
c = a + b;
\endcode
// The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+()
// for additions, \c operator-() for subtractions, \c operator*() for multiplications and
// \c operator/() for divisions. If any of these functions is missing it is necessary to implement
// the operator to perform the according operation. For this example we assume that the custom
// data type provides the four following functions instead of operators:
\code
namespace custom {
double_t add ( const double_t& a, const double_t& b );
double_t sub ( const double_t& a, const double_t& b );
double_t mult( const double_t& a, const double_t& b );
double_t div ( const double_t& a, const double_t& b );
} // namespace custom
\endcode
// The following implementations will satisfy the requirements of the \b Blaze library:
\code
inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b )
{
return add( a, b );
}
inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b )
{
return sub( a, b );
}
inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b )
{
return mult( a, b );
}
inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b )
{
return div( a, b );
}
\endcode
// \b Blaze will use all the information provided with these functions (for instance the return
// type) to properly handle the operations. In the rare case that the return type cannot be
// automatically determined from the operator it might be additionally necessary to provide a
// specialization of the following four \b Blaze class templates:
\code
namespace blaze {
template<>
struct AddTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
template<>
struct SubTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
template<>
struct MultTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
template<>
struct DivTrait<custom::double_t,custom::double_t> {
using Type = custom::double_t;
};
} // namespace blaze
\endcode
// The same steps are necessary if several custom data types need to be combined (as for instance
// \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to
// be taken into account:
\code
custom::double_t operator+( const custom::double_t& a, const custom::float_t& b );
custom::double_t operator+( const custom::float_t& a, const custom::double_t& b );
// ...
\endcode
// Please note that only built-in data types apply for vectorization and thus custom data types
// cannot achieve maximum performance!
//
//
// \n Previous: \ref configuration_files Next: \ref custom_operations \n
*/
//*************************************************************************************************
//**Customization of the Error Reporting Mechanism*************************************************
/*!\page error_reporting_customization Customization of the Error Reporting Mechanism
//
// \tableofcontents
//
//
// \n \section error_reporting_background Background
// <hr>
//
// The default way of \b Blaze to report errors of any kind is to throw a standard exception.
// However, although in general this approach works well, in certain environments and under
// special circumstances exceptions may not be the mechanism of choice and a different error
// reporting mechanism may be desirable. For this reason, \b Blaze provides several macros,
// which enable the customization of the error reporting mechanism. Via these macros it is
// possible to replace the standard exceptions by some other exception type or a completely
// different approach to report errors.
//
//
// \n \section error_reporting_general_customization Customization of the Reporting Mechanism
// <hr>
//
// In some cases it might be necessary to adapt the entire error reporting mechanism and to
// replace it by some other means to signal failure. The primary macro for this purpose is the
// \c BLAZE_THROW macro:
\code
#define BLAZE_THROW( EXCEPTION ) \
throw EXCEPTION
\endcode
// This macro represents the default mechanism of the \b Blaze library to report errors of any
// kind. In order to customize the error reporting mechanism all that needs to be done is to
// define the macro prior to including any \b Blaze header file. This will cause the \b Blaze
// specific mechanism to be overridden. The following example demonstrates this by replacing
// exceptions by a call to a \c log() function and a direct call to abort:
\code
#define BLAZE_THROW( EXCEPTION ) \
log( "..." ); \
abort()
#include <blaze/Blaze.h>
\endcode
// Doing this will trigger a call to \c log() and an abort instead of throwing an exception
// whenever an error (such as an invalid argument) is detected.
//
// \note It is possible to execute several statements instead of executing a single statement to
// throw an exception. Also note that it is recommended to define the macro such that a subsequent
// semicolon is required!
//
// \warning This macro is provided with the intention to assist in adapting \b Blaze to special
// conditions and environments. However, the customization of the error reporting mechanism via
// this macro can have a significant effect on the library. Thus be advised to use the macro
// with due care!
//
//
// \n \section error_reporting_exception_customization Customization of the Type of Exceptions
// <hr>
//
// In addition to the customization of the entire error reporting mechanism it is also possible
// to customize the type of exceptions being thrown. This can be achieved by customizing any
// number of the following macros:
\code
#define BLAZE_THROW_BAD_ALLOC \
BLAZE_THROW( std::bad_alloc() )
#define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \
BLAZE_THROW( std::logic_error( MESSAGE ) )
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( std::invalid_argument( MESSAGE ) )
#define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \
BLAZE_THROW( std::length_error( MESSAGE ) )
#define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \
BLAZE_THROW( std::out_of_range( MESSAGE ) )
#define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \
BLAZE_THROW( std::runtime_error( MESSAGE ) )
\endcode
// In order to customize the type of exception the according macro has to be defined prior to
// including any \b Blaze header file. This will override the \b Blaze default behavior. The
// following example demonstrates this by replacing \c std::invalid_argument by a custom
// exception type:
\code
class InvalidArgument
{
public:
InvalidArgument();
explicit InvalidArgument( const std::string& message );
// ...
};
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( InvalidArgument( MESSAGE ) )
#include <blaze/Blaze.h>
\endcode
// By manually defining the macro, an \c InvalidArgument exception is thrown instead of a
// \c std::invalid_argument exception. Note that it is recommended to define the macro such
// that a subsequent semicolon is required!
//
// \warning These macros are provided with the intention to assist in adapting \b Blaze to
// special conditions and environments. However, the customization of the type of an exception
// via this macro may have an effect on the library. Thus be advised to use the macro with due
// care!
//
//
// \n \section error_reporting_special_errors Customization of Special Errors
// <hr>
//
// Last but not least it is possible to customize the error reporting for special kinds of errors.
// This can be achieved by customizing any number of the following macros:
\code
#define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
#define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
\endcode
// As explained in the previous sections, in order to customize the handling of special errors
// the according macro has to be defined prior to including any \b Blaze header file. This will
// override the \b Blaze default behavior.
//
//
// \n Previous: \ref vector_and_matrix_customization Next: \ref blas_functions \n
*/
//*************************************************************************************************
//**BLAS Functions*********************************************************************************
/*!\page blas_functions BLAS Functions
//
// \tableofcontents
//
//
// For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices
// \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements
// several convenient C++ wrapper functions for several BLAS functions. The following sections
// give a complete overview of all available BLAS level 1, 2 and 3 functions.
//
//
// \n \section blas_level_1 BLAS Level 1
// <hr>
//
// \subsection blas_level_1_dotu Dot Product (dotu)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()):
\code
namespace blaze {
float dotu( int n, const float* x, int incX, const float* y, int incY );
double dotu( int n, const double* x, int incX, const double* y, int incY );
complex<float> dotu( int n, const complex<float>* x, int incX,
const complex<float>* y, int incY );
complex<double> dotu( int n, const complex<double>* x, int incX,
const complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(),
// and \c zdotc_sub()):
\code
namespace blaze {
float dotc( int n, const float* x, int incX, const float* y, int incY );
double dotc( int n, const double* x, int incX, const double* y, int incY );
complex<float> dotc( int n, const complex<float>* x, int incX,
const complex<float>* y, int incY );
complex<double> dotc( int n, const complex<double>* x, int incX,
const complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_axpy Axpy Product (axpy)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()):
\code
namespace blaze {
void axpy( int n, float alpha, const float* x, int incX, float* y, int incY );
void axpy( int n, double alpha, const double* x, int incX, double* y, int incY );
void axpy( int n, complex<float> alpha, const complex<float>* x,
int incX, complex<float>* y, int incY );
void axpy( int n, complex<double> alpha, const complex<double>* x,
int incX, complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST >
void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha );
} // namespace blaze
\endcode
// \n \section blas_level_2 BLAS Level 2
// <hr>
//
// \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()):
\code
namespace blaze {
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha,
const float* A, int lda, const float* x, int incX,
float beta, float* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha,
const double* A, int lda, const double* x, int incX,
double beta, double* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha,
const complex<float>* A, int lda, const complex<float>* x, int incX,
complex<float> beta, complex<float>* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha,
const complex<double>* A, int lda, const complex<double>* x, int incX,
complex<double> beta, complex<double>* y, int incY );
template< typename VT1, typename MT1, bool SO, typename VT2, typename ST >
void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A,
const DenseVector<VT2,false>& x, ST alpha, ST beta );
template< typename VT1, typename VT2, typename MT1, bool SO, typename ST >
void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x,
const DenseMatrix<MT1,SO>& A, ST alpha, ST beta );
} // namespace blaze
\endcode
// \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(),
// and \c ztrmv()):
\code
namespace blaze {
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const float* A, int lda, float* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const double* A, int lda, double* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
} // namespace blaze
\endcode
// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):
\code
namespace blaze {
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, float alpha, const float* A, int lda,
const float* B, int ldb, float beta, float* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, double alpha, const double* A, int lda,
const double* B, int ldb, double beta, double* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda,
const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda,
const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and
// \c ztrmm()):
\code
namespace blaze {
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, float alpha, const float* A,
int lda, float* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, double alpha, const double* A,
int lda, double* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A,
int lda, complex<float>* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A,
int lda, complex<double>* B, int ldb );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trsm Triangular System Solver (trsm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for solving
// a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()):
\code
namespace blaze {
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, float alpha, const float* A,
int lda, float* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, double alpha, const double* A,
int lda, double* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A,
int lda, complex<float>* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A,
int lda, complex<double>* B, int ldb );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n Previous: \ref error_reporting_customization Next: \ref lapack_functions \n
*/
//*************************************************************************************************
//**LAPACK Functions*******************************************************************************
/*!\page lapack_functions LAPACK Functions
//
// \tableofcontents
//
//
// \n \section lapack_introction Introduction
// <hr>
//
// The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks
// (including the decomposition, inversion and the computation of the determinant of dense matrices).
// For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required
// LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper
// functions. For more details on the individual LAPACK functions see the \b Blaze function
// documentation or the LAPACK online documentation browser:
//
// http://www.netlib.org/lapack/explore-html/
//
// Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They
// provide the parameters of the original LAPACK functions and thus provide maximum flexibility:
\code
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const int m ( numeric_cast<int>( A.rows() ) ); // == N
const int n ( numeric_cast<int>( A.columns() ) ); // == N
const int lda ( numeric_cast<int>( A.spacing() ) ); // >= N
const int lwork( n*lda );
const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required
const std::unique_ptr<double[]> work( new double[N] ); // No initialization required
int info( 0 );
getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info'
getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info'
\endcode
// Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These
// wrappers provide a maximum of convenience:
\code
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required
getrf( A, ipiv.get() ); // Cannot fail
getri( A, ipiv.get() ); // Reports failure via exception
\endcode
// \note All functions only work for general, non-adapted matrices with \c float, \c double,
// \c complex<float>, or \c complex<double> element type. The attempt to call the function with
// adaptors or matrices of any other element type results in a compile time error!
//
// \note All functions can only be used if a fitting LAPACK library is available and linked to
// the final executable. Otherwise a call to this function will result in a linker error.
//
// \note For performance reasons all functions do only provide the basic exception safety guarantee,
// i.e. in case an exception is thrown the given matrix may already have been modified.
//
//
// \n \section lapack_decomposition Matrix Decomposition
// <hr>
//
// The following functions decompose/factorize the given dense matrix. Based on this decomposition
// the matrix can be inverted or used to solve a linear system of equations.
//
//
// \n \subsection lapack_lu_decomposition LU Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(),
// \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix:
\code
namespace blaze {
void getrf( int m, int n, float* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, double* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info );
template< typename MT, bool SO >
void getrf( DenseMatrix<MT,SO>& A, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = P \cdot L \cdot U, \f]\n
// where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper
// triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major
// matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit
// diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is
// transposed.
//
// \note The LU decomposition will never fail, even for singular matrices. However, in case of a
// singular matrix the resulting decomposition cannot be used for a matrix inversion or solving
// a linear system of equations.
//
//
// \n \subsection lapack_ldlt_decomposition LDLT Decomposition
//
// The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(),
// \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given
// symmetric indefinite matrix:
\code
namespace blaze {
void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info );
void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or }
A = L D L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(),
// which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix:
\code
namespace blaze {
void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or }
A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(),
// \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given
// positive definite matrix:
\code
namespace blaze {
void potrf( char uplo, int n, float* A, int lda, int* info );
void potrf( char uplo, int n, double* A, int lda, int* info );
void potrf( char uplo, int n, complex<float>* A, int lda, int* info );
void potrf( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potrf( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U^{T} U \texttt{ (if uplo = 'U'), or }
A = L L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky
// decomposition fails if the given matrix \a A is not a positive definite matrix. In this case
// a \a std::invalid_argument exception is thrown.
//
//
// \n \subsection lapack_qr_decomposition QR Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(),
// \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix:
\code
namespace blaze {
void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot R, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the
// min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n);
// the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as
// a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(),
// \c sorg2r(), \c dorg2r(), \c cungqr(), \c zungqr(), \c cung2r(), and \c zung2r(), which
// reconstruct the \c Q matrix from a QR decomposition:
\code
namespace blaze {
void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
template< typename MT, bool SO >
void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void org2r( int m, int n, int k, float* A, int lda, const float* tau, float* work, int* info );
void org2r( int m, int n, int k, double* A, int lda, const double* tau, double* work, int* info );
template< typename MT, bool SO >
void org2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ung2r( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int* info );
void ung2r( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int* info );
template< typename MT, bool SO >
void ung2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(),
// \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from
// a QR decomposition:
\code
namespace blaze {
void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO, typename MT2 >
void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_rq_decomposition RQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(),
// \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix:
\code
namespace blaze {
void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = R \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and
// <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>,
// and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray
// <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case
// \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n
// upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau
// represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(),
// \c sorgr2(), \c dorgr2(), \c cungrq(), \c zungrq(), \c cungr2(), and \c zungr2(), which
// reconstruct the \c Q matrix from an RQ decomposition:
\code
namespace blaze {
void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
template< typename MT, bool SO >
void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void orgr2( int m, int n, int k, float* A, int lda, const float* tau, float* work, int* info );
void orgr2( int m, int n, int k, double* A, int lda, const double* tau, double* work, int* info );
template< typename MT, bool SO >
void orgr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungr2( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int* info );
void ungr2( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int* info );
template< typename MT, bool SO >
void ungr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(),
// \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from
// an RQ decomposition:
\code
namespace blaze {
void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmrq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO, typename MT2 >
void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_ql_decomposition QL Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(),
// \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix:
\code
namespace blaze {
void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot L, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and
// <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>,
// and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray
// A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n,
// the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower
// trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent
// the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(),
// \c sorg2l(), \c dorg2l(), \c cungql(), \c zungql(), \c cung2l(), and \c zung2l(), which
// reconstruct the \c Q matrix from a QL decomposition:
\code
namespace blaze {
void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
template< typename MT, bool SO >
void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void org2l( int m, int n, int k, float* A, int lda, const float* tau, float* work, int* info );
void org2l( int m, int n, int k, double* A, int lda, const double* tau, double* work, int* info );
template< typename MT, bool SO >
void org2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ung2l( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int* info );
void ung2l( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int* info );
template< typename MT, bool SO >
void ung2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(),
// \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from
// a QL decomposition:
\code
namespace blaze {
void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO, typename MT2 >
void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_lq_decomposition LQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(),
// \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix:
\code
namespace blaze {
void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = L \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the
// \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n);
// the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q
// as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(),
// \c sorgl2(), \c dorgl2(), \c cunglq(), \c zunglq(), \c cungl2(), and \c zungl2(), which
// reconstruct the \c Q matrix from an LQ decomposition:
\code
namespace blaze {
void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
template< typename MT, bool SO >
void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void orgl2( int m, int n, int k, float* A, int lda, const float* tau, float* work, int* info );
void orgl2( int m, int n, int k, double* A, int lda, const double* tau, double* work, int* info );
template< typename MT, bool SO >
void orgl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
void ungl2( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int* info );
void ungl2( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int* info );
template< typename MT, bool SO >
void ungl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(),
// \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from
// an LQ decomposition:
\code
namespace blaze {
void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO, typename MT2 >
void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \section lapack_inversion Matrix Inversion
// <hr>
//
// Given a matrix that has already been decomposed, the following functions can be used to invert
// the matrix in-place.
//
//
// \n \subsection lapack_lu_inversion LU-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(),
// \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by
// an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info );
void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info );
void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info );
void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void getri( DenseMatrix<MT,SO>& A, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_inversion LDLT-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(),
// \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been
// decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info );
void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info );
void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_inversion LDLH-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c chetri() and
// \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by
// an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the third function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_inversion Cholesky-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(),
// \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been
// decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potri( char uplo, int n, float* A, int lda, int* info );
void potri( char uplo, int n, double* A, int lda, int* info );
void potri( char uplo, int n, complex<float>* A, int lda, int* info );
void potri( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potri( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(),
// \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place:
\code
namespace blaze {
void trtri( char uplo, char diag, int n, float* A, int lda, int* info );
void trtri( char uplo, char diag, int n, double* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_substitution Substitution
// <hr>
//
// Given a matrix that has already been decomposed the following functions can be used to perform
// the forward/backward substitution step to compute the solution to a system of linear equations.
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \n \subsection lapack_lu_substitution LU-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(),
// \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has
// already been decomposed by an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_substitution LDLT-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(),
// \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite
// matrix that has already been decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_substitution LDLH-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c chetrs() and \c zhetrs(),
// which perform the substitution step for an Hermitian indefinite matrix that has already been
// decomposed by an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_substitution Cholesky-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(),
// \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix
// that has already been decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(),
// \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix:
\code
namespace blaze {
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_linear_system_solver Linear System Solver
// <hr>
//
// The following functions represent compound functions that perform both the decomposition step
// as well as the substitution step to compute the solution to a system of linear equations. Note
// that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \n \subsection lapack_lu_linear_system_solver LU-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(),
// \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according
// \ref lapack_lu_substitution :
\code
namespace blaze {
void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info );
void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_lu_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(),
// \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according
// \ref lapack_ldlt_substitution :
\code
namespace blaze {
void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlt_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c chesv() and
// \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according
// \ref lapack_ldlh_substitution :
\code
namespace blaze {
void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(),
// \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according
// \ref lapack_llh_substitution :
\code
namespace blaze {
void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_llh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(),
// \c ctrsv(), and \c ztrsv():
\code
namespace blaze {
void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename MT, bool SO, typename VT, bool TF >
void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b contains the solution of the linear
// system of equations.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N'.
//
// The last function throws a \a std::invalid_argument exception in case of an error. Note that
// none of the functions does perform any test for singularity or near-singularity. Such tests
// must be performed prior to calling this function!
//
//
// \n \section lapack_eigenvalues Eigenvalues/Eigenvectors
//
// \subsection lapack_eigenvalues_general General Matrices
//
// The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(),
// \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of
// the given general matrix:
\code
namespace blaze {
void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* info );
void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int lwork, int* info );
void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int ldvr, complex<float>* work, int lwork, float* rwork, int* info );
void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, int ldvr, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR );
} // namespace blaze
\endcode
// The complex eigenvalues of the given matrix \a A are returned in the given vector \a w.
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs
// of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part
// first.
//
// If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR
// in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major
// matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies
\f[ A * v[j] = lambda[j] * v[j], \f]
// where \f$lambda[j]\f$ is its eigenvalue.
//
// If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL
// in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major
// matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies
\f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f]
// where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$.
//
// \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The
// functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// The first four functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices
//
// The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(),
// which compute the eigenvalues and eigenvectors of the given symmetric matrix:
\code
namespace blaze {
void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* info );
void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* iwork, int liwork, int* info );
void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* iwork, int liwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix:
\code
namespace blaze {
void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, int ldz, float* work, int lwork, int* iwork, int* ifail, int* info );
void syevx( char jobz, char range, char uplo, int n, double* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices
//
// The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(),
// which compute the eigenvalues and eigenvectors of the given Hermitian matrix:
\code
namespace blaze {
void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info );
void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int liwork, int* info );
void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int liwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix:
\code
namespace blaze {
void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, int* iwork, int* ifail, int* info );
void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* rwork, int* iwork, int* ifail, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \section lapack_singular_values Singular Values/Singular Vectors
//
// The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(),
// \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given
// general matrix:
\code
namespace blaze {
void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). In contrast to the \c gesvd()
// functions they compute the singular value decomposition (SVD) of the given general matrix by
// applying a divide-and-conquer strategy for the computation of the left and right singular
// vectors:
\code
namespace blaze {
void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz );
} // namespace blaze
\endcode
// The resulting decomposition has the form
\f[ A = U \cdot S \cdot V, \f]
// where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal
// elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal
// matrix. The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n)
// columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively.
//
// The resulting min(\a m,\a n) real and non-negative singular values are returned in descending
// order in the vector \a s, which is resized to the correct size (if possible and necessary).
//
// Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(),
// \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or
// vectors:
\code
namespace blaze {
void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp );
} // namespace blaze
\endcode
// The number of singular values to be computed is specified by the lower bound \a low and the
// upper bound \a upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// The first four functions report failure via the \c info argument, the remaining functions throw
// an exception in case of an error.
//
//
// \n Previous: \ref blas_functions Next: \ref block_vectors_and_matrices \n
*/
//*************************************************************************************************
//**Block Vectors and Matrices*********************************************************************
/*!\page block_vectors_and_matrices Block Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section block_vectors_and_matrices_general General Concepts
// <hr>
//
// In addition to fundamental element types, the \b Blaze library supports vectors and matrices
// with non-fundamental element type. For instance, it is possible to define block matrices by
// using a matrix type as the element type:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A;
DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// The matrix/vector multiplication in this example runs fully parallel and uses vectorization
// for every inner matrix/vector multiplication and vector addition.
//
//
// \n \section block_vectors_and_matrices_pitfalls Pitfalls
// <hr>
//
// The only thing to keep in mind when using non-fundamental element types is that all operations
// between the elements have to be well defined. More specifically, the size of vector and matrix
// elements has to match. The attempt to combine two non-matching elements results in either a
// compilation error (in case of statically sized elements) or an exception (for dynamically sized
// elements):
\code
DynamicVector< StaticVector<int,2UL> > a;
DynamicVector< StaticVector<int,3UL> > b;
DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match
\endcode
// Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector,
// \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized
// accordingly upfront.
//
//
// \n \section block_vectors_and_matrices_examples Examples
// <hr>
//
// The first example demonstrates the multiplication between a statically sized block matrix
// and a block vector:
\code
using namespace blaze;
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ) * ( ) = ( )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>;
using V2 = StaticVector<int,2UL,columnVector>;
DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) },
{ M2x2(3), M2x2(4) } };
DynamicVector<V2,columnVector> x{ V2(1), V2(2) };
DynamicVector<V2,columnVector> y( A * x );
\endcode
// The second example shows the multiplication between a compressed block matrix with blocks of
// varying size and a compressed block vector:
\code
using namespace blaze;
// ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) )
// ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) )
// ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) )
// ( ) ( ) ( )
// ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) )
// ( ) ( ) ( )
// ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) )
// ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) )
using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>;
using V3 = HybridVector<int,3UL,columnVector>;
CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL );
A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } };
A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } };
A(1,1) = M3x3{ { 1 } };
A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } };
A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } };
CompressedVector<V3,columnVector> x( 3UL, 3UL );
x[0] = V3{ 1, 0, 1 };
x[1] = V3{ 2 };
x[2] = V3{ -1, 2 };
CompressedVector<V3,columnVector> y( A * x );
\endcode
// \n Previous: \ref lapack_functions Next: \ref intra_statement_optimization \n
*/
//*************************************************************************************************
//**Intra-Statement Optimization*******************************************************************
/*!\page intra_statement_optimization Intra-Statement Optimization
//
// One of the prime features of the \b Blaze library is the automatic intra-statement optimization.
// In order to optimize the overall performance of every single statement \b Blaze attempts to
// rearrange the operands based on their types. For instance, the following addition of dense and
// sparse vectors
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1 + d2;
\endcode
// is automatically rearranged and evaluated as
\code
// ...
d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged
\endcode
// This order of operands is highly favorable for the overall performance since the addition of
// the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized
// fashion.
//
// This intra-statement optimization can have a tremendous effect on the performance of a statement.
// Consider for instance the following computation:
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = A * B * x;
\endcode
// Since multiplications are evaluated from left to right, this statement would result in a
// matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the
// right subexpression is evaluated first, the performance can be dramatically improved since the
// matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication.
// The \b Blaze library exploits this by automatically restructuring the expression such that the
// right multiplication is evaluated first:
\code
// ...
y = A * ( B * x );
\endcode
// Note however that although this intra-statement optimization may result in a measurable or
// even significant performance improvement, this behavior may be undesirable for several reasons,
// for instance because of numerical stability. Therefore, in case the order of evaluation matters,
// the best solution is to be explicit and to separate a statement into several statements:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ...
d3 += d2; // ... and afterwards add the second dense vector
\endcode
\code
// ...
blaze::DynamicMatrix<double> A, B, C;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
C = A * B; // Compute the left-hand side matrix-matrix multiplication first ...
y = C * x; // ... before the right-hand side matrix-vector multiplication
\endcode
// Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + eval( s1 + d2 );
\endcode
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = eval( A * B ) * x;
\endcode
// \n Previous: \ref block_vectors_and_matrices Next: \ref faq \n
*/
//*************************************************************************************************
//**FAQ********************************************************************************************
/*!\page faq Frequently Asked Questions (FAQ)
//
// \tableofcontents
//
//
// <hr>
// \section faq_padding A StaticVector/StaticMatrix is larger than expected. Is this a bug?
//
// The size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can
// indeed be larger than expected:
\code
StaticVector<int,3> a;
StaticMatrix<int,3,3> A;
sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12
sizeof( A ); // Evaluates to 48, 96, or even 144, but not 36
\endcode
// In order to achieve the maximum possible performance the \b Blaze library tries to enable
// SIMD vectorization even for small vectors. For that reason \b Blaze by default uses padding
// elements for all dense vectors and matrices to guarantee that at least a single SIMD vector
// can be loaded. Depending on the used SIMD technology that can significantly increase the size
// of a \c StaticVector, \c StaticMatrix, \c HybridVector or \c HybridMatrix:
\code
StaticVector<int,3> a;
StaticMatrix<int,3,3> A;
sizeof( a ); // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512
// (under the assumption that an integer occupies 4 bytes)
sizeof( A ); // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512
// (under the assumption that an integer occupies 4 bytes)
\endcode
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate padding:
\code
#define BLAZE_USE_PADDING 1
\endcode
// Alternatively it is possible to (de-)activate padding via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_USE_PADDING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if
// it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce
// the performance of all dense vector and matrix operations!
//
//
// <hr>
// \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug?
//
// Despite disabling padding via the \c BLAZE_USE_PADDING compile time switch (see \ref faq_padding),
// the size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can still
// be larger than expected:
\code
#define BLAZE_USE_PADDING 0
#include <blaze/Blaze.h>
StaticVector<int,3> a;
StaticVector<int,5> b;
sizeof( a ); // Always evaluates to 12
sizeof( b ); // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected)
\endcode
// The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128
// bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit).
// Since the second vector contains enough elements it is possible to benefit from vectorization.
// However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of
// 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512
// is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16
// integers, respectively. Even the second vector does not hold enough elements to benefit from
// vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte
// alignment (for AVX-512).
//
// It is possible to disable the vectorization entirely by the compile time switch in the
// <tt>./blaze/config/Vectorization.h</tt> configuration file:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics and the necessary alignment to speed up computations. In case the switch is
// set to 0, vectorization is disabled entirely and the \b Blaze library chooses default,
// non-vectorized functionality for the operations. Note that deactivating the vectorization may
// pose a severe performance limitation for a large number of operations!
//
//
// <hr>
// \section faq_blas To which extent does Blaze make use of BLAS functions under the hood?
//
// Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions
// for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and
// \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze
// kernels.
//
// The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether
// \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze
// does not utilize the BLAS kernels and unconditionally uses its own custom kernels. If
// \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels
// or its own custom kernels. In case of the dense matrix multiplication this decision is based
// on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for
// small matrices it uses its own custom kernels. The threshold for this decision can be
// configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD,
// \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches
// (see <tt>./blaze/config/Thresholds.h</tt>).
//
// Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_lapack To which extent does Blaze make use of LAPACK functions under the hood?
//
// \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, computing the
// determinants and eigenvalues, and the SVD. In contrast to the BLAS functionality (see
// \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to
// use any of these functionalities, but do not provide (i.e. link) a LAPACK library you will
// get link time errors.
//
// Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it?
//
// The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze
// library, which by now is several hundred thousand lines of source code. That means that a lot
// of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it
// is rare that everything is required within a single compilation unit. Therefore it is easily
// possible to reduce compile times by including only those \b Blaze features that are used within
// the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be
// enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation
// times by about 20%.
//
// Additionally we are taking care to implement new \b Blaze functionality such that compile times
// do not explode and try to reduce the compile times of existing features. Thus newer releases of
// \b Blaze can also improve compile times.
//
//
// <hr>
// \section faq_custom_operations Blaze does not provide feature XYZ. What can I do?
//
// In some cases you might be able to implement the required functionality very conveniently by
// building on the existing \c map() functions (see \ref custom_operations_map). For instance,
// the following code demonstrates the addition of a function that merges two vectors of floating
// point type into a vector of complex numbers:
\code
template< typename VT1, typename VT2, bool TF >
decltype(auto) zip( const blaze::DenseVector<VT1,TF>& lhs, const blaze::DenseVector<VT2,TF>& rhs )
{
return blaze::map( ~lhs, ~rhs, []( const auto& r, const auto& i ) {
using ET1 = ElementType_t<VT1>;
using ET2 = ElementType_t<VT2>;
return std::complex<std::common_type_t<ET1,ET2>>( r, i );
} );
}
\endcode
// You will find a summary of the necessary steps to create custom features in \ref customization.
//
// Sometimes, however, the available customization points might not be sufficient. In this case
// you are cordially invited to create a pull request that provides the implementation of a
// feature or to create an issue according to our \ref issue_creation_guidelines. Please try
// to explain the feature as descriptive as possible, for instance by providing conceptual code
// examples.
//
// \n Previous: \ref intra_statement_optimization Next: \ref issue_creation_guidelines \n
*/
//*************************************************************************************************
//**FAQ********************************************************************************************
/*!\page issue_creation_guidelines Issue Creation Guidelines
//
// \tableofcontents
//
//
// One of the most important aspects of the \b Blaze project is the
// <a href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> on the official
// \b Blaze Bitbucket page. We cordially invite all \b Blaze users to submit feature requests
// and bug reports, as we believe that this is a significant part of making \b Blaze a better
// library. However, we are asking to follow a small set of guidelines when creating an issue
// to facilitate the issue management on our side and also to make issues more useful for users
// of \b Blaze.
//
//
// <hr>
// \section issues_title Title
//
// The title is the most important detail of an issue. A well chosen title makes it easy to grasp
// the idea of an issue and improves the discoverability. Therefore, please choose a title that
// is ...
//
// - ... as descriptive as possible;
// - ... as concise as possible;
// - ... as unambiguous as possible.
//
// Also, please create a separate issue for each idea/problem/etc. A very general title or an
// \"and\" in the title could be an indication that the issue is not specific enough and should
// be split into several issues.
//
// \subsection issues_title_good_examples Good Examples
//
// - \"Provide support for AVX-512 SIMD operations\"
// - \"Add support for the Boost Multiprecision Library\"
// - \"Introduce reduction operations into Blaze\"
// - \"Compilation error on KNL with -march=knl\"
//
// \subsection issues_title_bad_examples Bad Examples
//
// - \"Several requests\" (instead create separate issues for each single request)
// - \"Improve the performance\" (instead specify which operation should perform better)
// - \"Blaze library compilation error\" (instead try to be more specific)
//
//
// <hr>
// \section issues_description Description
//
// The description should help us to understand your idea or problem in as much detail as possible.
// Also, it helps to clearly spell out your expectations (how a feature is supposed to work, how
// the behavior should be, etc.). Please spend a couple of minutes to try to make the description
// as comprehensive as possible.
//
//
// <hr>
// \section issues_assignee Assignee
//
// There is no need to assign the issue to a particular person. It is perfectly ok if you just
// ignore this setting.
//
//
// <hr>
// \section issues_kind Kind of Issue
//
// There are four kinds of issues available in the Bitbucket issue tracker: \ref issues_kind_bug,
// \ref issues_kind_enhancement, \ref issues_kind_proposal, and \ref issues_kind_task. In the
// following we try to give guidelines on which kind to choose for a particular issue:
//
// \subsection issues_kind_bug Bug
//
// Please choose the category \ref issues_kind_bug if ...
//
// - ... you experience a compilation error despite your best efforts to get it right;
// - ... you experience a crash/failure despite your best efforts to get it right;
// - ... you experience problems when combining features;
// - ... a feature does not work as specified/documented (i.e. can be considered broken).
//
// Please \b don't choose the category \ref issues_kind_bug if ...
//
// - ... you feel a feature should work differently than it currently does (instead create a
// \ref issues_kind_proposal with a convincing title and description);
// - ... you are not sure how to use a feature (instead create an \ref issues_kind_enhancement
// issue to extend the documentation);
// - ... you are missing a feature (instead create a \ref issues_kind_proposal or
// \ref issues_kind_enhancement issue).
//
// If you select the category \ref issues_kind_bug, please also try to provide a minimum example
// that fails. That helps us to minimize the time to resolve the bug.
//
// As we try to keep \b Blaze bug-free, we will always prioritize bug issues. However, we will
// also quickly close bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of
// the problems mentioned above). We will \b not relabel a bug issue to \ref issues_kind_enhancement
// or \ref issues_kind_proposal, even if they would be reasonable extensions to \b Blaze.
//
// \subsection issues_kind_enhancement Enhancement
//
// Please choose the category \ref issues_kind_enhancement if ...
//
// - ... you need an add-on to an existing feature;
// - ... you need an extension of an existing feature;
// - ... you need an extended documentation for an existing feature.
//
// \ref issues_kind_enhancement is very similar to \ref issues_kind_proposal, so we don't mind
// if an \ref issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice versa.
// Just make sure you don't request an extension or new feature as a \ref issues_kind_bug.
//
// \subsection issues_kind_proposal Proposal
//
// Please choose the category \ref issues_kind_proposal if ...
//
// - ... you want to request a new feature;
// - ... you want to change an existing feature.
//
// \ref issues_kind_proposal is very similar to \ref issues_kind_enhancement, so we don't mind if
// a \ref issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice versa. Just
// make sure you don't request an extension or new feature as a \ref issues_kind_bug.
//
// \subsection issues_kind_task Task
//
// Please choose the category \ref issues_kind_task if ...
//
// - ... you want us to do something not feature related;
// - ... you have something else in mind which does not fall in the other three categories.
//
//
// <hr>
// \section issues_priority Priority
//
// Via the priority of an issue you can tell us how important the issue is to you. Therefore the
// priority can have an influence on when we will deal with the issue. However, unfortunately we
// don't have an infinite amount of time and we can not deal with an arbitrary amount of issues
// at the same time. We will therefore take the priority into account, but mainly schedule the
// issues based on impact to all \b Blaze users and the estimated time to resolve it.
//
// You can choose between \ref issues_priority_blocker, \ref issues_priority_critical,
// \ref issues_priority_major, \ref issues_priority_minor, and \ref issues_priority_trivial.
//
// \subsection issues_priority_blocker Blocker
//
// Please choose a \ref issues_priority_blocker priority if ...
//
// - ... you cannot work with \b Blaze due to the described \ref issues_kind_bug;
// - ... the \ref issues_kind_bug likely has an influence on \b all \b Blaze users.
//
// Please note that the categories \ref issues_kind_enhancement or \ref issues_kind_proposal
// should never be a \ref issues_priority_blocker!
//
// \subsection issues_priority_critical Critical
//
// Please choose a \ref issues_priority_critical priority if ...
//
// - ... you can work around a \ref issues_kind_bug, but the workaround is (much) slower or awful;
// - ... you cannot use \b Blaze without the proposed feature;
// - ... you consider it to be essential for \b all \b Blaze users.
//
// \subsection issues_priority_major Major
//
// Please choose a \ref issues_priority_major priority if ...
//
// - ... a \ref issues_kind_bug or feature request is not \ref issues_priority_critical, but
// still very important to you;
// - ... you consider it to have a \ref issues_priority_major impact on most \b Blaze users.
//
// The \ref issues_priority_major category is the default setting in Bitbucket and we therefore
// consider it as the default priority for issues.
//
// \subsection issues_priority_minor Minor
//
// Please choose a \ref issues_priority_minor priority if ...
//
// - ... a \ref issues_kind_bug does not affect many \b Blaze users;
// - ... a feature request would only be useful for a small number of \b Blaze users;
// - ... a feature would be nice to have, but is not particularly important.
//
// \subsection issues_priority_trivial Trivial
//
// Please choose a \ref issues_priority_trivial priority if ...
//
// - ... a \ref issues_kind_bug hardly affects anyone;
// - ... a feature request would only be useful for very few \b Blaze users;
// - ... the expected time to resolve an issue is very small.
//
//
// <hr>
// \section issues_attachment Attachments
//
// You can always provide us with additional information in the form of attachments. Feel free
// to attach something to the issue if ...
//
// - ... it can help us to analyze a \ref issues_kind_bug;
// - ... you have some source code that demonstrates a problem;
// - ... you already have a working prototype that sketches the idea;
// - ... you have additional resources that could help us.
//
// We appreciate anything that simplifies our work and speeds up our progress.
//
// \n Previous: \ref faq Next: \ref blaze_references \n
*/
//*************************************************************************************************
//**Blaze References*******************************************************************************
/*!\page blaze_references Blaze References
//
// In case you need references to the \b Blaze library (for papers or other publications), please
// feel free to use one of the following references:
\code
@misc{blazelib,
author = "Klaus {Iglberger}",
title = "Blaze C++ Linear Algebra Library",
howpublished = "https://bitbucket.org/blaze-lib",
year = 2012
}
\endcode
\code
@article{iglberger2012_1,
author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}",
title = "Expression Templates Revisited: A Performance Analysis of Current Methodologies",
journal = "SIAM Journal on Scientific Computing",
year = 2012,
volume = 34(2),
pages = C42--C69
}
\endcode
\code
@inproceedings{iglberger2012_2,
author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}",
title = "High Performance Smart Expression Template Math Libraries",
booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012",
year = 2012
}
\endcode
// \n Previous: \ref issue_creation_guidelines
*/
//*************************************************************************************************
#endif
|
close_modifier.c | // RUN: %libomptarget-compile-run-and-check-generic
// REQUIRES: unified_shared_memory
// UNSUPPORTED: clang-6, clang-7, clang-8, clang-9
// amdgpu runtime crash
// UNSUPPORTED: amdgcn-amd-amdhsa
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#pragma omp requires unified_shared_memory
#define N 1024
/*
 * Verifies the OpenMP 'close' map-type modifier under unified shared memory:
 * variables mapped with 'close' must receive a real device copy (a distinct
 * address), data must NOT flow back for a 'to' map, and must flow both ways
 * for a 'tofrom' map. Results are reported via printf and validated by the
 * FileCheck directives (// CHECK: ...) above each print.
 */
int main(int argc, char *argv[]) {
  int fails;
  void *host_alloc, *device_alloc;
  void *host_data, *device_data;
  int *alloc = (int *)malloc(N * sizeof(int));
  int data[N];

  /* Fail fast instead of dereferencing NULL if the host allocation failed. */
  if (alloc == NULL) {
    printf("Host allocation failed.\n");
    return 1;
  }

  for (int i = 0; i < N; ++i) {
    alloc[i] = 10;
    data[i] = 1;
  }
  host_data = &data[0];
  host_alloc = &alloc[0];

  //
  // Test that updates on the device are not visible to host
  // when only a TO mapping is used.
  //
#pragma omp target map(tofrom : device_data, device_alloc)                     \
    map(close, to : alloc[:N], data[:N])
  {
    /* Record the device-side addresses so the host can compare them. */
    device_data = &data[0];
    device_alloc = &alloc[0];
    for (int i = 0; i < N; i++) {
      alloc[i] += 1;
      data[i] += 1;
    }
  }

  /* 'close' forces a separate device copy, so the addresses must differ. */
  // CHECK: Address of alloc on device different from host address.
  if (device_alloc != host_alloc)
    printf("Address of alloc on device different from host address.\n");

  // CHECK: Address of data on device different from host address.
  if (device_data != host_data)
    printf("Address of data on device different from host address.\n");

  // On the host, check that the arrays have been updated.
  // CHECK: Alloc host values not updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (alloc[i] != 10)
      fails++;
  }
  printf("Alloc host values not updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  // CHECK: Data host values not updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (data[i] != 1)
      fails++;
  }
  printf("Data host values not updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  //
  // Test that updates on the device are visible on host
  // when a from is used.
  //
  for (int i = 0; i < N; i++) {
    alloc[i] += 1;
    data[i] += 1;
  }

#pragma omp target map(close, tofrom : alloc[:N], data[:N])
  {
    /* The 'to' part of tofrom must have copied the host values (11/2) in. */
    // CHECK: Alloc device values are correct: Succeeded
    fails = 0;
    for (int i = 0; i < N; i++) {
      if (alloc[i] != 11)
        fails++;
    }
    printf("Alloc device values are correct: %s\n",
           (fails == 0) ? "Succeeded" : "Failed");

    // CHECK: Data device values are correct: Succeeded
    fails = 0;
    for (int i = 0; i < N; i++) {
      if (data[i] != 2)
        fails++;
    }
    printf("Data device values are correct: %s\n",
           (fails == 0) ? "Succeeded" : "Failed");

    // Update values on the device
    for (int i = 0; i < N; i++) {
      alloc[i] += 1;
      data[i] += 1;
    }
  }

  /* The 'from' part must copy the device updates (12/3) back to the host. */
  // CHECK: Alloc host values updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (alloc[i] != 12)
      fails++;
  }
  printf("Alloc host values updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  // CHECK: Data host values updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (data[i] != 3)
      fails++;
  }
  printf("Data host values updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  free(alloc);

  // CHECK: Done!
  printf("Done!\n");
  return 0;
}
|
sparseblockmat_omp_kernels.h | #include <omp.h>
#include "config.h"
//for the fmas it is important to activate -mfma compiler flag
namespace dg{
// general multiply kernel
// General (runtime-sized) ELL-style block-sparse multiply kernel:
// computes y = alpha*M*x + beta*y, where each of the num_rows block rows
// holds blocks_per_line dense n x n blocks.
// Must be called from within an OpenMP parallel region (uses "omp for nowait").
//  - data:       storage of the distinct n x n blocks, selected via data_idx
//  - cols_idx:   block-column index of each block in a row
//  - left_size / right_size: outer/inner replication dimensions of the layout;
//    right_range[0..1] restricts the innermost index j to a sub-interval.
template<class value_type>
void ell_multiply_kernel( value_type alpha, value_type beta,
    const value_type * RESTRICT data, const int * RESTRICT cols_idx,
    const int * RESTRICT data_idx,
    const int num_rows, const int num_cols, const int blocks_per_line,
    const int n,
    const int left_size, const int right_size,
    const int * RESTRICT right_range,
    const value_type * RESTRICT x, value_type * RESTRICT y
    )
{
    // si enumerates the (s,i) pairs: s = outer (left) index, i = block row
    #pragma omp for nowait //manual collapse(2)
    for( int si = 0; si<left_size*num_rows; si++)
    {
        int s = si / num_rows;
        int i = si % num_rows;
        // J[d] = start offset (in x, before the right_size stride) of the
        // d-th block column of row i
#ifdef _MSC_VER //MSVC does not support variable lenght arrays...
        int* J = (int*)alloca(blocks_per_line * sizeof(int));
#else
        int J[blocks_per_line];
#endif
        for( int d=0; d<blocks_per_line; d++)
            J[d] = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
        for( int k=0; k<n; k++)
        {
            // B[d] = start offset in data of line k of the d-th block
#ifdef _MSC_VER
            int* B = (int*)alloca(blocks_per_line * sizeof(int));
#else
            int B[blocks_per_line];
#endif
            for( int d=0; d<blocks_per_line; d++)
                B[d] = (data_idx[i*blocks_per_line+d]*n+k)*n;
            for( int j=right_range[0]; j<right_range[1]; j++)
            {
                int I = ((s*num_rows + i)*n+k)*right_size+j;
                // scale the output first, then accumulate alpha*(row.x)
                y[I]*= beta;
                for( int d=0; d<blocks_per_line; d++)
                {
                    value_type temp = 0;
                    for( int q=0; q<n; q++) //multiplication-loop
                        temp = DG_FMA(data[ B[d]+q],
                            x[(J[d]+q)*right_size+j],
                            temp);
                    // DG_FMA(a,b,c) is a fused multiply-add a*b+c (config.h)
                    y[I] = DG_FMA(alpha, temp, y[I]);
                }
            }
        }
    }
}
//specialized multiply kernel
// Specialized ELL multiply kernel with compile-time block size n and
// blocks_per_line, enabling fixed-size private arrays and vectorization.
// Computes y = alpha*M*x + beta*y; must be called from within an OpenMP
// parallel region (all worksharing pragmas use "for nowait").
template<class value_type, int n, int blocks_per_line>
void ell_multiply_kernel( value_type alpha, value_type beta,
    const value_type * RESTRICT data, const int * RESTRICT cols_idx,
    const int * RESTRICT data_idx,
    const int num_rows, const int num_cols,
    const int left_size, const int right_size,
    const int * RESTRICT right_range,
    const value_type * RESTRICT x, value_type * RESTRICT y
    )
{
    //basically we check which direction is the largest and parallelize that one
    if(right_size==1)
    {
        //trivial means that the data blocks do not change among rows
        // (rows 1..num_rows-2 all use the same data blocks as row 1)
        bool trivial = true;
        for( int i=1; i<num_rows-1; i++)
        for( int d=0; d<blocks_per_line; d++)
        {
            if( data_idx[i*blocks_per_line+d]
                    != data_idx[blocks_per_line+d]) trivial = false;
        }
        if(trivial)
        {
            // thread-private scratch: gathered x values and pre-gathered data
            // blocks of the (identical) interior rows, reordered for the
            // k-innermost access in the vectorized loop below
            value_type xprivate[blocks_per_line*n];
            value_type dprivate[blocks_per_line*n*n];
            for( int d=0; d<blocks_per_line; d++)
            for( int k=0; k<n; k++)
            for( int q=0; q<n; q++)
            {
                int B = data_idx[blocks_per_line+d];
                dprivate[(k*blocks_per_line+d)*n+q] = data[(B*n+k)*n+q];
            }
            #pragma omp for nowait
            for( int s=0; s<left_size; s++)
            {
                // first row (i==0) handled separately (boundary blocks)
                for( int i=0; i<1; i++)
                {
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                        for(int q=0; q<n; q++)
                            xprivate[d*n+q] = x[J+q];
                    }
                    for( int k=0; k<n; k++)
                    {
                        value_type temp[blocks_per_line] = {0};
                        for( int d=0; d<blocks_per_line; d++)
                        {
                            int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                            for( int q=0; q<n; q++) //multiplication-loop
                                temp[d] = DG_FMA(data[B+q], xprivate[d*n+q], temp[d]);
                        }
                        int I = ((s*num_rows + i)*n+k);
                        y[I]*= beta;
                        for( int d=0; d<blocks_per_line; d++)
                            y[I] = DG_FMA(alpha, temp[d], y[I]);
                    }
                }
                // interior rows use the pre-gathered dprivate blocks
#ifndef _MSC_VER
                #pragma omp SIMD //very important for KNL
#endif
                for( int i=1; i<num_rows-1; i++)
                {
                    for( int k=0; k<n; k++)
                    {
                        int I = ((s*num_rows + i)*n+k);
                        y[I]*= beta;
                        int B = n*blocks_per_line*k;
                        for( int d=0; d<blocks_per_line; d++)
                        {
                            value_type temp = 0;
                            for( int q=0; q<n; q++)
                            {
                                int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n+q;
                                temp = DG_FMA( dprivate[B+d*n+q], x[J], temp);
                            }
                            y[I] = DG_FMA(alpha, temp, y[I]);
                        }
                    }
                }
                // last row (i==num_rows-1) handled separately (boundary blocks)
                for( int i=num_rows-1; i<num_rows; i++)
                {
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                        for(int q=0; q<n; q++)
                            xprivate[d*n+q] = x[J+q];
                    }
                    for( int k=0; k<n; k++)
                    {
                        value_type temp[blocks_per_line] = {0};
                        for( int d=0; d<blocks_per_line; d++)
                        {
                            int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                            for( int q=0; q<n; q++) //multiplication-loop
                                temp[d] = DG_FMA( data[B+q], xprivate[d*n+q], temp[d]);
                        }
                        int I = ((s*num_rows + i)*n+k);
                        y[I]*= beta;
                        for( int d=0; d<blocks_per_line; d++)
                            y[I] = DG_FMA(alpha, temp[d], y[I]);
                    }
                }
            }
        } //trivial
        else // not trivial
        {
            // generic path: gather x per row, read data blocks directly
            value_type xprivate[blocks_per_line*n];
            #pragma omp for nowait
            for( int s=0; s<left_size; s++)
            for( int i=0; i<num_rows; i++)
            {
                for( int d=0; d<blocks_per_line; d++)
                {
                    int J = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                    for(int q=0; q<n; q++)
                        xprivate[d*n+q] = x[J+q];
                }
                for( int k=0; k<n; k++)
                {
                    value_type temp[blocks_per_line] = {0};
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                        for( int q=0; q<n; q++) //multiplication-loop
                            temp[d] = DG_FMA( data[B+q], xprivate[d*n+q], temp[d]);
                    }
                    int I = ((s*num_rows + i)*n+k);
                    y[I]*= beta;
                    for( int d=0; d<blocks_per_line; d++)
                        y[I] = DG_FMA(alpha, temp[d], y[I]);
                }
            }
        }// trivial
    }// right_size==1
    else // right_size != 1
    {
        // thread-private gathered data line and x offsets per row
        value_type dprivate[blocks_per_line*n];
        int J[blocks_per_line];
        if( !( (right_range[1]-right_range[0]) > 100*left_size*num_rows*n )) //typically a derivative in y ( Ny*Nz >~ Nx)
        {
            // parallelize over (s,i,k); j loop vectorized
            #pragma omp for nowait
            for (int sik = 0; sik < left_size*num_rows*n; sik++)
            {
                int s = sik / (num_rows*n);
                int i = (sik % (num_rows*n)) / n;
                int k = (sik % (num_rows*n)) % n;
                for( int d=0; d<blocks_per_line; d++)
                {
                    J[d] = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                    int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                    for(int q=0; q<n; q++)
                        dprivate[d*n+q] = data[B+q];
                }
#ifndef _MSC_VER
                #pragma omp SIMD //very important for KNL
#endif
                for( int j=right_range[0]; j<right_range[1]; j++)
                {
                    int I = ((s*num_rows + i)*n+k)*right_size+j;
                    y[I]*= beta;
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        value_type temp = 0;
                        int Jd = J[d];
                        for( int q=0; q<n; q++) //multiplication-loop
                            temp = DG_FMA( dprivate[ d*n+q],
                                x[(Jd+q)*right_size+j],
                                temp);
                        y[I] = DG_FMA(alpha, temp, y[I]);
                    }
                }
            }
        }
        else //typically a derivative in z (since n*n*Nx*Ny > 100*Nz)
        {
            // here the j range dominates, so the omp worksharing is moved
            // onto the inner j loop; the outer sik loop runs on every thread
            for (int sik = 0; sik < left_size*num_rows*n; sik++)
            {
                int s = sik / (num_rows*n);
                int i = (sik % (num_rows*n)) / n;
                int k = (sik % (num_rows*n)) % n;
                for( int d=0; d<blocks_per_line; d++)
                {
                    J[d] = (s*num_cols+cols_idx[i*blocks_per_line+d])*n;
                    int B = (data_idx[i*blocks_per_line+d]*n+k)*n;
                    for(int q=0; q<n; q++)
                        dprivate[d*n+q] = data[B+q];
                }
                #pragma omp for SIMD nowait
                for( int j=right_range[0]; j<right_range[1]; j++)
                {
                    int I = ((s*num_rows + i)*n+k)*right_size+j;
                    y[I]*= beta;
                    for( int d=0; d<blocks_per_line; d++)
                    {
                        value_type temp = 0;
                        int Jd = J[d];
                        for( int q=0; q<n; q++) //multiplication-loop
                            temp = DG_FMA( dprivate[ d*n+q],
                                x[(Jd+q)*right_size+j],
                                temp);
                        y[I] = DG_FMA(alpha, temp, y[I]);
                    }
                }
            }
        }
    }
}
template<class value_type, int n>
void call_ell_multiply_kernel( value_type alpha, value_type beta,
const value_type * RESTRICT data_ptr, const int * RESTRICT cols_ptr,
const int * RESTRICT block_ptr,
const int num_rows, const int num_cols, const int blocks_per_line,
const int left_size, const int right_size,
const int * RESTRICT right_range_ptr,
const value_type * RESTRICT x_ptr, value_type * RESTRICT y_ptr)
{
if( blocks_per_line == 1)
ell_multiply_kernel<value_type, n, 1> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else if (blocks_per_line == 2)
ell_multiply_kernel<value_type, n, 2> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else if (blocks_per_line == 3)
ell_multiply_kernel<value_type, n, 3> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else if (blocks_per_line == 4)
ell_multiply_kernel<value_type, n, 4> (alpha, beta, data_ptr,
cols_ptr, block_ptr, num_rows, num_cols, left_size, right_size,
right_range_ptr, x_ptr,y_ptr);
else
ell_multiply_kernel<value_type> (alpha, beta, data_ptr, cols_ptr,
block_ptr, num_rows, num_cols, blocks_per_line, n, left_size,
right_size, right_range_ptr, x_ptr,y_ptr);
}
// Entry point for the ELL multiply: extracts the raw device/host pointers
// from the thrust containers and dispatches on the polynomial block size n
// (specialized kernels for n = 1..5, generic kernel otherwise).
template<class value_type>
void EllSparseBlockMatDevice<value_type>::launch_multiply_kernel( value_type alpha, const value_type* x_ptr, value_type beta, value_type* y_ptr) const
{
    const value_type* data_ptr = thrust::raw_pointer_cast( &data[0]);
    const int* cols_ptr = thrust::raw_pointer_cast( &cols_idx[0]);
    const int* block_ptr = thrust::raw_pointer_cast( &data_idx[0]);
    const int* right_range_ptr = thrust::raw_pointer_cast( &right_range[0]);
    switch( n)
    {
        case 1:
            call_ell_multiply_kernel<value_type, 1>( alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 2:
            call_ell_multiply_kernel<value_type, 2>( alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 3:
            call_ell_multiply_kernel<value_type, 3>( alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 4:
            call_ell_multiply_kernel<value_type, 4>( alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        case 5:
            call_ell_multiply_kernel<value_type, 5>( alpha, beta, data_ptr,
                cols_ptr, block_ptr, num_rows, num_cols, blocks_per_line,
                left_size, right_size, right_range_ptr, x_ptr, y_ptr);
            break;
        default:
            // no specialization for this n: generic runtime kernel
            ell_multiply_kernel<value_type>( alpha, beta, data_ptr, cols_ptr,
                block_ptr, num_rows, num_cols, blocks_per_line, n, left_size,
                right_size, right_range_ptr, x_ptr, y_ptr);
    }
}
// General (runtime n) COO block-sparse kernel: accumulates y += alpha*M*x for
// the scattered blocks of m; x is an array of input vectors selected per
// entry via m.cols_idx. Must be called from within an OpenMP parallel region.
// NOTE(review): beta is accepted but never applied to y in this kernel —
// presumably the caller (or a preceding ELL kernel) has already scaled y by
// beta; confirm at the call site before relying on it.
template<class value_type>
void coo_multiply_kernel( value_type alpha, const value_type** x, value_type beta, value_type* RESTRICT y, const CooSparseBlockMatDevice<value_type>& m )
{
    // skj enumerates the (s,k,j) triples of the output layout
    #pragma omp for nowait
    for (int skj = 0; skj < m.left_size*m.n*m.right_size; skj++)
    {
        int s = skj / (m.n*m.right_size);
        int k = (skj % (m.n*m.right_size)) / m.right_size;
        int j = (skj % (m.n*m.right_size)) % m.right_size;
        for (int i = 0; i < m.num_entries; i++)
        {
            int I = ((s*m.num_rows + m.rows_idx[i])*m.n + k)*m.right_size + j;
            value_type temp = 0;
            for (int q = 0; q < m.n; q++) //multiplication-loop
                temp = DG_FMA(m.data[(m.data_idx[i] * m.n + k)*m.n + q],
                    x[m.cols_idx[i]][(q*m.left_size +s )*m.right_size+j],
                    temp);
            y[I] = DG_FMA(alpha, temp, y[I]);
        }
    }
}
// Specialized COO block-sparse kernel with compile-time block size n:
// accumulates y += alpha*M*x for the scattered blocks of m. Must be called
// from within an OpenMP parallel region (uses "omp for SIMD nowait").
// NOTE(review): beta is accepted but never applied to y here — presumably it
// has already been applied by the companion ELL kernel; confirm at call site.
template<class value_type, int n>
void coo_multiply_kernel( value_type alpha, const value_type** x, value_type beta, value_type* RESTRICT y, const CooSparseBlockMatDevice<value_type>& m )
{
    // "trivial" pattern: column and data indices form consecutive runs
    // starting at cols_idx[0] / data_idx[0]
    bool trivial = true;
    int CC = m.cols_idx[0], DD = m.data_idx[0];
    for( int i=0; i<m.num_entries; i++)
        if( CC+i != m.cols_idx[i] || DD+i != m.data_idx[i])
            trivial=false;
    if( trivial)
    {
        #pragma omp for SIMD nowait
        for (int sj = 0; sj < m.left_size*m.right_size; sj++)
        {
            int s = sj / m.right_size;
            // fixed: the second "% m.right_size" was redundant — the
            // remainder is already < m.right_size
            int j = sj % m.right_size;
            for( int k=0; k<n; k++)
            {
                for (int i = 0; i < m.num_entries; i++)
                {
                    int I = ((s*m.num_rows + m.rows_idx[i])*n + k)*m.right_size + j;
                    int DDD = ((DD +i)*n+k)*n, CCC = CC+i;
                    value_type temp = 0;
                    for (int q = 0; q < n; q++) //multiplication-loop
                        temp = DG_FMA(m.data[DDD + q],
                            x[CCC][q*m.left_size*m.right_size +sj],
                            temp);
                    y[I] = DG_FMA(alpha, temp, y[I]);
                }
            }
        }
    }
    else
    {
        #pragma omp for SIMD nowait
        for (int sj = 0; sj < m.left_size*m.right_size; sj++)
        {
            int s = sj / m.right_size;
            // fixed: redundant double modulo removed (see above)
            int j = sj % m.right_size;
            for( int k=0; k<n; k++)
            {
                for (int i = 0; i < m.num_entries; i++)
                {
                    int I = ((s*m.num_rows + m.rows_idx[i])*n + k)*m.right_size + j;
                    value_type temp = 0;
                    for (int q = 0; q < n; q++) //multiplication-loop
                        temp = DG_FMA(m.data[(m.data_idx[i] * n + k)*n + q],
                            x[m.cols_idx[i]][q*m.left_size*m.right_size +sj],
                            temp);
                    y[I] = DG_FMA(alpha, temp, y[I]);
                }
            }
        }
    }
}
// Entry point for the COO multiply: dispatches on the polynomial block size
// n (specialized kernels for n = 1..4, generic kernel otherwise).
template<class value_type>
void CooSparseBlockMatDevice<value_type>::launch_multiply_kernel( value_type alpha, const value_type** x, value_type beta, value_type* RESTRICT y) const
{
    switch( n)
    {
        case 1: coo_multiply_kernel<value_type, 1>( alpha, x, beta, y, *this); break;
        case 2: coo_multiply_kernel<value_type, 2>( alpha, x, beta, y, *this); break;
        case 3: coo_multiply_kernel<value_type, 3>( alpha, x, beta, y, *this); break;
        case 4: coo_multiply_kernel<value_type, 4>( alpha, x, beta, y, *this); break;
        // no specialization available: runtime-sized kernel
        default: coo_multiply_kernel<value_type>( alpha, x, beta, y, *this);
    }
}
}//namespace dg
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/fourier.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
  PixelChannel
    channel;          /* pixel channel this transform operates on */

  MagickBooleanType
    modulus;          /* MagickTrue: magnitude/phase; MagickFalse: real/imaginary */

  size_t
    width,            /* padded (square, even) transform dimensions */
    height;

  ssize_t
    center;           /* width/2+1: column count of the half-plane r2c output */
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ComplexImages() treats the image list as pairs of (real, imaginary)
  component images A=(Ar,Ai) and B=(Br,Bi) and applies the complex operator
  op pixel-wise, returning a new two-image list C=(Cr,Ci).  When the input
  list has at least four images, B is taken from images 3 and 4; otherwise
  B aliases A.  Returns NULL (with exception set) on failure.
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"

  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;

  const char
    *artifact;

  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;

  double
    snr;

  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* at least two images (real+imaginary) are required */
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  /* result list: two clones of the first input (real and imaginary parts) */
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  /* optional noise term for the divide operator's denominator */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  /* with 4+ input images, the second operand pair comes from images 3 & 4 */
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(images,complex_images,images->rows,1L)
#endif
  for (y=0; y < (ssize_t) images->rows; y++)
  {
    register const Quantum
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;

    register Quantum
      *magick_restrict Ci,
      *magick_restrict Cr;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Ar_image->columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Ai_image->columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,Br_image->columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Bi_image->columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) ||
        (Br == (const Quantum *) NULL) || (Bi == (const Quantum *) NULL) ||
        (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) images->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(images); i++)
      {
        switch (op)
        {
          case AddComplexOperator:
          {
            Cr[i]=Ar[i]+Br[i];
            Ci[i]=Ai[i]+Bi[i];
            break;
          }
          case ConjugateComplexOperator:
          default:
          {
            /* conjugate: keep the real part, negate the imaginary part */
            Cr[i]=Ar[i];
            Ci[i]=(-Bi[i]);
            break;
          }
          case DivideComplexOperator:
          {
            double
              gamma;

            /* snr regularizes the denominator to avoid division blow-up */
            gamma=PerceptibleReciprocal(Br[i]*Br[i]+Bi[i]*Bi[i]+snr);
            Cr[i]=gamma*(Ar[i]*Br[i]+Ai[i]*Bi[i]);
            Ci[i]=gamma*(Ai[i]*Br[i]-Ar[i]*Bi[i]);
            break;
          }
          case MagnitudePhaseComplexOperator:
          {
            /* phase is mapped from [-pi,pi] into [0,1] */
            Cr[i]=sqrt(Ar[i]*Ar[i]+Ai[i]*Ai[i]);
            Ci[i]=atan2(Ai[i],Ar[i])/(2.0*MagickPI)+0.5;
            break;
          }
          case MultiplyComplexOperator:
          {
            Cr[i]=QuantumScale*(Ar[i]*Br[i]-Ai[i]*Bi[i]);
            Ci[i]=QuantumScale*(Ai[i]*Br[i]+Ar[i]*Bi[i]);
            break;
          }
          case RealImaginaryComplexOperator:
          {
            /* inverse of MagnitudePhase: phase image is in [0,1] */
            Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5));
            Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5));
            break;
          }
          case SubtractComplexOperator:
          {
            Cr[i]=Ar[i]-Br[i];
            Ci[i]=Ai[i]-Bi[i];
            break;
          }
        }
      }
      Ar+=GetPixelChannels(Ar_image);
      Ai+=GetPixelChannels(Ai_image);
      Br+=GetPixelChannels(Br_image);
      Bi+=GetPixelChannels(Bi_image);
      Cr+=GetPixelChannels(Cr_image);
      Ci+=GetPixelChannels(Ci_image);
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ComplexImages)
#endif
        proceed=SetImageProgress(images,ComplexImageTag,progress++,
          images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwadFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  RollFourier() circularly shifts the width x height array roll_pixels by
  (x_offset,y_offset), wrapping at the edges, in place.  Used to move the
  zero-frequency (DC) term to the center of the spectrum.  Returns
  MagickFalse only if the temporary buffer cannot be allocated.
*/
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *source_pixels;

  MemoryInfo
    *source_info;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /* v = (y+y_offset) mod height, computed branch-wise to avoid negative % */
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      /* u = (x+x_offset) mod width, same wrap-around scheme */
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  /* copy the shifted buffer back in place */
  (void) memcpy(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  ForwardQuadrantSwap() expands the half-plane FFTW r2c output
  (source_pixels, width/2+1 columns per row) into a full width x height
  spectrum (forward_pixels) with the DC term centered, reconstructing the
  left half from the conjugate-symmetric right half.  Returns MagickFalse if
  the intermediate roll fails to allocate.
*/
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  /* shift the half-plane vertically so DC moves to the middle row */
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /* right half: direct copy of the stored half-plane */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /* left half: mirrored from the right half (Hermitian symmetry) */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  /* top row (y==0) handled separately since height-y would wrap */
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}
/*
  CorrectPhaseLHS() negates the left half (columns 0 .. width/2-1) of every
  row of the width x height array fourier_pixels, in place.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  ssize_t
    row,
    col;

  for (row=0L; row < (ssize_t) height; row++)
    for (col=0L; col < (ssize_t) (width/2L); col++)
      fourier_pixels[row*width+col]=(-fourier_pixels[row*width+col]);
}
/*
  ForwardFourier() writes the computed magnitude and phase (or real and
  imaginary) arrays into the first two images of the result list: the
  half-plane data is expanded/centered via ForwardQuadrantSwap, the left
  half-plane sign corrected, phase optionally normalized into [0,1], and the
  values stored into the channel selected by fourier_info->channel.
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register Quantum
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) memset(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) memset(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  /* expand half-plane data into full centered spectra */
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /* map phase from [-pi,pi] to [0,1] for storage as quantum values */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  /* write the magnitude values into the selected channel */
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(magnitude_image);
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /* write the phase values into the selected channel */
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(phase_image);
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
/*
  ForwardFourierTransform() samples the selected channel of image into a
  real array, runs FFTW's real-to-complex 2-D transform, optionally
  normalizes by 1/(width*height), and fills magnitude_pixels/phase_pixels
  (half-plane, width x (height/2+1) layout) with either magnitude/phase or
  real/imaginary values depending on fourier_info->modulus.
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /* zero-fill: the transform size may exceed the image size (padding) */
  memset(source_pixels,0,fourier_info->width*fourier_info->height*
    sizeof(*source_pixels));
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(image,p);
          break;
        }
        case GreenPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(image,p);
          break;
        }
        case BluePixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(image,p);
          break;
        }
        case BlackPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlack(image,p);
          break;
        }
        case AlphaPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelAlpha(image,p);
          break;
        }
      }
      i++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* r2c output is the half plane: width x (height/2+1) complex values */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
  /* fftw_plan_* functions are not thread safe; serialize plan creation */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  /* NOTE(review): fftw_plan_dft_r2c_2d expects (n0=rows,n1=cols); passing
     (width,height) is only safe because the caller forces width==height —
     confirm before changing the square-padding logic. */
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() computes the forward DFT of one pixel
  channel: it pads the transform size to a square with even dimensions,
  allocates half-plane magnitude/phase work buffers, runs the transform and
  writes the result into fourier_image.  Returns MagickFalse on failure.
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const PixelChannel channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  /* FFTW layout here requires a square transform with even extent: pad to
     the larger dimension, rounded up to even */
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      size_t extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /* half-plane buffers: width x (height/2+1) doubles each */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      /* FIX: was "== NULL", which relinquished a NULL pointer and leaked the
         successfully acquired buffer when only the other allocation failed */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns a two-image list: the magnitude (or
  real) image followed by the phase (or imaginary) image, selected by
  `modulus'.  Returns an empty list when FFTW support is not built in, or
  NULL-destroyed list on failure.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  /* no FFTW at build time: warn and return the empty list */
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      height,
      width;

    width=image->columns;
    height=image->rows;
    /*
      The DFT is computed on a square grid with even extent; pad up to the
      larger dimension (plus one if odd).  Must stay in sync with the same
      computation in ForwardFourierTransformChannel().
    */
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image
          *phase_image;

        /* transform data needs full dynamic range */
        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType
              is_gray,
              status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsImageGray(image);
            /*
              Transform each channel in its own OpenMP section; a gray image
              needs only a single (gray) channel.  Each section only ever
              writes MagickFalse to the shared `status', so concurrent
              stores of the same value are benign.
            */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                /* gray: single channel; otherwise red */
                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayPixelChannel,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,
                    RedPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BluePixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                /* black channel only exists for CMYK images */
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlackPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->alpha_trait != UndefinedPixelTrait)
                  thread_status=ForwardFourierTransformChannel(image,
                    AlphaPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            /* release FFTW's internal planner state */
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() maps a full width x height plane (as produced by the
  forward transform's quadrant swap) back into the half-plane layout
  (`center' = width/2+1 columns) that FFTW's c2r transform expects, then
  rolls the rows by -height/2 to undo the forward roll.

  NOTE(review): the index arithmetic mirrors ForwardQuadrantSwap (outside
  this view); treat the two as a matched pair when modifying either.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  /* mirror the left half-plane rows (row 0 handled separately below) */
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  /* column width/2 of the source becomes column 0 of each output row */
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  /* reversed first row */
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads the magnitude/phase (or real/imaginary) image pair
  into double arrays, undoes the forward quadrant swap, and merges the two
  planes into the complex array `fourier_pixels' that the subsequent c2r
  FFT consumes.  Returns MagickFalse on allocation failure or when the
  quadrant swap fails.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  /* scratch half-plane buffer reused for both quadrant swaps */
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  /* read the selected channel of the magnitude image, scaled to [0,1] */
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p);
          break;
        }
        case GreenPixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p);
          break;
        }
        case BluePixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p);
          break;
        }
        case BlackPixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p);
          break;
        }
        case AlphaPixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p);
          break;
        }
      }
      i++;
      p+=GetPixelChannels(magnitude_image);
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /* undo the forward quadrant swap, compacting into the r2c half-plane */
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  /* same extraction for the phase image */
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p);
          break;
        }
        case GreenPixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p);
          break;
        }
        case BluePixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p);
          break;
        }
        case BlackPixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p);
          break;
        }
        case AlphaPixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p);
          break;
        }
      }
      i++;
      p+=GetPixelChannels(phase_image);
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      /* map stored phase from [0,1] back to [-pi,pi] */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    /* polar: re = m*cos(phi), im = m*sin(phi) */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    /* cartesian: planes are already real and imaginary parts */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}
/*
  InverseFourierTransform() runs FFTW's complex-to-real transform on
  `fourier_pixels' and writes the (optionally normalized) spatial-domain
  result into the selected channel of `image'.  Note the c2r transform
  overwrites its input array; callers must not reuse `fourier_pixels'.

  Returns MagickTrue, or MagickFalse on allocation failure.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  /*
    Fix: guard against a NULL artifact before LocaleCompare(), matching the
    explicit NULL check in the forward-transform path.  Default (artifact
    absent) is to normalize in the forward direction only, so the inverse
    scaling is skipped -- behavior is unchanged.
  */
  if ((value != (const char *) NULL) && (LocaleCompare(value,"inverse") == 0))
    {
      double
        gamma;

      /*
        Normalize inverse transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /* FFTW's planner is not thread-safe; serialize plan creation */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
    fourier_pixels,source_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
  fftw_destroy_plan(fftw_c2r_plan);
  /*
    Copy the spatial-domain samples back into the image, cropping the
    (possibly padded) transform grid to the image geometry.
  */
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedPixelChannel:
          default:
          {
            SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q);
            break;
          }
          case GreenPixelChannel:
          {
            SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
          case BluePixelChannel:
          {
            SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
          case BlackPixelChannel:
          {
            SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
          case AlphaPixelChannel:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
        }
      i++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one pixel channel of the
  spatial-domain image from a magnitude/phase (or real/imaginary) pair.
  The transform geometry is derived from the magnitude image exactly as the
  forward path derives it from the source image, so round trips line up.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const PixelChannel channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *inverse_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info;

  fourier_info.width=magnitude_image->columns;
  fourier_info.height=magnitude_image->rows;
  /* pad to a square grid with even extent (mirrors the forward path) */
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      size_t extent=magnitude_image->columns < magnitude_image->rows ?
        magnitude_image->rows : magnitude_image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  /* width of FFTW's r2c/c2r half-plane */
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*inverse_pixels));
  if (inverse_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
  /* build the complex half-plane, then transform it back to pixels */
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    inverse_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
      exception);
  inverse_info=RelinquishVirtualMemory(inverse_info);
  return(status);
}
#endif
/*
  InverseFourierTransformImage() combines a magnitude/phase (or
  real/imaginary) image pair back into a spatial-domain image.  Returns
  NULL when FFTW support is missing, when the phase image is absent, or
  when any per-channel transform fails.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
  const Image *phase_image,const MagickBooleanType modulus,
  ExceptionInfo *exception)
{
  Image
    *fourier_image;

  assert(magnitude_image != (Image *) NULL);
  assert(magnitude_image->signature == MagickCoreSignature);
  if (magnitude_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      magnitude_image->filename);
  /* a phase image is mandatory: both halves of the pair are needed */
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",magnitude_image->filename);
      return((Image *) NULL);
    }
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  fourier_image=(Image *) NULL;
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    magnitude_image->filename);
#else
  {
    fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
      magnitude_image->rows,MagickTrue,exception);
    if (fourier_image != (Image *) NULL)
      {
        MagickBooleanType
          is_gray,
          status;

        status=MagickTrue;
        /* treat as gray only if both inputs are gray */
        is_gray=IsImageGray(magnitude_image);
        if (is_gray != MagickFalse)
          is_gray=IsImageGray(phase_image);
        /*
          Transform each channel in its own OpenMP section; each section
          only ever writes MagickFalse to the shared `status'.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp parallel sections
#endif
        {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* gray: single channel; otherwise red */
            if (is_gray != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GrayPixelChannel,modulus,fourier_image,exception);
            else
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,RedPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GreenPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BluePixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            /* black channel only exists for CMYK images */
            if (magnitude_image->colorspace == CMYKColorspace)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BlackPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->alpha_trait != UndefinedPixelTrait)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,AlphaPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
        }
        if (status == MagickFalse)
          fourier_image=DestroyImage(fourier_image);
      }
    /* release FFTW's internal planner state */
    fftw_cleanup();
  }
#endif
  return(fourier_image);
}
|
Heinritz-Hsiao.c | /*
Author: Makarios Christakis
Description:
Parallel implementation of the Heinritz-Hsiao algorithm for the
travelling salesman problem.
For the parameters below the algorithm converged to:
Final total distance: 102157.02
Timed using time() on a 7th gen i7, Ubuntu 18.04 machine we get:
real 0m42,115s
user 5m35,123s
sys 0m0,048s
Not a noticable improvement over the serial implementation, which
is due to the fact that this algorithm is not really parallelizable
and also the problem is NP-hard.
*/
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// **********************************************************
// DEFINITIONS
#define N_POINTS 10000
#define THRESHOLD 0.8
// **********************************************************
// GLOBAL VARS
float cities[N_POINTS][2] = {0}; // City coordinates: cities[i] = {x, y}, filled by initVec() with values in [0, 1000]
short city_flags[N_POINTS] = {0}; // Per-city visit flag: 1 = still available, 0 = already visited
float totDist = 0; // Accumulated tour length, updated in main()
int curr_index = 0; // Index of the city the tour currently occupies
// **********************************************************
// Initialises the city coordinate vectors.
// Seeds the problem instance: every city gets uniformly random
// coordinates in [0, 1000] and is marked available (flag = 1).
void initVec() {
  for (int city = 0; city < N_POINTS; ++city) {
    city_flags[city] = 1;
    cities[city][0] = 1e3 * ((float)rand() / RAND_MAX);
    cities[city][1] = 1e3 * ((float)rand() / RAND_MAX);
  }
}
// **********************************************************
// Euclidean distance calculation between 2 points in the grid.
// Euclidean distance between cities p1 and p2 on the grid.
float dist(int p1, int p2) {
  const float dx = cities[p1][0] - cities[p2][0];
  const float dy = cities[p1][1] - cities[p2][1];
  return (float)sqrt(dx * dx + dy * dy);
}
// **********************************************************
// Performs one iteration of the algorithm, finding the closest city
// and moving to it.
// One step of the Heinritz-Hsiao heuristic: scan all still-available
// cities in parallel for the nearest and second-nearest neighbours of the
// current city, then move to the nearest with probability THRESHOLD and
// to the second-nearest otherwise.  Returns the distance travelled.
//
// Bug fix: index1/index2 were previously uninitialized.  If the random
// draw selected the second-best branch before two candidates had been
// recorded (in particular on the final step, when exactly one city
// remains), an indeterminate index was used to write city_flags[] --
// undefined behavior / out-of-bounds write.  Both indices now start at -1
// and the second-best branch falls back to the best candidate when no
// second-best exists.
float moveCity() {
  int index1 = -1, index2 = -1;
  float mindist1 = 100e3;
  float mindist2 = 100e3;
#pragma omp parallel for
  for (int i = 1; i < N_POINTS; i++) {
    if (city_flags[i] == 1) {
      float tmpDist = dist(curr_index, i);
      // The running minima and indices are shared state; serialize updates.
#pragma omp critical
      {
        if (tmpDist < mindist1) {
          mindist2 = mindist1;
          index2 = index1;
          index1 = i;
          mindist1 = tmpDist;
        }
        else if (tmpDist < mindist2) {
          mindist2 = tmpDist;
          index2 = i;
        }
      }
    }
  }
  // Draw unconditionally so the rand() stream matches the original code.
  const float draw = (float)rand() / RAND_MAX;
  if (index2 == -1 || draw < THRESHOLD) {
    city_flags[index1] = 0;
    curr_index = index1;
    return mindist1;
  }
  city_flags[index2] = 0;
  curr_index = index2;
  return mindist2;
}
// Builds one greedy tour over all N_POINTS cities, starting (and
// implicitly ending) at city 0, and prints the total route length.
int main() {
  initVec();
  // N_POINTS - 1 moves visit every city other than the starting one.
  for (int step = 0; step < N_POINTS - 1; step++)
    totDist += moveCity();
  totDist += dist(curr_index, 0); // close the tour back to city 0
  printf("Final total distance: %.2f\n", totDist);
  return 0;
}
|
part_func_co.c | /*
* partiton function for RNA secondary structures
*
* Ivo L Hofacker
* Stephan Bernhart
* Ronny Lorenz
* ViennaRNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/structures.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/plotting/probabilities.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
#include "ViennaRNA/part_func_co.h"
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#ifdef _OPENMP
#include <omp.h>
#endif
#endif
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
int mirnatog = 0;
double F_monomer[2] = {
0, 0
}; /* free energies of the two monomers */
#endif
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
PRIVATE vrna_dimer_pf_t
wrap_co_pf_fold(char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained);
#endif
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*
  Convenience wrapper: compute the dimer partition function for `seq'
  (cofold sequence, strands separated by '&' per the v3 API).  MFE is
  computed first only to rescale the Boltzmann factors; its structure is
  not backtracked.  If `pl' is non-NULL, base-pair probabilities are
  computed and returned through it as a plist (cutoff 1e-6).
*/
PUBLIC vrna_dimer_pf_t
vrna_pf_co_fold(const char *seq,
                char *structure,
                vrna_ep_t **pl)
{
  double mfe;
  vrna_dimer_pf_t X;
  vrna_fold_compound_t *vc;
  vrna_md_t md;

  vrna_md_set_default(&md);
  /* no need to backtrack MFE structure */
  md.backtrack = 0;
  /* pair probabilities are only needed when the caller wants a plist */
  if (pl)
    md.compute_bpp = 1;
  else
    md.compute_bpp = 0;

  vc = vrna_fold_compound(seq, &md, VRNA_OPTION_DEFAULT);
  /* rescale the partition function to the MFE to avoid overflows */
  mfe = (double)vrna_mfe_dimer(vc, NULL);
  vrna_exp_params_rescale(vc, &mfe);
  X = vrna_pf_dimer(vc, structure);
  if (pl)
    *pl = vrna_plist_from_probs(vc, /*cut_off:*/ 1e-6);

  vrna_fold_compound_free(vc);
  return X;
}
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/*
*****************************************
* BEGIN backward compatibility wrappers *
*****************************************
*/
/*
  Backward-compatibility core for co_pf_fold()/co_pf_fold_par(): builds a
  fold compound from the deprecated global-variable interface (cut_point,
  pf_scale, iindx, ...) and delegates to vrna_pf_dimer().  The created
  compound is cached in backward_compat_compound so the other deprecated
  accessors (get_plist, export_co_bppm, ...) can reach it afterwards.
  NOT thread-safe beyond the threadprivate globals declared above.
*/
PRIVATE vrna_dimer_pf_t
wrap_co_pf_fold(char *sequence,
                char *structure,
                vrna_exp_param_t *parameters,
                int calculate_bppm,
                int is_constrained)
{
  int length;
  char *seq;
  vrna_fold_compound_t *vc;
  vrna_md_t md;

  vc = NULL;
  length = strlen(sequence);
  /* +2: room for the inserted '&' separator and the terminator */
  seq = (char *)vrna_alloc(sizeof(char) * (length + 2));
  if (cut_point > -1) {
    /* splice the legacy global cut_point back into the sequence as '&' */
    int i;
    for (i = 0; i < cut_point - 1; i++)
      seq[i] = sequence[i];
    seq[i] = '&';
    for (; i < (int)length; i++)
      seq[i + 1] = sequence[i];
  } else {
    /* this ensures the allocation of all cofold matrices via vrna_fold_compound_t */
    free(seq);
    seq = strdup(sequence);
  }

  /*
   * if present, extract model details from provided parameters variable,
   * to properly initialize the fold compound. Otherwise use default
   * settings taken from deprecated global variables
   */
  if (parameters)
    vrna_md_copy(&md, &(parameters->model_details));
  else
    set_model_details(&md);

  /* set min_loop_size and backtracing options */
  md.compute_bpp = calculate_bppm;
  md.min_loop_size = 0;

  vc = vrna_fold_compound(seq, &md, VRNA_OPTION_DEFAULT);

  /*
   * if present, attach a copy of the parameters structure instead of the
   * default parameters but take care of re-setting it to (initialized)
   * model details
   */
  free(vc->exp_params);
  if (parameters) {
    vrna_md_copy(&(parameters->model_details), &(vc->params->model_details));
    vc->exp_params = vrna_exp_params_copy(parameters);
  } else {
    vc->exp_params = vrna_exp_params(&(vc->params->model_details));
  }

  /* propagate global pf_scale into vc->exp_params */
  vc->exp_params->pf_scale = pf_scale;

  if (is_constrained && structure) {
    /* apply the dot-bracket constraint string using the legacy alphabet */
    unsigned int constraint_options = 0;
    constraint_options |= VRNA_CONSTRAINT_DB
                          | VRNA_CONSTRAINT_DB_PIPE
                          | VRNA_CONSTRAINT_DB_DOT
                          | VRNA_CONSTRAINT_DB_X
                          | VRNA_CONSTRAINT_DB_ANG_BRACK
                          | VRNA_CONSTRAINT_DB_RND_BRACK;
    vrna_constraints_add(vc, (const char *)structure, constraint_options);
  }

  /* replace any previously cached compound with the fresh one */
  if (backward_compat_compound)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound = vc;
  backward_compat = 1;
  /* expose the compound's index table through the deprecated global */
  iindx = backward_compat_compound->iindx;

  free(seq);
  return vrna_pf_dimer(vc, structure);
}
/*
*****************************************
* END backward compatibility wrappers *
*****************************************
*/
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
/*
  Deprecated v2 API: cofold partition function using the global
  do_backtrack / fold_constrained switches.  Use vrna_pf_co_fold() or
  vrna_pf_dimer() instead.
*/
PUBLIC vrna_dimer_pf_t
co_pf_fold(char *sequence,
           char *structure)
{
  return wrap_co_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained);
}
/*
  Deprecated v2 API: like co_pf_fold() but with an explicit energy
  parameter set and explicit bppm/constraint switches instead of globals.
*/
PUBLIC vrna_dimer_pf_t
co_pf_fold_par(char *sequence,
               char *structure,
               vrna_exp_param_t *parameters,
               int calculate_bppm,
               int is_constrained)
{
  return wrap_co_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained);
}
/*
  Deprecated: harvest base-pair probabilities above `cut_off' from the
  global `pr' array of the last cofold run into the (caller-allocated,
  grown here via realloc) plist `pl'.  The list is terminated by an entry
  with i == j == 0 and shrunk to its final size before returning.
  Requires that a partition-function fold already populated
  backward_compat_compound.
*/
PUBLIC vrna_ep_t *
get_plist(vrna_ep_t *pl,
          int length,
          double cut_off)
{
  int i, j, n, count, *my_iindx;

  my_iindx = backward_compat_compound->iindx;
  /* get pair probibilities out of pr array */
  count = 0;
  n = 2;  /* current capacity is n*length entries; doubled on demand */
  for (i = 1; i < length; i++) {
    for (j = i + 1; j <= length; j++) {
      if (pr[my_iindx[i] - j] < cut_off)
        continue;

      /* grow before the slot one-from-capacity is consumed */
      if (count == n * length - 1) {
        n *= 2;
        pl = (vrna_ep_t *)vrna_realloc(pl, n * length * sizeof(vrna_ep_t));
      }

      pl[count].i = i;
      pl[count].j = j;
      pl[count++].p = pr[my_iindx[i] - j];
      /* printf("gpl: %2d %2d %.9f\n",i,j,pr[my_iindx[i]-j]); */
    }
  }
  /* sentinel entry marks the end of the list */
  pl[count].i = 0;
  pl[count].j = 0; /* ->?? */
  pl[count++].p = 0.;
  /* shrink to fit */
  pl = (vrna_ep_t *)vrna_realloc(pl, (count) * sizeof(vrna_ep_t));
  return pl;
}
/*
  Deprecated wrapper: compute AB-dimer pair probabilities from the
  monomer/dimer free energies, delegating to vrna_pf_dimer_probs() with
  the exp_params of the compound cached by the last legacy cofold call.
  Silently does nothing when no legacy fold has been performed yet.
*/
PUBLIC void
compute_probabilities(double FAB,
                      double FA,
                      double FB,
                      vrna_ep_t *prAB,
                      vrna_ep_t *prA,
                      vrna_ep_t *prB,
                      int Alength)
{
  /* guard: only valid after a legacy cofold populated the cached compound */
  if (!backward_compat_compound || !backward_compat)
    return;

  vrna_pf_dimer_probs(FAB,
                      FA,
                      FB,
                      prAB,
                      (const vrna_ep_t *)prA,
                      (const vrna_ep_t *)prB,
                      Alength,
                      (const vrna_exp_param_t *)backward_compat_compound->exp_params);
}
/*
  Deprecated wrapper around vrna_pf_dimer_concentrations(): computes
  equilibrium dimer/monomer concentrations from the ensemble free energies
  and the start concentrations, using the exp_params of the compound
  cached by the last legacy cofold call.
*/
PUBLIC vrna_dimer_conc_t *
get_concentrations(double FcAB,
                   double FcAA,
                   double FcBB,
                   double FEA,
                   double FEB,
                   double *startconc)
{
  const vrna_exp_param_t *params =
    (const vrna_exp_param_t *)backward_compat_compound->exp_params;

  return vrna_pf_dimer_concentrations(FcAB, FcAA, FcBB, FEA, FEB,
                                      (const double *)startconc, params);
}
/*
  Deprecated no-op kept only for link/API compatibility: matrix
  allocation is handled by vrna_fold_compound() in the v3 API, so there
  is nothing to pre-allocate here.
*/
PUBLIC void
init_co_pf_fold(int length)
{
  /* DO NOTHING */
}
/*
  Deprecated: release the fold compound cached by the legacy cofold
  interface (if any) and reset the backward-compatibility state.
*/
PUBLIC void
free_co_pf_arrays(void)
{
  if (!(backward_compat_compound && backward_compat))
    return;

  vrna_fold_compound_free(backward_compat_compound);
  backward_compat_compound = NULL;
  backward_compat = 0;
}
/*
  Deprecated: expose the base-pair probability matrix of the cached
  legacy fold compound, or NULL when no legacy fold has been run.
*/
PUBLIC FLT_OR_DBL *
export_co_bppm(void)
{
  return backward_compat_compound
         ? backward_compat_compound->exp_matrices->probs
         : NULL;
}
/*
  Deprecated: re-derive the Boltzmann parameters of the cached legacy
  compound from the current global model settings.  The `length'
  argument is unused and kept for API compatibility.
*/
PUBLIC void
update_co_pf_params(int length)
{
  if (!(backward_compat_compound && backward_compat))
    return;

  vrna_md_t md;
  set_model_details(&md);
  vrna_exp_params_reset(backward_compat_compound, &md);

  /* compatibility with RNAup, may be removed sometime */
  pf_scale = backward_compat_compound->exp_params->pf_scale;
}
/*
  Deprecated: like update_co_pf_params(), but substitute an explicit
  parameter set when `parameters' is non-NULL; otherwise fall back to the
  global model settings.  `length' is unused (API compatibility).
*/
PUBLIC void
update_co_pf_params_par(int length,
                        vrna_exp_param_t *parameters)
{
  if (backward_compat_compound && backward_compat) {
    vrna_md_t md;
    if (parameters) {
      vrna_exp_params_subst(backward_compat_compound, parameters);
    } else {
      set_model_details(&md);
      vrna_exp_params_reset(backward_compat_compound, &md);
    }

    /* compatibility with RNAup, may be removed sometime */
    pf_scale = backward_compat_compound->exp_params->pf_scale;
  }
}
#endif
|
TempAllocator.h | #ifndef TEMPALLOCATOR_H_BIJOGMF5
#define TEMPALLOCATOR_H_BIJOGMF5
#include "base/Basic.h"
namespace snow {
/// TempAllocator -- a static bump ("arena") allocator shared by all objects
/// of type T.  Allocation is O(1) pointer arithmetic; individual deletes are
/// no-ops and memory is reclaimed wholesale via flush().
///
/// Thread-safety: allocation and flush() are serialized through the same
/// unnamed OpenMP critical section.  (Note: `byte` comes from base/Basic.h.)
template <typename T, size_t SPACE = 1 << 20>
class TempAllocator {
private:
  static byte* ms_Data;     // lazily allocated arena of SPACE bytes
  static size_t ms_Offset;  // bump pointer: next free byte within the arena

public:
  /// Nothrow-style operator new: returns NULL when the arena is exhausted.
  void* operator new(size_t sz) throw() {
    void* ret = NULL;
#pragma omp critical
    {
      // Bug fix: the lazy arena allocation used to happen *before* the
      // critical section.  Two threads could both observe ms_Data == NULL
      // and each allocate a buffer -- a check-then-act data race that
      // leaked one of the two arenas.  The check-and-allocate now runs
      // under the same lock that protects the bump pointer.
      if (!ms_Data)
        ms_Data = new byte[SPACE];
      if (ms_Offset + sz <= SPACE)
      {
        ret = &ms_Data[ms_Offset];
        ms_Offset += sz;
      }
    }
    return ret;
  }

  /// Per-object delete is intentionally a no-op; use flush() to reclaim.
  inline void operator delete(void*) {}

  /// Releases the whole arena and resets the bump pointer.  All objects
  /// previously handed out by operator new become invalid.
  static void flush() {
#pragma omp critical
    {
      delete[] ms_Data;
      ms_Data = NULL;
      ms_Offset = 0;
    }
  }
};
template <typename T, size_t SPACE>
byte* TempAllocator<T, SPACE>::ms_Data = NULL;
template <typename T, size_t SPACE>
size_t TempAllocator<T, SPACE>::ms_Offset = 0;
}
#endif /* end of include guard: TEMPALLOCATOR_H_BIJOGMF5 */
|
core_math.h | /*
* Copyright (c) 2020 Georgios Damaskinos
* All rights reserved.
* @author Georgios Damaskinos <georgios.damaskinos@gmail.com>
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
// == mojo ====================================================================
//
// Copyright (c) gnawice@gnawice.com. All rights reserved.
// See LICENSE in root folder
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files(the "Software"),
// to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to
// whom the Software is furnished to do so, subject to the following
// conditions :
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
// OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
// THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// ============================================================================
// core_math.h: defines matrix class and math functions
// ==================================================================== mojo ==
#pragma once
#include <math.h>
#include <string.h>
#include <cstdint>
#include <cstdlib>
#include <algorithm>
#include <map>
#include <random>
#include <string>
#include <vector>
#ifdef MOJO_AVX
#include <immintrin.h>
#endif
#ifdef MOJO_ENABLE_NEON
#include <arm_neon.h>
#endif
namespace mojo
{
enum pad_type { zero = 0, edge = 1, median_edge = 2 };
inline float dot(const float *x1, const float *x2, const int size)
{
switch (size)
{
case 1: return x1[0] * x2[0];
case 2: return x1[0] * x2[0] + x1[1] * x2[1];
case 3: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2];
case 4: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3];
case 5: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3] + x1[4] * x2[4];
default:
float v = 0;
for (int i = 0; i<size; i++) v += x1[i] * x2[i];
return v;
};
}
// Dot product of two size x size 2-D patches whose rows are `stride1` /
// `stride2` floats apart; sums a row-wise dot() over all `size` rows.
inline float unwrap_2d_dot(const float *x1, const float *x2, const int size, int stride1, int stride2)
{
    float acc = 0;
    for (int row = 0; row < size; row++)
        acc += dot(x1 + stride1 * row, x2 + stride2 * row, size);
    return acc;
}
// Dot product with the second vector reversed (the 1-D slice of a true
// convolution, where the kernel is rotated 180 degrees).
// Accumulation order matches the original unrolled cases exactly.
inline float dot_rot180(const float *x1, const float *x2, const int size)
{
    float acc = 0;
    for (int k = 0; k < size; k++)
        acc += x1[k] * x2[size - 1 - k];
    return acc;
}
// 2-D convolution of two size x size patches: rows of x2 are walked from the
// bottom up and each row pair is combined with dot_rot180(), i.e. x2 is
// rotated 180 degrees relative to x1.
inline float unwrap_2d_dot_rot180(const float *x1, const float *x2, const int size, int stride1, int stride2)
{
    float acc = 0;
    for (int row = 0; row < size; row++)
        acc += dot_rot180(x1 + stride1 * row, x2 + stride2 * (size - 1 - row), size);
    return acc;
}
// Repack an in_size x in_size input into the SoA layout consumed by the
// dotsum_unwrapped_* kernels. Output pixels are handled in groups of 8
// "lanes"; tap t of lane l is written to aligned_out[group_base + t*8 + l],
// so each filter tap becomes a contiguous run of 8 floats.
// N==5 and N==3 are hand-unrolled; any other N takes the generic path.
// NOTE(review): the `stride` parameter is accepted but never used -- confirm
// callers always rely on the default of 1.
// NOTE(review): the final (partial) group when the pixel count is not a
// multiple of 8 leaves unwritten lanes -- presumably callers size/zero
// aligned_out accordingly; verify.
inline void unwrap_aligned_NxN(const int N, float *aligned_out, const float *in, const int in_size, const int stride = 1)
{
const int node_size = (in_size - N) + 1;
int c1 = 0;   // base offset of the current group of 8 output pixels
int off = 0;  // lane (0..7) within the group
const int inc_off = N*N*8;  // floats per group: N*N taps x 8 lanes
for (int j = 0; j < node_size; j += 1) // output row
{
for (int i = 0; i < node_size; i += 1) // output column
{
const float *tn = in + j*in_size + i;  // top-left of this pixel's patch
if(N==5)
{
for (int k = 0; k < 5; k++)
{
aligned_out[c1 + 0 + k * 40 + off] = tn[0 + 0 + in_size*k];
aligned_out[c1 + 8 + k * 40 + off] = tn[0 + 1 + in_size*k];
aligned_out[c1 + 16 + k * 40 + off] = tn[0 + 2 + in_size*k];
aligned_out[c1 + 24 + k * 40 + off] = tn[0 + 3 + in_size*k];
aligned_out[c1 + 32 + k * 40 + off] = tn[0 + 4 + in_size*k];
}
}
else if(N==3)
{
aligned_out[c1 + off] = tn[0];
aligned_out[c1 + 8 + off] = tn[0 + 1];
aligned_out[c1 + 16 + off] = tn[0 + 2];
aligned_out[c1 + 24 + off] = tn[0 + in_size];
aligned_out[c1 + 32 + off] = tn[0 + 1 + in_size];
aligned_out[c1 + 40 + off] = tn[0 + 2 + in_size];
aligned_out[c1 + 48 + off] = tn[0 + 2 * in_size];
aligned_out[c1 + 56 + off] = tn[0 + 1 + 2 * in_size];
aligned_out[c1 + 64 + off] = tn[0 + 2 + 2 * in_size];
}
else
{
// generic N: taps in row-major patch order
int cnt=0;
for (int k = 0; k < N; k++)
{
for (int m = 0; m < N; m++)
{
aligned_out[c1 + cnt*8 + off] = tn[0 + m + in_size*k];
cnt++;
}
}
}
off++;
if (off > 7) { off = 0; c1 += inc_off; }  // next group of 8 pixels
}
}
}
// Scalar reference kernel: accumulate an N*N-tap filter over data unwrapped
// by unwrap_aligned_NxN into `out`, 8 output pixels at a time.
// `outsize` must be a multiple of 8 (the image pointer advances 8 floats per
// tap, per group), and results are ADDED to the existing contents of `out`.
inline void dotsum_unwrapped_NxN(const int N, const float *im, const float *filter_ptr, float *out, const int outsize)
{
    const int taps = N * N;
    for (int base = 0; base < outsize; base += 8)
    {
        float *dst = out + base;
        for (int t = 0; t < taps; t++)
        {
            const float w = filter_ptr[t];
            for (int lane = 0; lane < 8; lane++)
                dst[lane] += im[lane] * w;
            im += 8;  // next tap's 8 lanes
        }
    }
}
#ifdef MOJO_AVX
// AVX kernel for N=2: 4-tap filter accumulated into `out`, 8 pixels per
// iteration, over SoA data produced by unwrap_aligned_NxN.
// Assumes outsize is a multiple of 8 and `_img`/`out` are 32-byte aligned
// (required by _mm256_load_ps / _mm256_stream_ps) -- TODO confirm callers.
inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
_mm256_zeroupper();
const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
for (int j = 0; j < outsize; j += 8)
{
__m256 a, c0, c1;
// multiply-accumulate all 4 taps for these 8 pixels
a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
// add result to existing output (non-temporal store)
a = _mm256_load_ps(out + j);
c0 = _mm256_add_ps(c0, a);
_mm256_stream_ps(out + j, c0);
_img += 32;
}
_mm256_zeroupper();
}
// AVX kernel for N=3: 9-tap filter accumulated into `out`, 8 pixels per
// iteration. Same layout/alignment assumptions as dotsum_unwrapped_2x2.
inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
_mm256_zeroupper();
const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]); const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]); const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);
for (int j = 0; j < outsize; j += 8) // 8 output pixels per iteration
{
__m256 a, c0, c1;
// multiply-accumulate all 9 taps
a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
// add result to existing output (non-temporal store)
a = _mm256_load_ps(out + j);
c0 = _mm256_add_ps(c0, a);
_mm256_stream_ps(out + j, c0);
_img += 72;
}
_mm256_zeroupper();
}
// AVX kernel for N=4: 16-tap filter accumulated into `out`, 8 pixels per
// iteration. Same layout/alignment assumptions as dotsum_unwrapped_2x2.
inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
_mm256_zeroupper();
const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]); const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]); const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]); const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]);
const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]); const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]);
const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]); const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]);
const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]); const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]);
for (int j = 0; j < outsize; j += 8) // 8 output pixels per iteration
{
__m256 a, c0, c1;
// multiply-accumulate all 16 taps
a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1);
// add result to existing output (non-temporal store)
a = _mm256_load_ps(out + j);
c0 = _mm256_add_ps(c0, a);
_mm256_stream_ps(out + j, c0);
_img += 128;
}
_mm256_zeroupper();
}
// AVX kernel for N=5: 25-tap filter accumulated into `out`, 8 pixels per
// iteration. Same layout/alignment assumptions as dotsum_unwrapped_2x2.
inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
_mm256_zeroupper();
const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]); const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]); const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]); const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]);
const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]); const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]);
const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]); const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]);
const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]); const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]);
const __m256 f16 = _mm256_broadcast_ss(&filter_ptr[16]); const __m256 f17 = _mm256_broadcast_ss(&filter_ptr[17]);
const __m256 f18 = _mm256_broadcast_ss(&filter_ptr[18]); const __m256 f19 = _mm256_broadcast_ss(&filter_ptr[19]);
const __m256 f20 = _mm256_broadcast_ss(&filter_ptr[20]); const __m256 f21 = _mm256_broadcast_ss(&filter_ptr[21]);
const __m256 f22 = _mm256_broadcast_ss(&filter_ptr[22]); const __m256 f23 = _mm256_broadcast_ss(&filter_ptr[23]);
const __m256 f24 = _mm256_broadcast_ss(&filter_ptr[24]);
for (int j = 0; j < outsize; j += 8)
{
__m256 a, c0, c1;
// multiply-accumulate all 25 taps
a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 128); c1 = _mm256_mul_ps(a, f16); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 136); c1 = _mm256_mul_ps(a, f17); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 144); c1 = _mm256_mul_ps(a, f18); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 152); c1 = _mm256_mul_ps(a, f19); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 160); c1 = _mm256_mul_ps(a, f20); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 168); c1 = _mm256_mul_ps(a, f21); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 176); c1 = _mm256_mul_ps(a, f22); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 184); c1 = _mm256_mul_ps(a, f23); c0 = _mm256_add_ps(c0, c1);
a = _mm256_load_ps(_img + 192); c1 = _mm256_mul_ps(a, f24); c0 = _mm256_add_ps(c0, c1);
// add result to existing output (non-temporal store)
a = _mm256_load_ps(out + j);
c0 = _mm256_add_ps(c0, a);
_mm256_stream_ps(out + j, c0);
_img += 200;
}
_mm256_zeroupper();
}
// AVX kernel for N=7: 49-tap filter accumulated into `out`, 8 pixels per
// iteration. Taps are kept in a broadcast array instead of named registers
// (49 would not fit). Same layout/alignment assumptions as the other kernels.
inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
_mm256_zeroupper();
__m256 f[49];//=new __m256(s);
for(int i=0; i<49; i++) f[i]= _mm256_broadcast_ss(&filter_ptr[i]);
for (int j = 0; j < outsize; j += 8)
{
__m256 a, c0, c1;
a = _mm256_load_ps(_img);
c0 = _mm256_mul_ps(a, f[0]);
for(int i=1; i<49;i++)
{
a = _mm256_load_ps(_img + 8*i); c1 = _mm256_mul_ps(a, f[i]); c0 = _mm256_add_ps(c0, c1);
}
// add result to existing output (non-temporal store)
a = _mm256_load_ps(out + j);
c0 = _mm256_add_ps(c0, a);
_mm256_stream_ps(out + j, c0);
_img += 49*8;
}
_mm256_zeroupper();
//delete [] f;
}
#else // no AVX
// Scalar fallback (built when MOJO_AVX is not defined): forward to the
// generic NxN kernel with N=2.
inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
dotsum_unwrapped_NxN(2, _img, filter_ptr, out, outsize);
}
// Scalar fallback: forward to the generic NxN kernel with N=3.
inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
dotsum_unwrapped_NxN(3, _img, filter_ptr, out, outsize);
}
// Scalar fallback: forward to the generic NxN kernel with N=4.
inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
dotsum_unwrapped_NxN(4, _img, filter_ptr, out, outsize);
}
// Read the ARMv7 (32-bit ARM) cycle counter via the CP15 performance-monitor
// registers. Returns the raw PMCCNTR value, -1 when the cycle counter is not
// enabled, or -2 when user-mode PMU access is not granted.
// NOTE(review): the `mrc p15` encoding only assembles for 32-bit ARM, yet
// this function sits in the `#ifndef MOJO_AVX` branch with no ARM guard --
// confirm non-ARM, non-AVX builds are not expected, or add a guard.
inline int64_t get_cycles() {
uint32_t pmccntr;
uint32_t pmuseren;
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
//__android_log_print(ANDROID_LOG_DEBUG, "INFO", "cycles %d", pmccntr);
return (int64_t) pmccntr; // Should optimize to << 6
}
return -1;
}
return -2;
}
#ifdef MOJO_ENABLE_NEON
// NEON helper macro used by dotsum_neon_5x5_4maps: loads one 5-wide filter
// row for 4 output channels (filter_ptr0..3) and multiply-accumulates it
// against the sliding input vectors vi0..vi4 into the four accumulators
// vo0..vo3. Expects vi0..vi4 and vo0..vo3 to be in scope at the expansion
// site. Note the vld1q/vld1 pair overlaps at element 3 so that vf01 lane 1
// holds filter element 4.
#define MOJO_Conv2dNeonK5x5SnLoadCalc4 \
/* load filter (4 outch x 1 height x 4 width) */ \
float32x4_t vf00, vf10, vf20, vf30; \
float32x2_t vf01, vf11, vf21, vf31; \
vf00 = vld1q_f32(filter_ptr0); \
vf01 = vld1_f32(filter_ptr0 + 3); \
vf10 = vld1q_f32(filter_ptr1); \
vf11 = vld1_f32(filter_ptr1 + 3); \
vf20 = vld1q_f32(filter_ptr2); \
vf21 = vld1_f32(filter_ptr2 + 3); \
vf30 = vld1q_f32(filter_ptr3); \
vf31 = vld1_f32(filter_ptr3 + 3); \
\
/* outch 0 */ \
vo0 = vmlaq_lane_f32(vo0, vi0, vget_low_f32(vf00), 0); \
vo0 = vmlaq_lane_f32(vo0, vi1, vget_low_f32(vf00), 1); \
vo0 = vmlaq_lane_f32(vo0, vi2, vget_high_f32(vf00), 0); \
vo0 = vmlaq_lane_f32(vo0, vi3, vget_high_f32(vf00), 1); \
vo0 = vmlaq_lane_f32(vo0, vi4, vf01, 1); \
\
/* outch 1 */ \
vo1 = vmlaq_lane_f32(vo1, vi0, vget_low_f32(vf10), 0); \
vo1 = vmlaq_lane_f32(vo1, vi1, vget_low_f32(vf10), 1); \
vo1 = vmlaq_lane_f32(vo1, vi2, vget_high_f32(vf10), 0); \
vo1 = vmlaq_lane_f32(vo1, vi3, vget_high_f32(vf10), 1); \
vo1 = vmlaq_lane_f32(vo1, vi4, vf11, 1); \
\
/* outch 2 */ \
vo2 = vmlaq_lane_f32(vo2, vi0, vget_low_f32(vf20), 0); \
vo2 = vmlaq_lane_f32(vo2, vi1, vget_low_f32(vf20), 1); \
vo2 = vmlaq_lane_f32(vo2, vi2, vget_high_f32(vf20), 0); \
vo2 = vmlaq_lane_f32(vo2, vi3, vget_high_f32(vf20), 1); \
vo2 = vmlaq_lane_f32(vo2, vi4, vf21, 1); \
\
/* outch 3 */ \
vo3 = vmlaq_lane_f32(vo3, vi0, vget_low_f32(vf30), 0); \
vo3 = vmlaq_lane_f32(vo3, vi1, vget_low_f32(vf30), 1); \
vo3 = vmlaq_lane_f32(vo3, vi2, vget_high_f32(vf30), 0); \
vo3 = vmlaq_lane_f32(vo3, vi3, vget_high_f32(vf30), 1); \
vo3 = vmlaq_lane_f32(vo3, vi4, vf31, 1);
// NEON 5x5 convolution accumulating 4 output maps at once. `filter_ptr`
// holds 4 consecutive 25-float kernels; `out` holds 4 maps `map_stride`
// floats apart. Results are ADDED to the existing contents of `out`.
// NOTE(review): the inner loop runs while `w + 3 < out_size`, so when
// out_size is not a multiple of 4 the rightmost columns are never computed
// -- confirm callers guarantee out_size % 4 == 0.
inline void dotsum_neon_5x5_4maps(const float *_img, const float *filter_ptr, float *out,
const int map_stride, const int in_size, const int out_size)
{
float *out_ptr0_base = out;
float *out_ptr1_base = out + map_stride;
float *out_ptr2_base = out + 2 * map_stride;
float *out_ptr3_base = out + 3 * map_stride;
const float *filter_ptr0 = filter_ptr;
const float *filter_ptr1 = filter_ptr + 25; // kernel_size = 25
const float *filter_ptr2 = filter_ptr + 2 * 25;
const float *filter_ptr3 = filter_ptr + 3 * 25;
for (int h = 0; h < out_size; ++h) {
for (int w = 0; w + 3 < out_size; w += 4) {
// input offset
int in_offset = h * in_size + w;
// output (4 outch x 1 height x 4 width): vo_outch_height
float32x4_t vo0, vo1, vo2, vo3;
// load output
int out_offset = h * out_size + w;
vo0 = vld1q_f32(out_ptr0_base + out_offset);
vo1 = vld1q_f32(out_ptr1_base + out_offset);
vo2 = vld1q_f32(out_ptr2_base + out_offset);
vo3 = vld1q_f32(out_ptr3_base + out_offset);
for (int r = 0; r < 5; ++r) {
// input (3 slide)
float32x4_t vi0, vi1, vi2, vi3, vi4;
// load 8 input floats; vi1..vi3 are 1/2/3-element shifts of the window
vi0 = vld1q_f32(_img + in_offset);
vi4 = vld1q_f32(_img + in_offset + 4);
vi1 = vextq_f32(vi0, vi4, 1);
vi2 = vextq_f32(vi0, vi4, 2);
vi3 = vextq_f32(vi0, vi4, 3);
MOJO_Conv2dNeonK5x5SnLoadCalc4;
in_offset += in_size;
filter_ptr0 += 5;
filter_ptr1 += 5;
filter_ptr2 += 5;
filter_ptr3 += 5;
} // r
vst1q_f32(out_ptr0_base + out_offset, vo0);
vst1q_f32(out_ptr1_base + out_offset, vo1);
vst1q_f32(out_ptr2_base + out_offset, vo2);
vst1q_f32(out_ptr3_base + out_offset, vo3);
// rewind the filter pointers advanced by the 5 row iterations
filter_ptr0 -= 25;
filter_ptr1 -= 25;
filter_ptr2 -= 25;
filter_ptr3 -= 25;
} // w
} // h
}
// NEON 5x5 convolution for a single output map; same structure as
// dotsum_neon_5x5_4maps but with one accumulator. Results are ADDED to the
// existing contents of `out`.
// NOTE(review): columns beyond the last multiple of 4 are skipped when
// out_size % 4 != 0 -- confirm callers guarantee alignment.
inline void dotsum_neon_5x5(const float *_img, const float *filter_ptr, float *out,
const int in_size, const int out_size)
{
for (int h = 0; h < out_size; ++h) {
for (int w = 0; w + 3 < out_size; w += 4) {
// input offset
int in_offset = h * in_size + w;
// output (1 outch x 1 height x 4 width): vo_outch_height
float32x4_t vo0;
// load output
int out_offset = h * out_size + w;
vo0 = vld1q_f32(out + out_offset);
for (int r = 0; r < 5; ++r) {
// input (3 slide)
float32x4_t vi0, vi1, vi2, vi3, vi4;
// load 8 input floats; vi1..vi3 are shifted views of the window
vi0 = vld1q_f32(_img + in_offset);
vi4 = vld1q_f32(_img + in_offset + 4);
vi1 = vextq_f32(vi0, vi4, 1);
vi2 = vextq_f32(vi0, vi4, 2);
vi3 = vextq_f32(vi0, vi4, 3);
float32x4_t vf00;
float32x2_t vf01;
vf00 = vld1q_f32(filter_ptr);
vf01 = vld1_f32(filter_ptr + 3);
/* outch 0 */
vo0 = vmlaq_lane_f32(vo0, vi0, vget_low_f32(vf00), 0);
vo0 = vmlaq_lane_f32(vo0, vi1, vget_low_f32(vf00), 1);
vo0 = vmlaq_lane_f32(vo0, vi2, vget_high_f32(vf00), 0);
vo0 = vmlaq_lane_f32(vo0, vi3, vget_high_f32(vf00), 1);
vo0 = vmlaq_lane_f32(vo0, vi4, vf01, 1);
in_offset += in_size;
filter_ptr += 5;
} // r
vst1q_f32(out + out_offset, vo0);
// rewind the filter pointer advanced by the 5 row iterations
filter_ptr -= 25;
} // w
} // h
}
#endif
// Scalar fallback: forward to the generic NxN kernel with N=5.
inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
dotsum_unwrapped_NxN(5, _img, filter_ptr, out, outsize);
}
// Scalar fallback: forward to the generic NxN kernel with N=7.
inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
dotsum_unwrapped_NxN(7, _img, filter_ptr, out, outsize);
}
#endif
// matrix class ---------------------------------------------------
// should use opencv if available
//
// Multi-channel float matrix (cols x rows x chans) with 32-byte-aligned
// storage for the AVX/SSE kernels above. Channel k starts at x + k*chan_stride;
// when chan_aligned is set, chan_stride is rounded up to a multiple of 8
// floats so every channel begins on an AVX boundary.
// should use opencv if available
class matrix
{
	int _size;      // logical element count: chan_stride * chans
	int _capacity;  // allocated element count (never shrinks in resize())
	float *_x_mem;  // raw allocation backing the aligned pointer `x`
	// Release the backing store and clear both pointers.
	void delete_x() { delete[] _x_mem; x = NULL; _x_mem = NULL; }
	// 4 extra for alignment and 4 for 3 padding for SSE
	//float *new_x(const int size) { _x_mem = new float[size + 4+3]; x = (float *)(((uintptr_t)_x_mem + 16) & ~(uintptr_t)0x0F); return x; }
	// AVX alignment: over-allocate by 15 floats, round the pointer up to 32 bytes
	float *new_x(const int size) { _x_mem = new float[size + 8 + 7]; x = (float *)(((uintptr_t)_x_mem + 32) & ~(uintptr_t)0x1F); return x; }
	// Quantize x[start_index, end_index) in place onto `s` levels spanning that
	// range's own [min, max]. Shared by every branch of bucket_scaling(), which
	// previously triplicated this loop verbatim. A constant range is left
	// untouched, which also avoids the division by zero (alpha == 0) the
	// duplicated code performed.
	void quantize_range(const int start_index, const int end_index, const int s)
	{
		// find min/max of the range (indices, matching the original)
		int mini = start_index;
		int maxi = start_index;
		for (int j = start_index; j < end_index; j++) {
			if (x[j] < x[mini]) mini = j;
			if (x[j] > x[maxi]) maxi = j;
		}
		const float alpha = x[maxi] - x[mini]; // range width
		const float beta = x[mini];            // range offset
		if (alpha == 0.f) return;              // all values equal: quantization is a no-op
		for (int j = start_index; j < end_index; j++) {
			x[j] -= beta;        // map to [0,1]
			x[j] /= alpha;
			x[j] *= s;           // snap to one of s levels
			x[j] = round(x[j]);
			x[j] /= s;
			x[j] *= alpha;       // map back to the original range
			x[j] += beta;
		}
	}
public:
	std::string _name;
	int cols, rows, chans;
	int chan_stride;   // floats between consecutive channels (>= cols*rows)
	int chan_aligned;  // nonzero: pad chan_stride up to a multiple of 8
	float *x;          // element data (32-byte aligned view into _x_mem)
	// Channel stride in floats; rounded up to a multiple of 8 when aligned so
	// AVX code can stream whole channels 8 floats at a time.
	virtual int calc_chan_stride(int w, int h)
	{
		if (chan_aligned)
		{
			int s = w*h;
			const int remainder = s % 8;
			if (remainder > 0) s += 8 - remainder;
			return s;
		}
		else return w*h;
	}
	matrix( ): cols(0), rows(0), chans(0), _size(0), _capacity(0), chan_stride(0), x(NULL), chan_aligned(0)/*, empty_chan(NULL)*/{}
	// Optionally copies `data` (must hold chan_stride*chans floats) into place.
	matrix( int _w, int _h, int _c=1, const float *data=NULL, int align_chan=0): cols(_w), rows(_h), chans(_c)
	{
		chan_aligned = align_chan;
		chan_stride = calc_chan_stride(cols, rows);
		_size= chan_stride*chans; _capacity=_size; x = new_x(_size);
		if(data!=NULL) memcpy(x,data,_size*sizeof(float));
	}
	// copy constructor - deep copy
	matrix( const matrix &m) : cols(m.cols), rows(m.rows), chan_aligned(m.chan_aligned), chans(m.chans), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size) {x = new_x(_size); memcpy(x,m.x,sizeof(float)*_size); /*empty_chan = new unsigned char[chans]; memcpy(empty_chan, m.empty_chan, chans);*/} // { v=m.v; x=(float*)v.data();}
	// copy and pad constructor
	matrix( const matrix &m, int pad_cols, int pad_rows, mojo::pad_type padding= mojo::zero, int threads=1) : cols(m.cols), rows(m.rows), chans(m.chans), chan_aligned(m.chan_aligned), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size)
	{
		x = new_x(_size); memcpy(x, m.x, sizeof(float)*_size);
		*this = pad(pad_cols, pad_rows, padding, threads);
	}
	// virtual: the class is already polymorphic (calc_chan_stride), so deleting
	// a derived matrix through a matrix* must reach the derived destructor.
	virtual ~matrix() { if (x) delete_x(); }
	// Deep copy of `num_chans` channels starting at `start_channel`.
	matrix get_chans(int start_channel, int num_chans=1) const
	{
		return matrix(cols,rows,num_chans,&x[start_channel*chan_stride]);
	}
	// if edge_pad==zero, then the padded area is just 0.
	// if edge_pad==edge it fills with edge pixel colors
	// if edge_pad==median_edge it fills with the median edge pixel color
	matrix pad(int dx, int dy, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
	{
		return pad(dx, dy, dx, dy, edge_pad, threads);
	}
	// Returns a new matrix grown by dx/dy on the left/top and dx_right/
	// dy_bottom on the right/bottom, filled per `edge_pad`.
	matrix pad(int dx, int dy, int dx_right, int dy_bottom, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
	{
		matrix v(cols+dx+dx_right,rows+dy+dy_bottom,chans);//,NULL,this->chan_aligned);
		v.fill(0);
		#pragma omp parallel for num_threads(threads)
		for(int k=0; k<chans; k++)
		{
			const int v_chan_offset=k*v.chan_stride;
			const int chan_offset=k*chan_stride;
			// find median color of perimeter
			float median = 0.f;
			if (edge_pad == mojo::median_edge)
			{
				int perimeter = 2 * (cols + rows - 2);
				std::vector<float> d(perimeter);
				for (int i = 0; i < cols; i++)
				{
					d[i] = x[i+ chan_offset]; d[i + cols] = x[i + cols*(rows - 1)+ chan_offset];
				}
				for (int i = 1; i < (rows - 1); i++)
				{
					d[i + cols * 2] = x[cols*i+ chan_offset];
					// fill from the back so the index needs no extra arithmetic
					d[perimeter - i] = x[cols - 1 + cols*i+ chan_offset];
				}
				std::nth_element(d.begin(), d.begin() + perimeter / 2, d.end());
				median = d[perimeter / 2];
			}
			for(int j=0; j<rows; j++)
			{
				// copy the original row into the padded interior
				memcpy(&v.x[dx+(j+dy)*v.cols+v_chan_offset], &x[j*cols+chan_offset], sizeof(float)*cols);
				if(edge_pad== mojo::edge)
				{
					// replicate left/right edge pixels
					for(int i=0; i<dx; i++) v.x[i+(j+dy)*v.cols+v_chan_offset]=x[0+j*cols+chan_offset];
					for (int i = 0; i<dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = x[(cols - 1) + j*cols + chan_offset];
				}
				else if (edge_pad == mojo::median_edge)
				{
					for (int i = 0; i < dx; i++) v.x[i + (j + dy)*v.cols + v_chan_offset] = median;
					for (int i = 0; i < dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = median;
				}
			}
			// top/bottom pad
			if(edge_pad== mojo::edge)
			{
				for(int j=0; j<dy; j++) memcpy(&v.x[(j)*v.cols+v_chan_offset],&v.x[(dy)*v.cols+v_chan_offset], sizeof(float)*v.cols);
				for (int j = 0; j<dy_bottom; j++) memcpy(&v.x[(j + dy + rows)*v.cols + v_chan_offset], &v.x[(rows - 1 + dy)*v.cols + v_chan_offset], sizeof(float)*v.cols);
			}
			if (edge_pad == mojo::median_edge)
			{
				for (int j = 0; j<dy; j++)
					for (int i = 0; i<v.cols; i++)
						v.x[i + j*v.cols + v_chan_offset] = median;
				for (int j = 0; j<dy_bottom; j++)
					for (int i = 0; i<v.cols; i++)
						v.x[i + (j + dy + rows)*v.cols + v_chan_offset] = median;
			}
		}
		return v;
	}
	// Returns the w x h sub-matrix whose top-left corner is (dx, dy).
	matrix crop(int dx, int dy, int w, int h, int threads=1) const
	{
		matrix v(w,h,chans);
		#pragma omp parallel for num_threads(threads)
		for(int k=0; k<chans; k++)
		{
			for(int j=0; j<h; j++)
			{
				memcpy(&v.x[j*w+k*v.chan_stride], &x[dx+(j+dy)*cols+k*chan_stride], sizeof(float)*w);
			}
		}
		return v;
	}
	// Shift the image by (dx, dy); exposed pixels are filled per edge_pad.
	// Implemented as pad-then-crop.
	mojo::matrix shift(int dx, int dy, mojo::pad_type edge_pad=mojo::zero)
	{
		int orig_cols=cols;
		int orig_rows=rows;
		int off_x=abs(dx);
		int off_y=abs(dy);
		mojo::matrix shifted= pad(off_x, off_y, edge_pad);
		return shifted.crop(off_x-dx, off_y-dy,orig_cols,orig_rows);
	}
	// Horizontal mirror (columns reversed) of every channel.
	mojo::matrix flip_cols()
	{
		mojo::matrix v(cols,rows,chans);
		for(int k=0; k<chans; k++)
			for(int j=0; j<rows; j++)
				for(int i=0; i<cols; i++)
					v.x[i+j*cols+k*chan_stride]=x[(cols-i-1)+j*cols+k*chan_stride];
		return v;
	}
	// Vertical mirror (rows reversed) of every channel.
	mojo::matrix flip_rows()
	{
		mojo::matrix v(cols, rows, chans);
		for (int k = 0; k<chans; k++)
			for (int j = 0; j<rows; j++)
				memcpy(&v.x[(rows-1-j)*cols + k*chan_stride],&x[j*cols + k*chan_stride], cols*sizeof(float));
		return v;
	}
	// Clamp every element (including channel padding) into [min, max].
	void clip(float min, float max)
	{
		int s = chan_stride*chans;
		for (int i = 0; i < s; i++)
		{
			if (x[i] < min) x[i] = min;
			if (x[i] > max) x[i]=max;
		}
	}
	// Bucketed min/max quantization: split the weights into buckets of
	// `bucket_size` elements, quantize each bucket to `s` levels over its own
	// [min, max], then handle any leftover tail. When the whole matrix is
	// smaller than one bucket it is quantized as a single range.
	void bucket_scaling(int bucket_size, int s) {
		const int weight_size = _size;
		const int num_buckets = weight_size / bucket_size;
		if (num_buckets == 0) {
			// matrix smaller than one bucket: quantize everything at once
			quantize_range(0, weight_size, s);
			return;
		}
		for (int i = 0; i < num_buckets; i++)
			quantize_range(i * bucket_size, (i + 1) * bucket_size, s);
		const int leftover = weight_size % bucket_size;
		if (leftover != 0)
			quantize_range(weight_size - leftover, weight_size, s);
	}
	// Min and max over all channels (padding excluded); optionally also
	// returns the flat indices of the extrema.
	void min_max(float *min, float *max, int *min_i=NULL, int *max_i=NULL)
	{
		int s = rows*cols;
		int mini = 0;
		int maxi = 0;
		for (int c = 0; c < chans; c++)
		{
			const int t = chan_stride*c;
			for (int i = t; i < t+s; i++)
			{
				if (x[i] < x[mini]) mini = i;
				if (x[i] > x[maxi]) maxi = i;
			}
		}
		*min = x[mini];
		*max = x[maxi];
		if (min_i) *min_i = mini;
		if (max_i) *max_i = maxi;
	}
	// Round every element to the nearest multiple of 1/quantization_s.
	// Fix: the original divided by the element count (rows*cols) instead of
	// quantization_s, and its round-up branch added the integer expression
	// 1/s, which is always 0 for s > 1.
	void round_matrix(int quantization_s) {
		const int s = rows * cols;
		const float q = (float)quantization_s;
		for (int c = 0; c < chans; c++) {
			const int t = chan_stride * c;
			for (int i = t; i < t + s; i++) {
				x[i] = round(x[i] * q) / q;
			}
		}
	}
	// Replace each element by dictionary[(int)x[i]].
	// NOTE(review): the float element is implicitly truncated to an int key,
	// and operator[] default-inserts 0.f for missing keys -- confirm both are
	// intended by the quantization pipeline.
	void unmapping(std::map<int,float>& dictionary){
		int s = rows * cols;
		for (int c = 0; c < chans; c++) {
			const int t = chan_stride * c;
			for (int i = t; i < t + s; i++) {
				x[i] = dictionary[x[i]];
			}
		}
	}
	// Mean over all channels (padding excluded).
	float mean()
	{
		const int s = rows*cols;
		float average = 0;
		for (int c = 0; c < chans; c++)
		{
			const int t = chan_stride*c;
			for (int i = 0; i < s; i++)
				average += x[i + t];
		}
		average = average / (float)(s*chans);
		return average;
	}
	// Subtract and return the mean of one channel.
	float remove_mean(int channel)
	{
		int s = rows*cols;
		int offset = channel*chan_stride;
		float average=0;
		for(int i=0; i<s; i++) average+=x[i+offset];
		average= average/(float)s;
		for(int i=0; i<s; i++) x[i+offset]-=average;
		return average;
	}
	// Subtract and return the global mean (also touches channel padding).
	float remove_mean()
	{
		float m=mean();
		int s = chan_stride*chans;
		for(int i=0; i<s; i++) x[i]-=m;
		return m;
	}
	void fill(float val) { for(int i=0; i<_size; i++) x[i]=val; }
	// Uniform fill in [-range, range]; seeded with 0, so deterministic.
	void fill_random_uniform(float range)
	{
		std::mt19937 gen(0);
		std::uniform_real_distribution<float> dst(-range, range);
		for (int i = 0; i<_size; i++) x[i] = dst(gen);
	}
	// Normal fill with mean 0 and stddev `std`; seeded with 0, so deterministic.
	void fill_random_normal(float std)
	{
		std::mt19937 gen(0);
		std::normal_distribution<float> dst(0, std);
		for (int i = 0; i<_size; i++) x[i] = dst(gen);
	}
	// deep copy (note: _name is intentionally not copied, matching prior behavior)
	inline matrix& operator =(const matrix &m)
	{
		resize(m.cols, m.rows, m.chans, m.chan_aligned);
		memcpy(x,m.x,sizeof(float)*_size);
		return *this;
	}
	int size() const {return _size;}
	// Reshape; existing data is NOT preserved when the buffer must grow.
	void resize(int _w, int _h, int _c, int align_chans=0) {
		chan_aligned = align_chans;
		int new_stride = calc_chan_stride(_w,_h);
		int s = new_stride*_c;
		if(s>_capacity)
		{
			if(_capacity>0) delete_x(); _size = s; _capacity=_size; x = new_x(_size);
		}
		cols = _w; rows = _h; chans = _c; _size = s; chan_stride = new_stride;
	}
	// Treat *this as a row vector and dot it with every row of m_2d.
	inline matrix dot_1dx2d(const matrix &m_2d) const
	{
		mojo::matrix v(m_2d.rows, 1, 1);
		for(int j=0; j<m_2d.rows; j++) v.x[j]=dot(x,&m_2d.x[j*m_2d.cols],_size);
		return v;
	}
	// element-wise +=
	inline matrix& operator+=(const matrix &m2){
		for(int i = 0; i < _size; i++) x[i] += m2.x[i];
		return *this;
	}
	// element-wise -=
	inline matrix& operator-=(const matrix &m2) {
		for (int i = 0; i < _size; i++) x[i] -= m2.x[i];
		return *this;
	}
#ifndef MOJO_AVX
	// scalar *= float
	inline matrix operator *=(const float v) {
		for (int i = 0; i < _size; i++) x[i] = x[i] * v;
		return *this;
	}
#else
	// SSE *= float; processes 4 floats per step. When _size is not a multiple
	// of 4 the last step spills into the alignment padding that new_x()
	// over-allocates, so it stays within the allocation.
	inline matrix operator *=(const float v) {
		__m128 b;
		b = _mm_set_ps(v, v, v, v);
		for (int j = 0; j < _size; j += 4)
			_mm_store_ps(x + j, _mm_mul_ps(_mm_load_ps(x + j), b));
		return *this;
	}
#endif
	// element-wise (Hadamard) *=
	inline matrix operator *=(const matrix &v) {
		for (int i = 0; i < _size; i++) x[i] = x[i] * v.x[i];
		return *this;
	}
	// element-wise product
	inline matrix operator *(const matrix &v) {
		matrix T(cols, rows, chans);
		for (int i = 0; i < _size; i++) T.x[i] = x[i] * v.x[i];
		return T;
	}
	// scalar product
	inline matrix operator *(const float v) {
		matrix T(cols, rows, chans);
		for (int i = 0; i < _size; i++) T.x[i] = x[i] * v;
		return T;
	}
	// scalar sum
	inline matrix operator +(const float v) {
		matrix T(cols, rows, chans);
		for (int i = 0; i < _size; i++) T.x[i] = x[i] + v;
		return T;
	}
	// element-wise sum
	inline matrix operator +(matrix m2)
	{
		matrix T(cols,rows,chans);
		for(int i = 0; i < _size; i++) T.x[i] = x[i] + m2.x[i];
		return T;
	}
};
}// namespace
|
tensor_op.h | /* Copyright (c) 2018 NoobsHPC Authors, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef TENSOR_OP_H
#define TENSOR_OP_H
#pragma once
#include <algorithm>
#include <cmath>
#include <random>

#include "tensor.h"
namespace noobshpc{
namespace icesword{
/* Fill a height x width matrix with its row index (fill_hight_index == true)
   or its column index (false) — useful for tracing data layout in debugging.
   Optionally logs every element. Returns S_InvalidValue on a null pointer.
   example:
       fill_tensor_debug<DT_FLOAT>(inputs[0]->mutable_data(),
                                   mb * g, ic/g * ih * iw, true, true);
*/
template <DataType DType>
Status fill_tensor_debug(void* matrix,
                         const size_t height,
                         const size_t width,
                         bool fill_hight_index = false,
                         bool with_print = false) {
    typedef typename DataTrait<X86, DType>::Dtype OP_DType;
    auto matrix_ = static_cast<OP_DType *>(matrix);
    if (matrix_ == nullptr) {
        LOG(ERROR) << "wrong matrix empty pointer !";
        return S_InvalidValue;
    }
    // size_t loop indices: the former `auto` counters deduced int, giving a
    // signed/unsigned comparison against the size_t bounds (and truncation
    // for matrices larger than INT_MAX elements).
    for (size_t m = 0; m < height; ++m) {
#pragma omp simd
        for (size_t n = 0; n < width; ++n) {
            matrix_[m * width + n] = fill_hight_index ? m : n;
            if (with_print) {
                auto index = m * width + n;
                LOG(INFO) << "matrix" << "[" << index << "]: "
                          << (fill_hight_index ? m : n);
            }
        }
    }
    return S_Success;
}
/* Set all `size` elements of dio to `value`; no-op for size <= 0. */
template <typename Dtype>
void fill_tensor_const_func(Dtype* dio, Dtype value, long long size) {
    // std::fill_n replaces the hand-rolled loop; the size guard also makes
    // negative sizes a no-op instead of an empty-but-UB pointer range.
    if (size > 0) {
        std::fill_n(dio, size, value);
    }
}
/* Fill `size` elements with raw rand() output cast to Dtype. */
template <typename Dtype>
void fill_tensor_rand_func(Dtype* dio, long long size) {
    Dtype* const stop = dio + size;
    for (Dtype* p = dio; p < stop; ++p) {
        *p = static_cast<Dtype>(rand());
    }
}
/* Fill `size` elements with uniform random values in [vstart, vend),
   cast to Dtype (integer Dtypes therefore truncate). */
template <typename Dtype>
void fill_tensor_rand_func(Dtype* dio, Dtype vstart, Dtype vend, long long size) {
    // One engine per thread, seeded once: the former code rebuilt a
    // std::random_device and std::mt19937 on every call, which is expensive
    // and hits the system entropy source each time.
    static thread_local std::mt19937 gen{std::random_device{}()};
    std::uniform_real_distribution<float> dis(0, 1.f);
    for (long long i = 0; i < size; ++i) {
        dio[i] = static_cast<Dtype>(vstart + (vend - vstart) * dis(gen));
    }
}
/* Print `size` values (as float, 6 decimals), `width` values per line.
   width must be > 0 (used as a modulus). */
template <typename Dtype>
void print_tensor(const Dtype* din, long long size, int width) {
    // long long index: the former int counter overflowed (UB) for tensors
    // with more than INT_MAX elements while the bound was long long.
    for (long long i = 0; i < size; ++i) {
        printf("%.6f ", static_cast<float>(din[i]));
        if ((i + 1) % width == 0) {
            printf("\n");
        }
    }
    printf("\n");
}
/* Arithmetic mean of `size` elements as double; 0.0 for empty input. */
template <typename Dtype>
double tensor_mean_value(const Dtype* din, long long size) {
    if (size <= 0) {
        return 0.0;  // the former code divided by zero here
    }
    double sum = 0.0;
    for (long long i = 0; i < size; ++i) {
        sum += din[i];
    }
    return sum / size;
}
/**
 *  \brief Fill the tensor buffer with a constant value.
 *  \param tensor The reference of input tensor.
 *  \param value  Constant stored into every element, cast to the tensor's
 *                runtime element type.
 **/
template <TargetType TType>
static inline void fill_tensor_const(Tensor<TType>& tensor, float value) {
    long long size = tensor.size();
    void* dio = tensor.mutable_data();
    DataType type = tensor.get_dtype();
    // Dispatch on the runtime dtype; each branch casts both the buffer
    // pointer and the value to the matching C type.
    switch (type){
        case DT_UINT8: fill_tensor_const_func((unsigned char*)dio, static_cast<unsigned char>(value), size); break;
        case DT_INT8: fill_tensor_const_func((char*)dio, static_cast<char>(value), size); break;
        case DT_INT16: fill_tensor_const_func((short*)dio, static_cast<short>(value), size); break;
        case DT_UINT16: fill_tensor_const_func((unsigned short*)dio, static_cast<unsigned short>(value), size); break;
        // DT_HALF occupies 16-bit slots and is filled through short; note the
        // value is cast to short, not converted to half format.
        case DT_HALF: fill_tensor_const_func((short*)dio, static_cast<short>(value), size); break;
        case DT_UINT32: fill_tensor_const_func((unsigned int*)dio, static_cast<unsigned int>(value), size); break;
        case DT_INT32: fill_tensor_const_func((int*)dio, static_cast<int>(value), size); break;
        case DT_FLOAT: fill_tensor_const_func((float*)dio, static_cast<float>(value), size); break;
        case DT_DOUBLE: fill_tensor_const_func((double*)dio, static_cast<double>(value), size); break;
        default: LOG(FATAL) << "data type: " << type << " is unsupported now";
    }
}
/**
 *  \brief Fill the tensor buffer with rand value.
 *  \param tensor The reference of input tensor.
 **/
template <TargetType TType>
void fill_tensor_rand(Tensor<TType>& tensor) {
    long long size = tensor.size();
    void* dio = tensor.mutable_data();
    DataType type = tensor.get_dtype();
    // Dispatch on the runtime dtype; every branch fills the buffer with raw
    // rand() output cast to the element type (unbounded; no seeding here).
    switch (type){
        case DT_UINT8: fill_tensor_rand_func((unsigned char*)dio, size); break;
        case DT_INT8: fill_tensor_rand_func((char*)dio, size); break;
        case DT_INT16: fill_tensor_rand_func((short*)dio, size); break;
        case DT_UINT16: fill_tensor_rand_func((unsigned short*)dio, size); break;
        case DT_UINT32: fill_tensor_rand_func((unsigned int*)dio, size); break;
        case DT_INT32: fill_tensor_rand_func((int*)dio, size); break;
        // DT_HALF is filled through its 16-bit storage type.
        case DT_HALF: fill_tensor_rand_func((short*)dio, size); break;
        case DT_FLOAT: fill_tensor_rand_func((float*)dio, size); break;
        case DT_DOUBLE: fill_tensor_rand_func((double*)dio, size); break;
        default: LOG(FATAL) << "data type: " << type << " is unsupported now";
    }
}
/**
 *  \brief Fill the tensor buffer with rand value from vstart to vend.
 *  \param tensor The reference of input tensor.
 *  \param vstart Lower bound of the uniform range.
 *  \param vend   Upper bound of the uniform range.
 **/
template <TargetType TType>
void fill_tensor_rand(Tensor<TType>& tensor, float vstart, float vend) {
    long long size = tensor.size();
    void* dio = tensor.mutable_data();
    DataType type = tensor.get_dtype();
    // Dispatch on the runtime dtype. The bounds are cast to the element type
    // before sampling, so integer dtypes sample from a truncated range.
    switch (type){
        case DT_UINT8: fill_tensor_rand_func((unsigned char*)dio, static_cast<unsigned char>(vstart),
                static_cast<unsigned char>(vend), size); break;
        case DT_INT8: fill_tensor_rand_func((char*)dio, static_cast<char>(vstart), static_cast<char>(vend), size); break;
        case DT_INT16: fill_tensor_rand_func((short*)dio, static_cast<short>(vstart), static_cast<short>(vend), size); break;
        case DT_UINT16: fill_tensor_rand_func((unsigned short*)dio, static_cast<unsigned short>(vstart),
                static_cast<unsigned short>(vend), size); break;
        case DT_UINT32: fill_tensor_rand_func((unsigned int*)dio, static_cast<unsigned int>(vstart),
                static_cast<unsigned int>(vend), size); break;
        case DT_INT32: fill_tensor_rand_func((int*)dio, static_cast<int>(vstart), static_cast<int>(vend), size); break;
        // DT_HALF is sampled through its 16-bit storage type.
        case DT_HALF: fill_tensor_rand_func((short*)dio, static_cast<short>(vstart), static_cast<short>(vend), size); break;
        case DT_FLOAT: fill_tensor_rand_func((float*)dio, static_cast<float>(vstart), static_cast<float>(vend), size); break;
        case DT_DOUBLE: fill_tensor_rand_func((double*)dio, static_cast<double>(vstart), static_cast<double>(vend), size); break;
        default: LOG(FATAL) << "data type: " << type << " is unsupported now";
    }
}
/**
 *  \brief Print the data in host tensor.
 *  \param tensor The reference of input tensor.
 **/
template <TargetType TType>
void print_tensor(Tensor<TType>& tensor) {
    LOG(INFO) << "host tensor data:" << tensor.size();
    const void* data_ptr = tensor.data();
    long long size = tensor.size();
    int width = tensor.width();  // elements per printed row
    DataType type = tensor.get_dtype();
    switch(type) {
        case DT_UINT8: print_tensor((const unsigned char*)data_ptr, size, width); break;
        case DT_INT8: print_tensor((const char*)data_ptr, size, width); break;
        case DT_UINT16: print_tensor((const unsigned short*)data_ptr, size, width); break;
        case DT_INT16: print_tensor((const short*)data_ptr, size, width); break;
        case DT_UINT32: print_tensor((const unsigned int*)data_ptr, size, width); break;
        case DT_INT32: print_tensor((const int*)data_ptr, size, width); break;
        case DT_FLOAT: print_tensor((const float*)data_ptr, size, width); break;
        case DT_DOUBLE: print_tensor((const double*)data_ptr, size, width); break;
        // NOTE(review): DT_HALF has no branch here and hits LOG(FATAL),
        // unlike the fill_* helpers above — confirm this is intended.
        default: LOG(FATAL) << "data type: " << type << " is unsupported now";
    }
    printf("\n");
}
/**
 *  \brief Print the valid data in host tensor.
 *  \param tensor The reference of input tensor.
 **/
template <TargetType TType>
void print_tensor_valid(Tensor<TType>& tensor) {
    LOG(INFO) << "host tensor data:" << tensor.valid_size();
    // Advance past data_offset() elements (in bytes) to the valid region.
    const void* data_ptr = (const void*)((const char*)tensor.data() + tensor.data_offset() * type_length(tensor.get_dtype()));
    long long size = tensor.valid_size();
    int width = tensor.width();
    DataType type = tensor.get_dtype();
    if (tensor.is_continue_mem()) {
        switch(type) {
            case DT_UINT8: print_tensor((const unsigned char*)data_ptr, size, width); break;
            case DT_INT8: print_tensor((const char*)data_ptr, size, width); break;
            case DT_UINT16: print_tensor((const unsigned short*)data_ptr, size, width); break;
            case DT_INT16: print_tensor((const short*)data_ptr, size, width); break;
            case DT_UINT32: print_tensor((const unsigned int*)data_ptr, size, width); break;
            case DT_INT32: print_tensor((const int*)data_ptr, size, width); break;
            case DT_FLOAT: print_tensor((const float*)data_ptr, size, width); break;
            case DT_DOUBLE: print_tensor((const double*)data_ptr, size, width); break;
            default: LOG(FATAL) << "data type: " << type << " is unsupported now";
        }
        printf("\n");
    } else {
        // Non-contiguous valid region: gather into a dense temporary first.
        Tensor<TType> tvalid(tensor.valid_shape());
        tvalid.copy_from(tensor);
        print_tensor<TType>(tvalid);
    }
}
/**
 *  \brief compute mean value of all data in the tensor buffer
 *         (uses tensor.size(), i.e. the full buffer, not the valid region —
 *         see tensor_mean_value_valid for the latter).
 *  \param tensor The reference of input tensor.
 *  \return Mean as double; 0.0 when the dtype is unsupported.
 **/
template <TargetType TType>
double tensor_mean_value(Tensor<TType>& tensor) {
    const void* data_ptr = tensor.data();
    long long size = tensor.size();
    DataType type = tensor.get_dtype();
    switch (type) {
        case DT_UINT8: return tensor_mean_value((const unsigned char*)data_ptr, size);
        case DT_INT8: return tensor_mean_value((const char*)data_ptr, size);
        case DT_UINT16: return tensor_mean_value((const unsigned short*)data_ptr, size);
        case DT_INT16: return tensor_mean_value((const short*)data_ptr, size);
        case DT_UINT32: return tensor_mean_value((const unsigned int*)data_ptr, size);
        case DT_INT32: return tensor_mean_value((const int*)data_ptr, size);
        case DT_FLOAT: return tensor_mean_value((const float*)data_ptr, size);
        case DT_DOUBLE: return tensor_mean_value((const double*)data_ptr, size);
        // NOTE(review): DT_HALF is not handled here — confirm intended.
        default: LOG(FATAL) << "data type: " << type << " is unsupported now";
    }
    return 0.0;
}
/**
 *  \brief compute mean value of the valid data in the tensor.
 *  \param tensor The reference of input tensor.
 *  \return Mean of tensor.valid_size() elements as double; 0.0 when the
 *          dtype is unsupported.
 **/
template <TargetType TType>
double tensor_mean_value_valid(Tensor<TType>& tensor) {
    // Advance past data_offset() elements (in bytes) to the valid region.
    const void* data_ptr = (const void*)((const char*)tensor.data() + tensor.data_offset() * type_length(tensor.get_dtype()));
    long long size = tensor.valid_size();
    DataType type = tensor.get_dtype();
    if (tensor.is_continue_mem()) {
        switch (type) {
            case DT_UINT8: return tensor_mean_value((const unsigned char*)data_ptr, size);
            case DT_INT8: return tensor_mean_value((const char*)data_ptr, size);
            case DT_UINT16: return tensor_mean_value((const unsigned short*)data_ptr, size);
            case DT_INT16: return tensor_mean_value((const short*)data_ptr, size);
            case DT_UINT32: return tensor_mean_value((const unsigned int*)data_ptr, size);
            case DT_INT32: return tensor_mean_value((const int*)data_ptr, size);
            case DT_FLOAT: return tensor_mean_value((const float*)data_ptr, size);
            case DT_DOUBLE: return tensor_mean_value((const double*)data_ptr, size);
            default: LOG(FATAL) << "data type: " << type << " is unsupported now";
        }
    } else {
        // Non-contiguous valid region: gather into a dense temporary first.
        Tensor<TType> tvalid(tensor.valid_shape());
        tvalid.copy_from(tensor);
        return tensor_mean_value<TType>(tvalid);
    }
    return 0.0;
}
/* Compare two buffers element-wise.
   Outputs: max_diff  — largest absolute difference;
            max_ratio — relative difference 2|a-b| / (a+b+eps) at the
                        position where max_diff was found.
   Both outputs are set to 0 for size <= 0 (the former code read src[0]
   unconditionally, which is undefined behaviour for an empty buffer). */
template <typename Dtype>
void tensor_cmp(const Dtype* src1, const Dtype* src2,
                int size, double& max_ratio, double& max_diff) {
    const double eps = 1e-6f;
    if (size <= 0) {
        max_diff = 0.0;
        max_ratio = 0.0;
        return;
    }
    max_diff = fabs(src1[0] - src2[0]);
    max_ratio = fabs(2.0 * max_diff / (src1[0] + src2[0] + eps));
    for (int i = 1; i < size; ++i) {
        double diff = fabs(src1[i] - src2[i]);
        if (max_diff < diff) {
            max_diff = diff;
            max_ratio = fabs(2.0 * max_diff / (src1[i] + src2[i] + eps));
        }
    }
}
} // namespace icesword
} // namespace noobshpc
#endif // TENSOR_OP_H
|
residualbased_predictorcorrector_velocity_bossak_scheme_turbulent.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME )
#define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME
/* System includes */
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/dof_updater.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"
namespace Kratos {
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/// Bossak time scheme for the incompressible flow problem.
/** This class provides a second order time scheme of the generalized-alpha Newmark
family of methods. It also includes code required to implement slip conditions
on the incompressible flow problem and provides the possibility of using a RANS
model by passing a turbulence model as an argument to the constructor.
This time scheme is intended to be used in combination with elements of type
ASGS2D, ASGS3D, VMS or derived classes.
To use the slip condition, set the SLIP flag on slip wall nodes. To use
a wall law in combination with the slip condition, use MonolithicWallCondition to
mesh the boundary
@see ASGS2D, ASGS3D, VMS, MonolithicWallConditon
*/
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace> {
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent);
typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef Element::GeometryType GeometryType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor without a turbulence model
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
double MoveMeshStrategy,
unsigned int DomainSize)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = MoveMeshStrategy;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Constructor without a turbulence model with periodic conditions
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
unsigned int DomainSize,
const Variable<int>& rPeriodicIdVar)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
mrPeriodicIdVar(rPeriodicIdVar)
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = 0.0;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Constructor without a turbulence model
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
double MoveMeshStrategy,
unsigned int DomainSize,
Kratos::Flags& rSlipFlag)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,rSlipFlag), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = MoveMeshStrategy;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Constructor with a turbulence model
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
double MoveMeshStrategy,
unsigned int DomainSize,
Process::Pointer pTurbulenceModel)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()),
mpTurbulenceModel(pTurbulenceModel)
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = MoveMeshStrategy;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Destructor.
*/
~ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent() override {
}
/*@} */
/**@name Operators
*/
/*@{ */
    /**
    Performing the update of the solution: rotates velocities into the
    slip-aligned local frame, applies the Dof increments Dv, rotates back,
    then updates the nodal time derivatives.
    */
    //***************************************************************************
    void Update(ModelPart& r_model_part,
                DofsArrayType& rDofSet,
                TSystemMatrixType& A,
                TSystemVectorType& Dv,
                TSystemVectorType& b) override
    {
        KRATOS_TRY;
        // Work in the rotated (slip-aligned) frame while Dv is applied.
        mRotationTool.RotateVelocities(r_model_part);
        mpDofUpdater->UpdateDofs(rDofSet,Dv);
        mRotationTool.RecoverVelocities(r_model_part);
        // Make accelerations (and Lagrangian displacements) consistent.
        AdditionalUpdateOperations(r_model_part, rDofSet, A, Dv, b);
        KRATOS_CATCH("")
    }
    //***************************************************************************
    /** Updates nodal time derivatives after the Dof update.
     *  For every node the acceleration is recomputed from the velocity
     *  increment (see UpdateAcceleration). When mMeshVelocity == 2
     *  (Lagrangian), mesh velocity and displacement are updated too, except
     *  on nodes flagged as Lagrangian inlets, which are zeroed.
     */
    void AdditionalUpdateOperations(ModelPart& rModelPart,
                                    DofsArrayType& rDofSet,
                                    TSystemMatrixType& A,
                                    TSystemVectorType& Dv,
                                    TSystemVectorType& b)
    {
        KRATOS_TRY
        int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector NodePartition;
        OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition);
        //updating time derivatives (nodally for efficiency)
        #pragma omp parallel
        {
            array_1d<double, 3 > DeltaVel;
            // Each thread works on its own contiguous node partition.
            int k = OpenMPUtils::ThisThread();
            ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k];
            ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1];
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) {
                noalias(DeltaVel) = (itNode)->FastGetSolutionStepValue(VELOCITY) - (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
                array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0);
                array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
                UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration);
                if (mMeshVelocity == 2)//Lagrangian
                {
                    // Non-inlet nodes (flag below 1e-15) follow the fluid.
                    if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15)
                    {
                        array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);
                        array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                        array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY);
                        UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                    }
                    else
                    {
                        // Lagrangian inlet nodes: mesh stays fixed.
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = ZeroVector(3);
                        noalias(itNode->FastGetSolutionStepValue(DISPLACEMENT)) = ZeroVector(3);
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }
    //***************************************************************************
    /** Predicts the solution at the current step as v = vold (and
     *  p = pold) on every free Dof, then updates accelerations — and, in
     *  the Lagrangian case, displacements — so they are consistent with
     *  the predicted velocity.
     */
    void Predict(ModelPart& rModelPart,
                 DofsArrayType& rDofSet,
                 TSystemMatrixType& A,
                 TSystemVectorType& Dv,
                 TSystemVectorType& b) override
    {
        int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector NodePartition;
        OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition);
        #pragma omp parallel
        {
            // Each thread works on its own contiguous node partition.
            int k = OpenMPUtils::ThisThread();
            ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k];
            ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1];
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) {
                array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
                double& OldPressure = (itNode)->FastGetSolutionStepValue(PRESSURE, 1);
                //predicting velocity
                //ATTENTION::: the prediction is performed only on free nodes
                array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY);
                double& CurrentPressure = (itNode)->FastGetSolutionStepValue(PRESSURE);
                if ((itNode->pGetDof(VELOCITY_X))->IsFree())
                    (CurrentVelocity[0]) = OldVelocity[0];
                if (itNode->pGetDof(VELOCITY_Y)->IsFree())
                    (CurrentVelocity[1]) = OldVelocity[1];
                if (itNode->HasDofFor(VELOCITY_Z))
                    if (itNode->pGetDof(VELOCITY_Z)->IsFree())
                        (CurrentVelocity[2]) = OldVelocity[2];
                if (itNode->pGetDof(PRESSURE)->IsFree())
                    CurrentPressure = OldPressure;
                // updating time derivatives ::: please note that displacements and
                // their time derivatives can not be consistently fixed separately
                array_1d<double, 3 > DeltaVel;
                noalias(DeltaVel) = CurrentVelocity - OldVelocity;
                array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
                array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION);
                UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration);
                if (mMeshVelocity == 2) //Lagrangian
                {
                    array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                    array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);
                    // Non-inlet nodes (flag below 1e-15) follow the fluid.
                    if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15)
                    {
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY);
                        UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                    }
                    else
                    {
                        // NOTE(review): only the X and Y components are zeroed
                        // here, while AdditionalUpdateOperations zeroes the full
                        // vector — confirm this is intended in 3D.
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_X) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_Y) = 0.0;
                    }
                }
            }
        }
    }
    //***************************************************************************
    /** Designed to be called in the builder and solver to introduce the
     *  selected time integration scheme: computes the element's local system,
     *  adds the dynamic (mass/damping) contributions to LHS and RHS, and
     *  applies the slip-condition rotation to both.
     */
    void CalculateSystemContributions(Element::Pointer rCurrentElement,
                                      LocalSystemMatrixType& LHS_Contribution,
                                      LocalSystemVectorType& RHS_Contribution,
                                      Element::EquationIdVectorType& EquationId,
                                      ProcessInfo& CurrentProcessInfo) override
    {
        KRATOS_TRY
        // Index into the per-thread work matrices mMass / mDamp.
        int k = OpenMPUtils::ThisThread();
        //Initializing the non linear iteration for the current element
        (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);
        //basic operations for the element considered
        (rCurrentElement)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
        (rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);
        //adding the dynamic contributions (statics is already included)
        AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        // If there is a slip condition, apply it on a rotated system of coordinates
        mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());
        mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());
        KRATOS_CATCH("")
    }
    /** Computes only the RHS contribution of an element (dynamic terms
     *  included) and applies the slip-condition rotation to it.
     */
    void Calculate_RHS_Contribution(Element::Pointer rCurrentElement,
                                    LocalSystemVectorType& RHS_Contribution,
                                    Element::EquationIdVectorType& EquationId,
                                    ProcessInfo& CurrentProcessInfo) override
    {
        // Index into the per-thread work matrices mMass / mDamp.
        int k = OpenMPUtils::ThisThread();
        //Initializing the non linear iteration for the current element
        (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);
        //basic operations for the element considered
        (rCurrentElement)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
        (rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);
        //adding the dynamic contributions (static is already included)
        AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        // If there is a slip condition, apply it on a rotated system of coordinates
        mRotationTool.Rotate(RHS_Contribution,rCurrentElement->GetGeometry());
        mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement->GetGeometry());
    }
    /** Condition counterpart of CalculateSystemContributions: computes the
     *  condition's local system, adds the dynamic contributions, and applies
     *  the slip-condition rotation.
     */
    void Condition_CalculateSystemContributions(Condition::Pointer rCurrentCondition,
                                                LocalSystemMatrixType& LHS_Contribution,
                                                LocalSystemVectorType& RHS_Contribution,
                                                Element::EquationIdVectorType& EquationId,
                                                ProcessInfo& CurrentProcessInfo) override
    {
        KRATOS_TRY
        // Index into the per-thread work matrices mMass / mDamp.
        int k = OpenMPUtils::ThisThread();
        (rCurrentCondition) -> InitializeNonLinearIteration(CurrentProcessInfo);
        (rCurrentCondition)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
        (rCurrentCondition)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
        // Damping comes from CalculateLocalVelocityContribution, not from
        // CalculateDampingMatrix.
        (rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(EquationId, CurrentProcessInfo);
        AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        // Rotate contributions (to match coordinates for slip conditions)
        mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
        mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
        KRATOS_CATCH("")
    }
    /** Condition counterpart of Calculate_RHS_Contribution: computes only
     *  the condition's RHS (dynamic terms included) and applies the
     *  slip-condition rotation.
     */
    void Condition_Calculate_RHS_Contribution(Condition::Pointer rCurrentCondition,
                                              LocalSystemVectorType& RHS_Contribution,
                                              Element::EquationIdVectorType& EquationId,
                                              ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        // Index into the per-thread work matrices mMass / mDamp.
        int k = OpenMPUtils::ThisThread();
        //Initializing the non linear iteration for the current condition
        (rCurrentCondition) -> InitializeNonLinearIteration(rCurrentProcessInfo);
        //basic operations for the element considered
        (rCurrentCondition)->CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
        (rCurrentCondition)->CalculateMassMatrix(mMass[k],rCurrentProcessInfo);
        // Damping comes from CalculateLocalVelocityContribution, not from
        // CalculateDampingMatrix.
        (rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution,rCurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(EquationId,rCurrentProcessInfo);
        //adding the dynamic contributions (static is already included)
        AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k],rCurrentProcessInfo);
        // Rotate contributions (to match coordinates for slip conditions)
        mRotationTool.Rotate(RHS_Contribution,rCurrentCondition->GetGeometry());
        mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition->GetGeometry());
        KRATOS_CATCH("");
    }
//*************************************************************************************
//*************************************************************************************
void InitializeSolutionStep(ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b);
double DeltaTime = CurrentProcessInfo[DELTA_TIME];
if (DeltaTime == 0)
KRATOS_THROW_ERROR(std::logic_error, "detected delta_time = 0 in the Bossak Scheme ... check if the time step is created correctly for the current model part", "");
//initializing constants
ma0 = 1.0 / (mGammaNewmark * DeltaTime);
ma1 = DeltaTime * mBetaNewmark / mGammaNewmark;
ma2 = (-1 + mGammaNewmark) / mGammaNewmark;
ma3 = DeltaTime;
ma4 = pow(DeltaTime, 2)*(-2.0 * mBetaNewmark + 1.0) / 2.0;
ma5 = pow(DeltaTime, 2) * mBetaNewmark;
mam = (1.0 - mAlphaBossak) / (mGammaNewmark * DeltaTime);
}
//*************************************************************************************
//*************************************************************************************
void InitializeNonLinIteration(ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
if (mpTurbulenceModel != 0) // If not null
mpTurbulenceModel->Execute();
KRATOS_CATCH("")
}
    /** Called at the end of each non-linear iteration.
     *  When OSS_SWITCH is active, recomputes the orthogonal subscale
     *  projections: nodal ADVPROJ/DIVPROJ/NODAL_AREA are zeroed, assembled
     *  from element contributions, corrected for periodic conditions, and
     *  finally divided by the nodal area.
     */
    void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
    {
        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
        //if orthogonal subscales are computed
        if (CurrentProcessInfo[OSS_SWITCH] == 1.0) {
            KRATOS_INFO("Bossak Scheme") << "Computing OSS projections" << std::endl;
            const int nnodes = static_cast<int>(rModelPart.Nodes().size());
            auto nbegin = rModelPart.NodesBegin();
            // Reset the projection variables on every node.
            #pragma omp parallel for firstprivate(nbegin,nnodes)
            for(int i=0; i<nnodes; ++i)
            {
                auto ind = nbegin + i;
                noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
                ind->FastGetSolutionStepValue(DIVPROJ) = 0.0;
                ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            }//end of loop over nodes
            //loop on elements to accumulate ADVPROJ / DIVPROJ / NODAL_AREA on the nodes
            array_1d<double, 3 > output = ZeroVector(3);
            const int nel = static_cast<int>(rModelPart.Elements().size());
            auto elbegin = rModelPart.ElementsBegin();
            #pragma omp parallel for firstprivate(elbegin,nel,output)
            for(int i=0; i<nel; ++i)
            {
                auto elem = elbegin + i;
                elem->Calculate(ADVPROJ, output, CurrentProcessInfo);
            }
            // Assemble nodal data across partitions via the communicator.
            rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
            rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
            rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
            // Correction for periodic conditions
            this->PeriodicConditionProjectionCorrection(rModelPart);
            // Divide the projections by the nodal area; zero areas are
            // replaced by 1.0 to avoid division by zero.
            #pragma omp parallel for firstprivate(nbegin,nnodes)
            for(int i=0; i<nnodes; ++i)
            {
                auto ind = nbegin + i;
                if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
                {
                    ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
                }
                const double Area = ind->FastGetSolutionStepValue(NODAL_AREA);
                ind->FastGetSolutionStepValue(ADVPROJ) /= Area;
                ind->FastGetSolutionStepValue(DIVPROJ) /= Area;
            }
        }
    }
/// Finalize the solution step: compute nodal REACTION values and the Bossak
/// relaxed acceleration, then delegate to the base scheme.
/** Reactions are assembled as minus the full dynamic elemental RHS restricted
 *  to the velocity dofs. The relaxed acceleration
 *  (1-alphaBossak)*a_{n+1} + alphaBossak*a_n is stored per node in the
 *  non-historical variable RELAXED_ACCELERATION.
 *  @param rModelPart model part being solved
 *  @param A system matrix (forwarded to the base class)
 *  @param Dx solution increment (forwarded to the base class)
 *  @param b system right-hand side (forwarded to the base class)
 */
void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
{
// Per-thread local system containers (made firstprivate in the element loop).
Element::EquationIdVectorType EquationId;
LocalSystemVectorType RHS_Contribution;
LocalSystemMatrixType LHS_Contribution;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode)
// First pass over the nodes: clear REACTION and store the Bossak-relaxed
// acceleration. Each iteration touches a single node, so no synchronization
// is needed here.
#pragma omp parallel for
for(int k = 0; k<static_cast<int>(rModelPart.Nodes().size()); k++)
{
auto itNode = rModelPart.NodesBegin() + k;
(itNode->FastGetSolutionStepValue(REACTION)).clear();
// calculating relaxed acceleration: (1-alpha)*a_{n+1} + alpha*a_n
const array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0);
const array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
const array_1d<double, 3> relaxed_acceleration = (1 - mAlphaBossak) * CurrentAcceleration
+ mAlphaBossak * OldAcceleration;
(itNode)->SetValue(RELAXED_ACCELERATION, relaxed_acceleration);
}
//for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem)
// Second pass over the elements: rebuild each local dynamic system and
// scatter -RHS into the nodal REACTION. mMass/mDamp are per-thread scratch
// matrices indexed by OpenMPUtils::ThisThread().
#pragma omp parallel for firstprivate(EquationId,RHS_Contribution,LHS_Contribution)
for(int k = 0; k<static_cast<int>(rModelPart.Elements().size()); k++)
{
auto itElem = rModelPart.Elements().ptr_begin()+k;
int thread_id = OpenMPUtils::ThisThread();
(*itElem)->InitializeNonLinearIteration(CurrentProcessInfo);
//KRATOS_WATCH(LHS_Contribution);
//basic operations for the element considered
(*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
//std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl;
(*itElem)->CalculateMassMatrix(mMass[thread_id], CurrentProcessInfo);
(*itElem)->CalculateLocalVelocityContribution(mDamp[thread_id], RHS_Contribution, CurrentProcessInfo);
(*itElem)->EquationIdVector(EquationId, CurrentProcessInfo);
//adding the dynamic contributions (statics is already included)
AddDynamicsToLHS(LHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo);
AddDynamicsToRHS((*itElem), RHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo);
// Scatter -RHS into the reactions. Several elements share a node, so each
// component update is guarded by an atomic.
GeometryType& rGeom = (*itElem)->GetGeometry();
unsigned int NumNodes = rGeom.PointsNumber();
unsigned int Dimension = rGeom.WorkingSpaceDimension();
unsigned int index = 0;
for (unsigned int i = 0; i < NumNodes; i++)
{
auto& reaction = rGeom[i].FastGetSolutionStepValue(REACTION);
double& target_value0 = reaction[0];
const double& origin_value0 = RHS_Contribution[index++];
#pragma omp atomic
target_value0 -= origin_value0;
double& target_value1 = reaction[1];
const double& origin_value1 = RHS_Contribution[index++];
#pragma omp atomic
target_value1 -= origin_value1;
if (Dimension == 3)
{
double& target_value2 = reaction[2];
const double& origin_value2 = RHS_Contribution[index++];
#pragma omp atomic
target_value2 -= origin_value2;
}
// rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
// rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
// if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
index++; // skip pressure dof
}
}
// Sum reaction contributions across MPI partitions.
rModelPart.GetCommunicator().AssembleCurrentData(REACTION);
// Base scheme calls FinalizeSolutionStep method of elements and conditions
Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b);
}
//************************************************************************************************
//************************************************************************************************
/// Free memory allocated by this object.
void Clear() override
{
// Release the dof-updater's internal storage (e.g. communication structures
// in distributed runs); the scheme itself holds no other heap state to free.
this->mpDofUpdater->Clear();
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
double mAlphaBossak;
double mBetaNewmark;
double mGammaNewmark;
double mMeshVelocity;
double ma0;
double ma1;
double ma2;
double ma3;
double ma4;
double ma5;
double mam;
std::vector< Matrix > mMass;
std::vector< Matrix > mDamp;
std::vector< Vector > mvel;
std::vector< Vector > macc;
std::vector< Vector > maccold;
/*@} */
/**@name Protected Operators*/
/*@{ */
/** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
* both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
* 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
* 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n
* 3- The value on all periodic nodes is replaced by the one received in step 2.
*/
/// Make the OSS projections consistent across periodic boundaries, following
/// the three steps described in the comment above this method.
void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
{
    const int number_of_nodes = rModelPart.NumberOfNodes();
    const int number_of_conditions = rModelPart.NumberOfConditions();

    // Step 0: reset the non-historical containers used as scratch space.
    #pragma omp parallel for
    for (int i_node = 0; i_node < number_of_nodes; i_node++) {
        auto it_node = rModelPart.NodesBegin() + i_node;
        it_node->SetValue(NODAL_AREA, 0.0);
        it_node->SetValue(ADVPROJ, ZeroVector(3));
        it_node->SetValue(DIVPROJ, 0.0);
    }

    // Step 1: the owner of each PeriodicCondition accumulates both sides.
    #pragma omp parallel for
    for (int i_cond = 0; i_cond < number_of_conditions; i_cond++) {
        auto it_cond = rModelPart.ConditionsBegin() + i_cond;
        if (it_cond->Is(PERIODIC))
            this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry());
    }

    // Step 2: communicate the accumulated values to all partitions.
    rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
    rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
    rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);

    // Step 3: overwrite the historical values on the periodic nodes.
    #pragma omp parallel for
    for (int i_node = 0; i_node < number_of_nodes; i_node++) {
        auto it_node = rModelPart.NodesBegin() + i_node;
        this->CorrectContributionsOnPeriodicNode(*it_node);
    }
}
/// Accumulate projection data over all nodes of a periodic link and store the
/// totals in each node's non-historical container.
void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry)
{
    const unsigned int num_nodes = rGeometry.PointsNumber();

    // First pass: total the historical projection data over the link.
    double total_area = 0.0;
    array_1d<double,3> total_momentum_proj = ZeroVector(3);
    double total_mass_proj = 0.0;
    for (unsigned int n = 0; n < num_nodes; n++)
    {
        auto& r_node = rGeometry[n];
        total_area += r_node.FastGetSolutionStepValue(NODAL_AREA);
        noalias(total_momentum_proj) += r_node.FastGetSolutionStepValue(ADVPROJ);
        total_mass_proj += r_node.FastGetSolutionStepValue(DIVPROJ);
    }

    // Second pass: write the totals back to every node in the link.
    // Locks are normally redundant here (each node is expected to belong to a
    // single periodic link), but they are kept so that more elaborate setups
    // (e.g. different periodic conditions per coordinate direction) remain
    // thread-safe in the future.
    for (unsigned int n = 0; n < num_nodes; n++)
    {
        auto& r_node = rGeometry[n];
        r_node.SetLock();
        r_node.GetValue(NODAL_AREA) = total_area;
        noalias(r_node.GetValue(ADVPROJ)) = total_momentum_proj;
        r_node.GetValue(DIVPROJ) = total_mass_proj;
        r_node.UnSetLock();
    }
}
/// Replace the historical projection values on a periodic node with the
/// totals gathered in its non-historical container.
void CorrectContributionsOnPeriodicNode(Node<3>& rNode)
{
    // Only periodic nodes carry a non-zero non-historical NODAL_AREA; all
    // other nodes are left untouched.
    if (rNode.GetValue(NODAL_AREA) == 0.0)
        return;

    rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA);
    noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ);
    rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ);
}
//*********************************************************************************
//Updating first time Derivative
//*********************************************************************************
/// Displacement update:
/// d_{n+1} = d_n + ma3*v_n + ma4*a_n + ma5*a_{n+1}
/// ma3..ma5 are integration coefficients stored on the scheme; presumably
/// derived from mBetaNewmark/mGammaNewmark — confirm where they are set.
void UpdateDisplacement(array_1d<double, 3 > & CurrentDisplacement,
const array_1d<double, 3 > & OldDisplacement,
const array_1d<double, 3 > & OldVelocity,
const array_1d<double, 3 > & OldAcceleration,
const array_1d<double, 3 > & CurrentAcceleration)
{
noalias(CurrentDisplacement) = OldDisplacement + ma3 * OldVelocity + ma4 * OldAcceleration + ma5*CurrentAcceleration;
}
//**************************************************************************
/// Acceleration update from the velocity increment:
/// a_{n+1} = ma0*(v_{n+1} - v_n) + ma2*a_n
/// (DeltaVel is the velocity increment; ma0/ma2 are scheme coefficients).
void UpdateAcceleration(array_1d<double, 3 > & CurrentAcceleration,
const array_1d<double, 3 > & DeltaVel,
const array_1d<double, 3 > & OldAcceleration)
{
noalias(CurrentAcceleration) = ma0 * DeltaVel + ma2 * OldAcceleration;
}
//****************************************************************************
/**
Kdyn = am*M + D + a1*K
*/
/// Assemble the dynamic tangent Kdyn = am*M + D + a1*K.
/// On entry LHS_Contribution holds the stiffness K; on exit it holds Kdyn.
void AddDynamicsToLHS(LocalSystemMatrixType& LHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& M,
ProcessInfo& CurrentProcessInfo)
{
//multiplying the stiffness by the time scheme factor a1
LHS_Contribution *= ma1;
// adding mass contribution to the dynamic stiffness
if (M.size1() != 0) // if M matrix declared
{
noalias(LHS_Contribution) += mam*M;
}
//adding damping contribution
if (D.size1() != 0) // if D matrix declared
{
noalias(LHS_Contribution) += D;
}
}
//****************************************************************************
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current element.
* Note that viscous/pressure contributions to the RHS are expected to be added by the element itself.
* @param[in] rCurrentElement The fluid element we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rD The elemental velocity/pressure LHS matrix.
* @param[in] rM The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(Element::Pointer rCurrentElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo)
{
//adding inertia contribution: RHS -= M * a_relaxed
if (rM.size1() != 0) {
// macc/maccold are per-thread scratch vectors indexed by thread id, so
// this is safe inside the parallel element loops.
int k = OpenMPUtils::ThisThread();
rCurrentElement->GetSecondDerivativesVector(macc[k], 0);
// Bossak-weighted acceleration: (1-alpha)*a_{n+1} + alpha*a_n
(macc[k]) *= (1.00 - mAlphaBossak);
rCurrentElement->GetSecondDerivativesVector(maccold[k], 1);
noalias(macc[k]) += mAlphaBossak * maccold[k];
noalias(rRHS_Contribution) -= prod(rM, macc[k]);
}
// NOTE(review): rCurrentElement is a smart pointer taken by value; a const
// reference would avoid a reference-count update per call — confirm callers.
}
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current condition.
* Note that viscous/pressure contributions to the RHS are expected to be added by the element condition.
* @param[in] rCurrentCondition The fluid condition we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rD The elemental velocity/pressure LHS matrix.
* @param[in] rM The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(
Condition::Pointer rCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo)
{
//adding inertia contribution: RHS -= M * a_relaxed
if (rM.size1() != 0)
{
// macc/maccold are per-thread scratch vectors indexed by thread id, so
// this is safe inside parallel condition loops.
int k = OpenMPUtils::ThisThread();
rCurrentCondition->GetSecondDerivativesVector(macc[k], 0);
// Bossak-weighted acceleration: (1-alpha)*a_{n+1} + alpha*a_n
(macc[k]) *= (1.00 - mAlphaBossak);
rCurrentCondition->GetSecondDerivativesVector(maccold[k], 1);
noalias(macc[k]) += mAlphaBossak * maccold[k];
noalias(rRHS_Contribution) -= prod(rM, macc[k]);
}
}
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;
const Variable<int>& mrPeriodicIdVar;
Process::Pointer mpTurbulenceModel;
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class Scheme */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME defined */
|
SE1P_rsrc_cell_force.c | #include "mex.h"
#include "mex_compat.h"
#include "math.h"
#include "cell_list.h"
#ifdef INTEL_MKL
#include "mkl.h"
#endif
#define X prhs[0] // Source locations
#define F prhs[1] // Source strengths
#define RC prhs[2] // cutoff
#define XI prhs[3] // Ewald Param
#define P prhs[4] // Periodic wrap
#define BOX prhs[5] // domain size
#define U plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
#ifdef _OPENMP
#define CRITICAL _Pragma("omp critical")
#else
#define CRITICAL
#endif
#define PI 3.141592653589793
inline double norm2(double * a)
{
return a[0]*a[0] + a[1]*a[1] + a[2]*a[2];
}
// Compute buffer stuff
// Fixed-capacity scratch buffer: pair interactions are batched here so the
// expensive erfc/exp evaluations can later be done in one vectorizable sweep.
#define BUF_SIZE 256
typedef struct {
    int n;                    // number of entries currently stored
    int idx_t[BUF_SIZE];      // target point indices
    double rvec[3*BUF_SIZE];  // source-to-target displacement vectors
    double rsq[BUF_SIZE];     // squared distances
} ComputeBuffer;
// Append one (target index, displacement, squared distance) entry.
// The caller guarantees the buffer is not already full.
static void buffer_push(ComputeBuffer* buffer, int idx_t, double* rvec, double rsq)
{
    const int slot = buffer->n;
    buffer->idx_t[slot] = idx_t;
    buffer->rvec[3*slot + 0] = rvec[0];
    buffer->rvec[3*slot + 1] = rvec[1];
    buffer->rvec[3*slot + 2] = rvec[2];
    buffer->rsq[slot] = rsq;
    buffer->n = slot + 1;
}
/* Flush the compute buffer: evaluate the real-space Ewald force kernel for
 * every buffered target of source point idx_s, scatter the force to each
 * target in u, and accumulate the equal-and-opposite reaction on the source.
 * Resets buffer->n to 0 on exit.
 * Kernel: c1 = -(2*xi*exp(-xi^2 r^2)/sqrt(pi) + erfc(xi r)/r) / r^2, applied
 * as force_i = fs*c1*rvec_i on the target (and minus that on the source).
 * NOTE(review): parameter x (source locations) is never referenced here;
 * presumably kept for call-site symmetry — confirm. */
static void empty_buffer(ComputeBuffer* buffer,
const double* restrict x,
const double* restrict f,
double* restrict u,
int idx_s,
double xi)
{
int N = buffer->n;
int idx_t;
double rvec[3], fs, ft;
double xi2 = xi*xi;
double us[3] = {0, 0, 0};
fs = f[idx_s];
// Do what we can to help the compiler vectorize exp and erfc, if possible
const double* restrict r2 = buffer->rsq;
double c1[BUF_SIZE];
#ifdef INTEL_MKL
/* MKL path: precompute the arguments, then evaluate erfc and exp over the
 * whole batch with the VML vector functions. */
double r[BUF_SIZE];
double xir[BUF_SIZE];
double xi2r2[BUF_SIZE];
for (int n=0; n<N; n++)
{
r[n] = sqrt(r2[n]);
xir[n] = xi*r[n];
xi2r2[n] = -xi2*r2[n];
}
double erfc_vec[BUF_SIZE];
double exp_vec[BUF_SIZE];
vdErfc(N, xir, erfc_vec);
vdExp(N, xi2r2, exp_vec);
for (int n=0; n<N; n++)
{
double xiexp = xi*exp_vec[n];
c1[n] = -(2.0*xiexp / sqrt(PI) + erfc_vec[n] / r[n])/ r2[n];
}
#else
/* Portable path: scalar libm calls per entry. */
for (int n=0; n<N; n++)
{
double r = sqrt(r2[n]);
double xiexp = xi*exp(-xi2*r2[n]);
c1[n] = -(2.0*xiexp / sqrt(PI) + erfc(xi*r) / r) /r2[n];
}
#endif
// Compute interactions: scatter to the targets, accumulate the source's
// reaction locally in us (Newton's third law).
for (int n=0; n<N; n++)
{
idx_t = buffer->idx_t[n];
ft = f[idx_t];
for(int i=0; i<3; i++)
rvec[i] = buffer->rvec[n*3 + i];
for(int i=0; i<3; i++)
{
u[idx_t*3+i] += fs*c1[n]*rvec[i];
us[i] -= ft*c1[n]*rvec[i];
}
}
// Add the accumulated reaction to the source point once.
for(int i=0; i<3; i++)
u[idx_s*3 + i] += us[i];
buffer->n = 0;
}
// Entry point
/* MEX entry point: real-space (cutoff rc) part of the Ewald force sum.
 * The periodic image shift is applied only along x (pshift has zero y/z
 * components), matching the singly-periodic "SE1P" setting — confirm against
 * the MATLAB caller. Uses a cell list for neighbor search and one
 * thread-local accumulator per OpenMP thread, reduced at the end. */
void
mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] )
{
// input target
// X is 3-by-N (column-major), so point i occupies x[3*i .. 3*i+2].
const int N = mxGetN(X);
const double xi = (double) mxGetScalar(XI);
const double rc = (double) mxGetScalar(RC);
const double rcsq = rc*rc;
// Number of periodic images to sum on each side (used as loop bound below).
const double p = (double) mxGetScalar(P);
const double* x = mxGetPr(X);
const double* f = mxGetPr(F);
const double* box = mxGetPr(BOX);
// output
U = mxCreateDoubleMatrix(3, N, mxREAL);
double* restrict u_out = mxGetPr(U);
// Setup cell list variables
int ncell[3];
int* restrict cell_list;
int* restrict cell_idx;
double rn;
// Offsets of the 27 cells in the 3x3x3 neighborhood (home cell included).
int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
// Build cell list
build_cell_list(x, N, box, rc, &rn, ncell, &cell_list, &cell_idx);
#ifdef _OPENMP
#pragma omp parallel
#endif
{ // Begin parallel section
// Create a thread-local compute buffer
ComputeBuffer buffer;
// Setup local output
double* restrict u;
// Allocation is serialized: the MEX allocator is not thread-safe.
CRITICAL {
u = __MALLOC(3*N*sizeof(double));
}
for(int i=0;i<3*N;i++)
u[i] = 0.0;
// Main loop
#ifdef _OPENMP
#pragma omp for schedule(dynamic) nowait
#endif
for (int idx_s=0; idx_s<N; idx_s++)
{
double xs[3];
int home_cell[3], icell[3];
for(int i=0; i<3; i++)
{
// Source point
xs[i] = x[idx_s*3 + i];
// Determine home cell (truncation assumes coordinates in [0, box)).
home_cell[i] = xs[i]/rn;
}
// Iterate through near cells (including home cell)
buffer.n = 0;
for(int ip=0; ip<27; ip++)
{
// Get neigh cell
icell[0] = home_cell[0] + px[ip];
icell[1] = home_cell[1] + py[ip];
icell[2] = home_cell[2] + pz[ip];
// Stop at boundaries
int inside = 1;
for(int j=0; j<3; j++)
{
if (icell[j] < 0 || icell[j] == ncell[j])
inside = 0;
}
if (!inside)
continue;
int icell_idx =
icell[0] +
icell[1]*ncell[0] +
icell[2]*ncell[1]*ncell[0];
// Go through cell list
int cell_a = cell_idx[icell_idx];
int cell_b = cell_idx[icell_idx+1];
for(int point_idx=cell_a; point_idx<cell_b; point_idx++)
{
int idx_t = cell_list[point_idx];
// Each pair is processed once; empty_buffer applies the
// equal-and-opposite force on the source.
if (idx_s >= idx_t)
continue;
double rvec[3];
// periodic wrap (x direction only)
for (int j=-p; j<=p; j++)
{
double pshift[] = {j*box[0],0,0};
for(int i=0; i<3; i++)
rvec[i] = x[idx_t*3 + i] - pshift[i] - xs[i];
double r2 = norm2(rvec);
if (r2 > rcsq)
continue;
buffer_push(&buffer, idx_t, rvec, r2);
// Flush before the fixed-capacity buffer would overflow.
if (buffer.n == BUF_SIZE)
empty_buffer(&buffer, x, f, u, idx_s, xi);
}
}
}
empty_buffer(&buffer, x, f, u, idx_s, xi);
}
// End of particle loop, collect results
CRITICAL {
for(int i=0; i<3*N; i++)
u_out[i] += u[i];
}
// free/malloc not thread safe under MEX
CRITICAL {
__FREE(u);
}
}
}
|
pi_omp_sumvector.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* Parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
/* Wall-clock time in microseconds as a double (gettimeofday-based). */
double getusec_() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return 1e6 * (double)tv.tv_sec + (double)tv.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%0.6f\n", stamp);
#define MAXTHREADS 16
double sumvector[MAXTHREADS]; // sum for each thread
int main(int argc, char *argv[]) {
double stamp;
double x, sum=0.0, pi=0.0;
double step;
const char Usage[] = "Usage: pi <num_steps> <num_threads>\n";
if (argc < 3) {
fprintf(stderr, Usage);
exit(1);
}
long int num_steps = atoi(argv[1]);
step = 1.0/(double) num_steps;
int num_threads = atoi(argv[2]);
START_COUNT_TIME;
for (int i=0; i<num_threads; i++)
sumvector[i] = 0.0;
#pragma omp parallel private(x) num_threads(num_threads)
{
int myid = omp_get_thread_num();
#pragma omp for
for (long int i=0; i<num_steps; ++i) {
x = (i+0.5)*step;
sumvector[myid] += 4.0/(1.0+x*x);
}
}
for (int i=0; i<num_threads; i++)
sum += sumvector[i];
pi = step * sum;
STOP_COUNT_TIME("Total execution time");
/* print results */
// printf("Number pi after %ld iterations = %.15f\n", num_steps, pi);
return EXIT_SUCCESS;
}
|
yescrypt-simd.c | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
//#ifndef __SSE4_1__
//#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
//#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256_Y.h"
#include "sysendian.h"
#include "sph/yescrypt.h"
#include "sph/yescrypt-platform.c"
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
{ \
__m128i T = _mm_add_epi32(in1, in2); \
out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
}
#endif
#define SALSA20_2ROUNDS \
/* Operate on "columns" */ \
ARX(X1, X0, X3, 7) \
ARX(X2, X1, X0, 9) \
ARX(X3, X2, X1, 13) \
ARX(X0, X3, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x93); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x39); \
\
/* Operate on "rows" */ \
ARX(X3, X0, X1, 7) \
ARX(X2, X3, X0, 9) \
ARX(X1, X2, X3, 13) \
ARX(X0, X1, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x39); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x93);
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3).
*/
#define SALSA20_8_BASE(maybe_decl, out) \
{ \
maybe_decl Y0 = X0; \
maybe_decl Y1 = X1; \
maybe_decl Y2 = X2; \
maybe_decl Y3 = X3; \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
}
#define SALSA20_8(out) \
SALSA20_8_BASE(__m128i, out)
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
*/
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
X0 = _mm_xor_si128(X0, Z0); \
X1 = _mm_xor_si128(X1, Z1); \
X2 = _mm_xor_si128(X2, Z2); \
X3 = _mm_xor_si128(X3, Z3); \
SALSA20_8_BASE(maybe_decl, out)
#define SALSA20_8_XOR_MEM(in, out) \
SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
/* A 64-byte Salsa20 block, viewable either as sixteen 32-bit words or as
 * four 128-bit SSE2 registers. */
typedef union {
uint32_t w[16];
__m128i q[4];
} salsa20_blk_t;
/**
* blockmix_salsa8(Bin, Bout, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size.
*/
/* Note: the X0..X3 working state threads through the SALSA20_* macros; the
 * output is interleaved per scrypt BlockMix: even input blocks land in
 * Bout[0..r], odd blocks in Bout[r+1..2r+1]. */
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
salsa20_blk_t *restrict Bout, size_t r)
{
__m128i X0, X1, X2, X3;
size_t i;
/* Work with r-1 so the last odd block can be peeled off below. */
r--;
PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
X0 = Bin[r * 2 + 1].q[0];
X1 = Bin[r * 2 + 1].q[1];
X2 = Bin[r * 2 + 1].q[2];
X3 = Bin[r * 2 + 1].q[3];
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
/* Each pass handles one odd and one even input block, writing them to the
 * second and first halves of Bout respectively. */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
* (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
* starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
* destination registers, whereas the shifts would require an extra move
* instruction for our code when building without AVX. Unfortunately, PSHUFD
* is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
* and somewhat slower on some non-Intel CPUs (luckily not including AMD
* Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
* win in terms of throughput or/and not needing a move instruction, we
* currently use it despite of the higher latency on some older CPUs. As an
* alternative, the #if below may be patched to only enable use of (V)PSHUFD
* when building with SSE4.1 or newer, which is not available on older CPUs
* where this instruction has higher latency.
*/
#if 1
#define HI32(X) \
_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
_mm_srli_si128((X), 4)
#else
#define HI32(X) \
_mm_srli_epi64((X), 32)
#endif
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
* intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
/* This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
x = EXTRACT64(X) & S_MASK2; \
s0 = *(const __m128i *)(S0 + (uint32_t)x); \
s1 = *(const __m128i *)(S1 + (x >> 32)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#endif
#define PWXFORM_ROUND \
PWXFORM_SIMD(X0, x0, s00, s01) \
PWXFORM_SIMD(X1, x1, s10, s11) \
PWXFORM_SIMD(X2, x2, s20, s21) \
PWXFORM_SIMD(X3, x3, s30, s31)
#define PWXFORM \
{ \
PWXFORM_X_T x0, x1, x2, x3; \
__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
}
#define XOR4(in) \
X0 = _mm_xor_si128(X0, (in)[0]); \
X1 = _mm_xor_si128(X1, (in)[1]); \
X2 = _mm_xor_si128(X2, (in)[2]); \
X3 = _mm_xor_si128(X3, (in)[3]);
#define OUT(out) \
(out)[0] = X0; \
(out)[1] = X1; \
(out)[2] = X2; \
(out)[3] = X3;
/**
* blockmix_pwxform(Bin, Bout, r, S):
* Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
* be 128r bytes in length; the output Bout must also be the same size.
*/
/* If S is NULL this degenerates to classic scrypt BlockMix (salsa20/8);
 * otherwise pwxform is used for all but the last sub-block, which gets a
 * final salsa20/8 application. */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
size_t r, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
if (!S) {
blockmix_salsa8(Bin, Bout, r);
return;
}
/* The two S-boxes are the two halves of the S area. */
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
PREFETCH(&Bin[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
/* X <-- B_{r1 - 1} */
X0 = Bin[r].q[0];
X1 = Bin[r].q[1];
X2 = Bin[r].q[2];
X3 = Bin[r].q[3];
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
}
#define XOR4_2(in1, in2) \
X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
X3 = _mm_xor_si128((in1)[3], (in2)[3]);
/**
 * blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin1 xor Bin2) and return the low
 * 32 bits of the final X0 (the caller uses this value; presumably for
 * integerify-style indexing — confirm at the call sites). Bin2_in_ROM
 * selects non-temporal prefetch hints for Bin2 when it resides in a ROM.
 */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM)
{
__m128i X0, X1, X2, X3;
size_t i;
r--;
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
} else {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
}
PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[0].q)
SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2].q)
SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[r * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)
return _mm_cvtsi128_si32(X0);
}
/**
 * blockmix_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM, S):
 * pwxform variant of blockmix_salsa8_xor: Bout = BlockMix(Bin1 xor Bin2),
 * returning the low 32 bits of the final X0. Falls back to the salsa20/8
 * version when no S-boxes are provided.
 */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
if (!S)
return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);
/* The two S-boxes are the two halves of the S area. */
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r], _MM_HINT_NTA)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_NTA)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
} else {
PREFETCH(&Bin2[r], _MM_HINT_T0)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_T0)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0);
/* X <-- B_{r1 - 1} */
XOR4_2(Bin1[r].q, Bin2[r].q)
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
return _mm_cvtsi128_si32(X0);
}
/* Redefine XOR4 for the *_save variants: besides loading X \xor B into
 * the Y registers, it writes Bin1 xor Bin2 back into "out" (i.e. Bin2
 * is updated in place). */
#undef XOR4
#define XOR4(in, out) \
	(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
	(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
	(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
	(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);

/*
 * blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r):
 * Salsa20/8 BlockMix of (Bin1 xor Bin2) into Bout, additionally storing
 * Bin1 xor Bin2 back into Bin2 (via the XOR4 macro above).  Returns the
 * low 32 bits of the final X state.
 */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	r--;
	PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q, Bin2[0].q)
	SALSA20_8_XOR_REG(Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	/* Two sub-blocks per pass: odd Y's land in the upper half of Bout,
	 * even Y's in the lower half */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
		SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
		SALSA20_8_XOR_REG(Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
	SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}
/* Fold the Y registers (the B values saved by the XOR4 macro above)
 * into the running X state. */
#define XOR4_Y \
	X0 = _mm_xor_si128(X0, Y0); \
	X1 = _mm_xor_si128(X1, Y1); \
	X2 = _mm_xor_si128(X2, Y2); \
	X3 = _mm_xor_si128(X3, Y3);

/*
 * blockmix_xor_save(Bin1, Bin2, Bout, r, S):
 * pwxform-based BlockMix of (Bin1 xor Bin2) into Bout; as a side effect
 * Bin2 is overwritten with Bin1 xor Bin2 (see XOR4 above).  Falls back
 * to the Salsa20/8-only variant when no S-boxes are supplied.  Returns
 * the low 32 bits of the final X state.
 */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

	/* Two S-box halves (S0/S1) consumed by PWXFORM */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;
	r--;

	PREFETCH(&Bin2[r], _MM_HINT_T0)
	PREFETCH(&Bin1[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i], _MM_HINT_T0)
		PREFETCH(&Bin1[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		XOR4(Bin1[i].q, Bin2[i].q)
		/* X <-- H'(X \xor B_i) */
		XOR4_Y
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q, Bin2[i].q)
	XOR4_Y
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y
/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer
 * (only the low 32 bits, i.e. the first word of the last 64-byte
 * sub-block, are needed by the callers).
 */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
	const salsa20_blk_t * last = &B[2 * r - 1];

	return last->w[0];
}
/**
* smix1(B, r, N, flags, V, NROM, shared, XY, S):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 128r bytes in length. The value N must be even and no
* smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
* arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
* bytes as well saves cache lines, but might result in cache bank conflicts).
*/
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = V, * Y;
	uint32_t i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* Decode B into V[0], undoing the (i * 5 % 16) Salsa20 word shuffle */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	if (NROM && (VROM_mask & 1)) {
		/* ROM-enabled variant: reads from VROM are interleaved at the
		 * positions selected by VROM_mask */
		uint32_t n;
		salsa20_blk_t * V_n;
		const salsa20_blk_t * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		X = &V[2 * s];
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);
			V_j = &VROM[j * s];

			/* X <-- H(X \xor VROM_j) */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
		} else {
			/* X <-- H(X) */
			blockmix(Y, X, r, S);
			j = integerify(X, r);
		}

		/* Fill V in power-of-2 sized batches, two blocks at a time */
		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				Y = &V_n[i * s];
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				if (((n + i) & VROM_mask) == 1) {
					/* j <-- Integerify(X) mod NROM */
					j &= NROM - 1;
					V_j = &VROM[j * s];
				} else {
					/* j <-- Wrap(Integerify(X), i) */
					j &= n - 1;
					j += i;
					V_j = &V[j * s];
				}

				/* X <-- H(X \xor VROM_j) */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 1, S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		if (((N - 1) & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j &= NROM - 1;
			V_j = &VROM[j * s];
		} else {
			/* j <-- Wrap(Integerify(X), i) */
			j &= n - 1;
			j += N - 1 - n;
			V_j = &V[j * s];
		}

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 1, S);
	} else if (flags & YESCRYPT_RW) {
		/* yescrypt RW variant: revisit already-written V blocks via
		 * the Wrap() indexing, no ROM involved */
		uint32_t n;
		salsa20_blk_t * V_n, * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		X = &V[2 * s];
		blockmix(Y, X, r, S);
		j = integerify(X, r);

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				Y = &V_n[i * s];

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 0, S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 1 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 0, S);
	} else {
		/* Classic scrypt: fill V sequentially, two blocks per pass */
		/* 2: for i = 0 to N - 1 do */
		for (i = 1; i < N - 1; i += 2) {
			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			Y = &V[i * s];
			blockmix(X, Y, r, S);

			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			X = &V[(i + 1) * s];
			blockmix(Y, X, r, S);
		}

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[i * s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		X = XY;
		blockmix(Y, X, r, S);
	}

	/* B' <-- X */
	/* Re-encode X back into B, restoring the word shuffle */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r bytes in length. The value N must be a power of 2
* greater than 1. The value Nloop must be even. The array V must be aligned
* to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
* bytes (aligning them to 64 bytes as well saves cache lines, but might result
* in cache bank conflicts).
*/
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = XY, * Y = &XY[s];
	uint64_t i;
	uint32_t j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	/* 3: V_i <-- X */
	/* Decode B into X, undoing the (i * 5 % 16) Salsa20 word shuffle */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	/* Pair count for the do/while branches below (the for-loop
	 * branches reassign i as their own counter) */
	i = Nloop / 2;

	/* 7: j <-- Integerify(X) mod N */
	j = integerify(X, r) & (N - 1);

/*
 * Normally, NROM implies YESCRYPT_RW, but we check for these separately
 * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
 * operating on the entire V.
 */
	if (NROM && (flags & YESCRYPT_RW)) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor_save(X, V_j, Y, r, S);

			if (((i + 1) & VROM_mask) == 1) {
				const salsa20_blk_t * VROM_j;

				j &= NROM - 1;
				VROM_j = &VROM[j * s];

				/* X <-- H(X \xor VROM_j) */
				/* 7: j <-- Integerify(X) mod N */
				j = blockmix_xor(Y, VROM_j, X, r, 1, S);
			} else {
				j &= N - 1;
				V_j = &V[j * s];

				/* 8: X <-- H(X \xor V_j) */
				/* V_j <-- Xprev \xor V_j */
				/* j <-- Integerify(X) mod NROM */
				j = blockmix_xor_save(Y, V_j, X, r, S);
			}
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (NROM) {
		/* ROM without RW: read-only accesses to both V and VROM */
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor(X, V_j, Y, r, 0, S);

			if (((i + 1) & VROM_mask) == 1) {
				j &= NROM - 1;
				V_j = &VROM[j * s];
			} else {
				j &= N - 1;
				V_j = &V[j * s];
			}

			/* X <-- H(X \xor VROM_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (flags & YESCRYPT_RW) {
		/* 6: for i = 0 to N - 1 do */
		do {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(X, V_j, Y, r, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(Y, V_j, X, r, S);
			j &= N - 1;
		} while (--i);
	} else {
		/* Classic scrypt second loop */
		/* 6: for i = 0 to N - 1 do */
		do {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(X, V_j, Y, r, 0, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 0, S);
			j &= N - 1;
		} while (--i);
	}

	/* 10: B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
 * p2floor(x):
 * Largest power of 2 not greater than argument (0 maps to 0).
 */
static uint64_t
p2floor(uint64_t x)
{
	/* Clear the lowest set bit until a single bit (or zero) remains. */
	for (;;) {
		uint64_t stripped = x & (x - 1);
		if (stripped == 0)
			return x;
		x = stripped;
	}
}
/**
* smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage XY
* must be 256r or 256rp bytes in length (the larger size is required with
* OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
* The array V must be aligned to a multiple of 64 bytes, and arrays B and
* XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
* saves cache lines and helps avoid false sharing in OpenMP-enabled builds
* when p > 1, but it might also result in cache bank conflicts).
*/
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	size_t s = 2 * r;
	uint32_t Nchunk = N / p;
	uint64_t Nloop_all, Nloop_rw;
	uint32_t i;

	/* Scale the smix2 iteration count by the time cost t, per mode */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	/* Portion of the smix2 iterations run with read-write access to V */
	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint32_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	/* Phase 1: each lane i fills and revisits its own chunk of V */
	for (i = 0; i < p; i++) {
		uint32_t Vchunk = i * Nchunk;
		uint8_t * Bp = &B[128 * r * i];
		salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
		salsa20_blk_t * XYp = XY;
#endif
		/* The last lane picks up any remainder of N */
		uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
		if (Sp)
			/* Fill this lane's pwxform S-box area from Bp,
			 * with pwxform itself disabled for that pass */
			smix1(Bp, 1, S_SIZE_ALL / 128,
			    flags & ~YESCRYPT_PWXFORM,
			    Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
		    NROM, shared, XYp, Sp);
	}

	/* Phase 2: remaining smix2 iterations over the entire V, with
	 * YESCRYPT_RW cleared (read-only sharing between lanes) */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
			salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
			salsa20_blk_t * XYp = XY;
#endif
			void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}
/**
* yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
* N, r, p, t, flags, buf, buflen):
* Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
* p, buflen), or a revision of scrypt as requested by flags and shared, and
* write the result into buf. The parameters r, p, and buflen must satisfy
* r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
* of 2 greater than 1. (This optimized implementation currently additionally
* limits N to the range from 8 to 2^31, but other implementation might not.)
*
* t controls computation time while not affecting peak memory usage. shared
* and flags may request special modes as described in yescrypt.h. local is
* the thread-local data structure, allowing to preserve and reuse a memory
* allocation across calls, thereby reducing its overhead.
*
* Return 0 on success; or -1 on error.
*/
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint8_t * B, * S;
	salsa20_blk_t * V, * XY;
	uint8_t sha256[32];

	/*
	 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
	 * so don't let it have side-effects.  Without this adjustment, it'd
	 * enable the SHA-256 password pre-hashing and output post-hashing,
	 * because any deviation from classic scrypt implies those.
	 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (N > UINT32_MAX) {
		errno = EFBIG;
		return -1;
	}
	/* N must be a power of 2 no smaller than 8 */
	if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
		errno = EINVAL;
		return -1;
	}
	/* Guard the size computations below against overflow */
	if ((r > SIZE_MAX / 256 / p) ||
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / S_SIZE_ALL) {
		errno = ENOMEM;
		return -1;
	}

	/* Derive NROM from the size of the provided ROM region, if any */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (NROM > UINT32_MAX) {
			errno = EFBIG;
			return -1;
		}
		/* Using a ROM requires power-of-2 NROM >= 8 and YESCRYPT_RW */
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V lives in the caller's local region */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (salsa20_blk_t *)local->aligned;
		need = 0;
	}
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {	/* overflow check */
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {	/* overflow check */
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {	/* overflow check */
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint8_t *)tmp.aligned;
		XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
	} else {
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		/* Carve B, V, and XY out of one contiguous allocation */
		B = (uint8_t *)local->aligned;
		V = (salsa20_blk_t *)((uint8_t *)B + B_size);
		XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint8_t *)XY + XY_size;

	/* Any non-classic-scrypt mode pre-hashes the password */
	if (t || flags) {
		SHA256_CTX_Y ctx;
		SHA256_Init_Y(&ctx);
		SHA256_Update_Y(&ctx, passwd, passwdlen);
		SHA256_Final_Y(sha256, &ctx);
		passwd = sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

	/* In non-classic modes, replace the password for the final PBKDF2
	 * with the first 32 bytes of B (passwd points at sha256 here) */
	if (t || flags)
		memcpy(sha256, B, sizeof(sha256));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
			    &V[(size_t)2 * r * i * N],
			    NROM, shared,
			    &XY[(size_t)4 * r * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client.  The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX_Y ctx;
			HMAC_SHA256_Init_Y(&ctx, buf, buflen);
#if 0
			/* Proper yescrypt */
			HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
#else
			/* GlobalBoost-Y buggy yescrypt: HMACs the salt instead
			 * of the "Client Key" label -- kept for compatibility */
			HMAC_SHA256_Update_Y(&ctx, salt, saltlen);
#endif
			HMAC_SHA256_Final_Y(sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX_Y ctx;
			SHA256_Init_Y(&ctx);
			SHA256_Update_Y(&ctx, sha256, sizeof(sha256));
			SHA256_Final_Y(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
|
ast-dump-openmp-begin-declare-variant_nested.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
int also_before(void) { // base definition; a variant inside the begin/end block below also exists
return 1; // base version returns 1 (the variant below returns 0)
}
#pragma omp begin declare variant match(user = {condition(1)}, device = {kind(cpu)}, implementation = {vendor(llvm)})
#pragma omp begin declare variant match(device = {kind(cpu)}, implementation = {vendor(llvm, pgi), extension(match_any)})
#pragma omp begin declare variant match(device = {kind(any)}, implementation = {dynamic_allocators})
int also_after(void) { // variant definition under the three nested declare-variant contexts
return 0; // variant returns 0
}
int also_before(void) { // variant of also_before under the same nested contexts
return 0; // variant returns 0
}
#pragma omp end declare variant
#pragma omp end declare variant
#pragma omp end declare variant
int also_after(void) { // base definition of also_after, outside all variant blocks
return 2; // base version returns 2
}
int test() {
// Should return 0: both calls are expected to resolve to the variant definitions, which return 0.
return also_after() + also_before();
}
#pragma omp begin declare variant match(device = {isa("sse")})
#pragma omp declare variant(test) match(device = {isa(sse)})
int equivalent_isa_trait(void);
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {isa("sse")})
#pragma omp declare variant(test) match(device = {isa("sse2")})
int non_equivalent_isa_trait(void);
#pragma omp end declare variant
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(1)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:15:1> 'int ({{.*}})' {{.*}}Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:12:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(1)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:14:1> line:12:1 also_after[device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(...)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:14:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:13:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:15:1, line:17:1> line:15:1 also_before[device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(...)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:17:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:16:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:22:1, line:24:1> line:22:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:24:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:23:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 2
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(1)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:12:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'also_after[device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:26:1, line:29:1> line:26:5 referenced test 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:12, line:29:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:28:3, col:37>
// CHECK-NEXT: | `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: | |-PseudoObjectExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | | |-CallExpr [[ADDR_27:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_29:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: | | `-CallExpr [[ADDR_30:0x[a-z0-9]*]] <line:12:1, line:28:21> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <line:12:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'also_after[device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: | `-PseudoObjectExpr [[ADDR_32:0x[a-z0-9]*]] <line:28:25, col:37> 'int'
// CHECK-NEXT: | |-CallExpr [[ADDR_33:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_36:0x[a-z0-9]*]] <line:15:1, line:28:37> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <line:15:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_6]] 'also_before[device={kind(any, cpu)}, implementation={dynamic_allocators, vendor(llvm, pgi), extension(match_any)}, user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_38:0x[a-z0-9]*]] <line:33:1, col:30> col:5 equivalent_isa_trait 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_39:0x[a-z0-9]*]] <line:32:1, col:61> Implicit device={isa(sse)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_40:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_22]] 'test' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: `-FunctionDecl [[ADDR_41:0x[a-z0-9]*]] <line:38:1, col:34> col:5 non_equivalent_isa_trait 'int ({{.*}})'
// CHECK-NEXT: `-OMPDeclareVariantAttr [[ADDR_42:0x[a-z0-9]*]] <line:37:1, col:64> Implicit device={isa(sse2, sse)}
// CHECK-NEXT: `-DeclRefExpr [[ADDR_43:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_22]] 'test' 'int ({{.*}})' non_odr_use_unevaluated
|
raytrace.c |
#ifndef __OPENCL_VERSION__
#include "raytrace.h"
#include "affine.h"
#include "bbox.h"
#include "cx/bittable.h"
#include "color.h"
#include "lightcut.h"
#include "order.h"
#include "point.h"
#include "simplex.h"
#include "space-junk.h"
#include "xfrm.h"
#include <assert.h>
#include <math.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#endif /* #ifndef __OPENCL_VERSION__ */
/* Note: When setting this to false, set MaxKDTreeDepth = 0 in kdtree.c.*/
static const bool KDTreeRayTrace = true;
#if NDimensions == 3
static const bool BarycentricRayTrace = false;
#else
static const bool BarycentricRayTrace = true;
#endif
#ifndef __OPENCL_VERSION__
//static bool DBogTraceOn = false;
#ifdef TrivialMpiRayTrace
#include <mpi.h>
static void
balancer_triv_sync_mpi_RayImage (RayImage* image,
uint row_off, uint row_nul,
uint nprocs);
static void
computer_triv_sync_mpi_RayImage (const RayImage* image,
uint row_off, uint row_nul,
uint myrank, uint nprocs);
#endif
static void
partition_ObjectRaySpace (ObjectRaySpace* space);
static void
partition_verts_ObjectRaySpace (ObjectRaySpace* space);
static void
init_Scene_KPTreeGrid (KPTreeGrid* grid, const Scene* scene);
static void
init_RaySpace_KDTreeGrid (KDTreeGrid* grid, const RaySpace* space);
static void
fill_pixel (Color* ret_color,
uint hitidx, real mag, uint objidx,
const RayImage* image,
const Point* origin,
const Point* dir,
const RaySpace* space,
Trit front,
uint nbounces,
URandom* urandom);
static void
cast_colors (Color* ret_color,
const RaySpace* restrict space,
const RayImage* restrict image,
const Point* restrict origin,
const Point* restrict dir,
const Color* factor,
Trit front,
uint nbounces,
URandom* urandom);
static void
cast_row_orthographic (RayImage* restrict image,
uint row,
const RaySpace* restrict space,
const RayCastAPriori* restrict known,
URandom* urandom);
static void
cast_row_perspective (RayImage* image, uint row,
const RaySpace* restrict space,
const RayCastAPriori* restrict known,
URandom* urandom);
/* Include all packet tracing stuff here.
* Keep dependence on this minimal.
*/
#ifdef PackOpsAvail
#include "raytrace-pack.c"
#endif
/* Initialize a RaySpace to an empty state: no dynamic objects, no
 * lights, an empty object KD-tree, a zeroed bounding box, sky texture
 * index 0, and partitioning enabled by default. */
void
init_RaySpace (RaySpace* space)
{
  init_ObjectRaySpace (&space->main);
  space->nobjects = 0;
  space->nlights = 0;
  init_KDTree (&space->object_tree);
  space->partition = true;
  zero_Point (&space->box.min);
  zero_Point (&space->box.max);
  space->skytxtr = 0;
  init_LightCutTree (&space->lightcuts);
}
/* Initialize one object's ray space: centroid at the origin, identity
 * orientation, an empty scene, no elements, and empty element/vertex
 * trees.  The object starts out visible. */
void
init_ObjectRaySpace (ObjectRaySpace* space)
{
  zero_Point (&space->centroid);
  identity_PointXfrm (&space->orientation);
  init_Scene (&space->scene);
  space->nelems = 0;
  init_KDTree (&space->tree);
  init_KPTree (&space->verttree);
  space->visible = true;
}
/* Initialize a point light source: located at the origin, intensity
 * set to 1 (per channel), non-diffuse, full-sphere (not hemisphere)
 * emission, and switched on. */
void
init_PointLightSource (PointLightSource* light)
{
  zero_Point (&light->location);
  set_Color (&light->intensity, 1);
  light->diffuse = false;
  light->hemisphere = false;
  light->on = true;
}
/* Copy a point light source.  Plain struct assignment (shallow copy);
 * assumes PointLightSource holds no owned pointers -- consistent with
 * the fields set in init_PointLightSource() above. */
void
copy_PointLightSource (PointLightSource* dst, const PointLightSource* src)
{
  *dst = *src;
}
/* Build derived data (elements, simplices, KD-/KP-trees) for the main
 * object and for every dynamic object already loaded into the space. */
void
init_filled_RaySpace (RaySpace* space)
{
  uint i;
  init_filled_ObjectRaySpace (&space->main);
  UFor( i, space->nobjects )
    init_filled_ObjectRaySpace (&space->objects[i]);
}
/* Recompute the object's derived geometry from its scene data: the
 * bounding box, per-element simplices (copies of each element's
 * vertices), and the barycentric simplex forms. */
static
void
update_internal_transformed_ObjectRaySpace (ObjectRaySpace* space)
{
  uint ei;
  const Scene* scene;
  scene = &space->scene;
  init_BBox (&space->box, scene->nverts, scene->verts);
  /* Copy each element's vertex positions into its Simplex. */
  UFor( ei, space->nelems )
  {
    uint pi;
    const SceneElement* elem;
    Simplex* tri;
    elem = &scene->elems[ei];
    tri = &space->elems[ei];
    UFor( pi, NDimensions )
    {
      uint vi;
      vi = elem->verts[pi];
      assert (vi < scene->nverts);
      copy_Point (&tri->pts[pi], &scene->verts[vi]);
    }
  }
  /* Build the barycentric form for each simplex. */
  UFor( ei, space->nelems )
  {
    Simplex raw;
    bool good;
    simplex_Scene (&raw, scene, ei);
    good = init_BarySimplex (&space->simplices[ei], &raw);
    /* A degenerate simplex is only logged, not fatal. */
    if (!good) DBog1( "ei:%u", ei );
    /* assert (good); */
  }
}
void
init_filled_ObjectRaySpace (ObjectRaySpace* object)
{
    /* Derive render-ready data from an already-populated scene:
     * raw simplices, barycentric simplices, the element KD-tree, and
     * the vertex KP-tree.
     */
    object->nelems = object->scene.nelems;
    AllocTo( object->elems, object->nelems );
    AllocTo( object->simplices, object->nelems );
    update_internal_transformed_ObjectRaySpace (object);
    partition_ObjectRaySpace (object);
    partition_verts_ObjectRaySpace (object);
}
void
init_trivial_ObjectRaySpace (ObjectRaySpace* object)
{
    /* Like init_filled_ObjectRaySpace() but builds a trivial one-leaf
     * KD-tree (no real partitioning, no vertex KP-tree). Intended for
     * geometry that changes every frame; pair with
     * update_trivial_ObjectRaySpace().
     */
    object->nelems = object->scene.nelems;
    AllocTo( object->elems, object->nelems );
    AllocTo( object->simplices, object->nelems );
    update_internal_transformed_ObjectRaySpace (object);
    build_trivial_KDTree (&object->tree, object->nelems, &object->box);
}
void
update_trivial_ObjectRaySpace (ObjectRaySpace* object)
{
    /* Refresh derived geometry after vertex motion. Only valid for
     * objects set up by init_trivial_ObjectRaySpace(): node 0 is the
     * single leaf, whose box must track the recomputed bounds.
     */
    update_internal_transformed_ObjectRaySpace (object);
    object->tree.nodes[0].as.leaf.box = object->box;
}
void
cleanup_RaySpace (RaySpace* space)
{
    uint idx;
    /* Tear down per-object state first, then the shared structures.*/
    cleanup_ObjectRaySpace (&space->main);
    UFor( idx, space->nobjects )
        cleanup_ObjectRaySpace (&space->objects[idx]);
    /* The arrays themselves are only allocated when non-empty.*/
    if (space->nobjects > 0)  free (space->objects);
    if (space->nlights > 0)  free (space->lights);
    lose_KDTree (&space->object_tree);
    lose_LightCutTree (&space->lightcuts);
}
void
cleanup_ObjectRaySpace (ObjectRaySpace* space)
{
    /* Release the scene geometry and both spatial indices.*/
    cleanup_Scene (&space->scene);
    lose_KPTree (&space->verttree);
    lose_KDTree (&space->tree);
    /* elems/simplices are allocated together when nelems > 0
     * (see init_filled_ObjectRaySpace).
     */
    if (space->nelems > 0)
    {
        free (space->simplices);
        free (space->elems);
    }
}
/** Partition the space containing dynamic objects.
 * Rebuilds the object-level KD-tree over the main object plus all
 * dynamic objects, and refreshes /space->box/ to the grid's bounds.
 **/
void
update_dynamic_RaySpace (RaySpace* space)
{
    if (space->partition && space->nobjects > 0)
    {
        KDTreeGrid grid;
        init_RaySpace_KDTreeGrid (&grid, space);
        space->box = grid.box;
        /* Since it's a regeneration, clean up the previous version.*/
        lose_KDTree (&space->object_tree);
        build_KDTree (&space->object_tree, &grid, 0);
        /* output_KDTreeGrid (stderr, &grid); */
        /* output_KDTree (stderr, &space->object_tree); */
        lose_KDTreeGrid (&grid);
    }
    else
    {
        /* NOTE(review): this latches /partition/ off permanently even
         * if dynamic objects are added later — confirm intent.
         */
        space->partition = false;
        space->box = space->main.box;
    }
}
void
partition_ObjectRaySpace (ObjectRaySpace* space)
{
    /* Build the element-level KD-tree over this object's scene.
     * Assumes /space->box/ already bounds all vertices (see
     * update_internal_transformed_ObjectRaySpace).
     */
    KDTreeGrid grid;
    init_Scene_KDTreeGrid (&grid, &space->scene, &space->box);
#if 1
    build_KDTree (&space->tree, &grid, space->elems);
#else
    /* Can use this code for development, less time spent building tree.*/
    build_KDTree (&space->tree, &grid, 0);
#endif
#if 0
    printf ("nnodes:%u  nelemidcs:%u\n",
            space->tree.nnodes, space->tree.nelemidcs);
#endif
    lose_KDTreeGrid (&grid);
}
void
partition_verts_ObjectRaySpace (ObjectRaySpace* space)
{
    /* Index the scene's vertices in a KP-tree for point queries.*/
    KPTreeGrid vgrid;
    init_Scene_KPTreeGrid (&vgrid, &space->scene);
    build_KPTree (&space->verttree, &vgrid);
    lose_KPTreeGrid (&vgrid);
}
void
init_Scene_KDTreeGrid (KDTreeGrid* grid, const Scene* scene,
                       const BBox* box)
{
    /* Populate a KD-tree build grid from the scene: one axis-aligned
     * interval per element per dimension, covering that element's
     * vertices. /box/ must already bound every vertex and becomes the
     * grid's box.
     */
    uint i, nelems;
    nelems = scene->nelems;
    assert (nelems > 0);
    init_KDTreeGrid( grid, nelems );
    /* Element indices start as the identity permutation.*/
    fill_minimal_unique (grid->elemidcs, grid->nelems);
    UFor( i, NDimensions )
        fill_minimal_unique (grid->intls[i], 2*nelems);
    UFor( i, nelems )
    {
        uint pi, dim, ti;
        const SceneElement* elem;
        real coords[NDimensions][2];  /* [dim][0] = min, [dim][1] = max */
        elem = &scene->elems[i];
        UFor( dim, NDimensions )
        {
            coords[dim][0] = Max_real;
            coords[dim][1] = Min_real;
        }
        UFor( pi, NDimensions )
        {
            const Point* p;
            p = &scene->verts[elem->verts[pi]];
            /* Invariant per vertex — hoisted out of the dimension loop
             * so it runs once instead of NDimensions times.
             */
            assert (inside_BBox (box, p));
            UFor( dim, NDimensions )
            {
                if (p->coords[dim] < coords[dim][0])
                    coords[dim][0] = p->coords[dim];
                if (p->coords[dim] > coords[dim][1])
                    coords[dim][1] = p->coords[dim];
            }
        }
        /* Intervals for element /i/ occupy slots 2i and 2i+1.*/
        ti = 2*i;
        UFor( dim, NDimensions )
        {
            grid->coords[dim][ti] = coords[dim][0];
            grid->coords[dim][ti+1] = coords[dim][1];
        }
    }
    grid->box = *box;
}
void
init_Scene_KPTreeGrid (KPTreeGrid* grid, const Scene* scene)
{
    /* Load every scene vertex into the point-tree build grid.
     * (The {:for ...} block is this project's loop syntax.)
     */
    init_KPTreeGrid (grid, scene->nverts);
    {:for (i ; scene->nverts)
        set1_KPTreeGrid (grid, i, &scene->verts[i]);
    }
}
void
init_RaySpace_KDTreeGrid (KDTreeGrid* grid, const RaySpace* space)
{
    /* Build a KD-tree grid whose "elements" are whole objects: dynamic
     * objects at indices 0..nobjects-1 plus the main object at index
     * /nobjects/. Invisible objects are skipped and the grid is shrunk
     * to the visible count at the end.
     */
    uint i;
    uint nvisible = 0;
    init_KDTreeGrid (grid, 1 + space->nobjects);
    UFor( i, NDimensions )
    {
        fill_minimal_unique (grid->intls[i], 2*grid->nelems);
        /* Inverted box so the first include_BBox() snaps to real bounds.*/
        grid->box.max.coords[i] = Min_real;
        grid->box.min.coords[i] = Max_real;
    }
    UFor( i, space->nobjects+1 )
    {
        uint dim, ti;
        const ObjectRaySpace* object;
        BBox box;
        if (i < space->nobjects)  object = &space->objects[i];
        else  object = &space->main;
        if (!object->visible)  continue;
        ti = 2 * nvisible;
        /* Dynamic objects: bounds mapped into the world frame.*/
        if (i < space->nobjects)
            trxfrm_BBox (&box,
                         &object->orientation,
                         &object->box,
                         &object->centroid);
        else
            box = object->box;
        include_BBox (&grid->box, &grid->box, &box);
        grid->elemidcs[nvisible] = i;
        UFor( dim, NDimensions )
        {
            real lo, hi;
            lo = box.min.coords[dim];
            hi = box.max.coords[dim];
            grid->coords[dim][ti] = lo;
            grid->coords[dim][ti+1] = hi;
        }
        nvisible += 1;
    }
    shrink_KDTreeGrid (grid, nvisible);
}
void init_RayImage (RayImage* image)
{
    uint ci;
    /* No output buffers until the caller sets dimensions and resizes;
     * null means "this channel is not wanted".
     */
    image->hits = 0;
    image->mags = 0;
    image->pixels = 0;
    image->nrows = 0;
    image->ncols = 0;
    image->stride = 0;
    /* Horizontal field of view: 2*atan(1/3), roughly 37 degrees.*/
    image->hifov = 2 * atan (1.0 / 3);
    image->perspective = true;
    UFor( ci, NColors )
        image->ambient[ci] = 0.2;
    image->view_light = 0;
    /* Default shading behavior.*/
    image->shading_on = true;
    image->color_distance_on = true;
    image->diffuse_camera_on = false;
    image->nbounces_max = 2;
    image->culling_on = false;
}
void resize_RayImage (RayImage* image)
{
    /* (Re)allocate whichever buffers are in use for the current
     * nrows/ncols, padding each row to an alignment-friendly stride.
     * Null buffers stay null: the caller opts in per channel.
     */
#ifdef PackOpsAvail
    const uint align = RayPacketDimSz;
#else
    const uint align = 4;
#endif
    uint npixels;
    if (image->nrows == 0 || image->ncols == 0)  return;
    /* Round ncols up to a multiple of /align/ (assumes ceil_uint is a
     * ceiling division — confirm).
     */
    image->stride = align * ceil_uint (image->ncols, align);
    npixels = image->nrows * image->stride;
    if (image->hits)  ResizeT( uint, image->hits, npixels );
    if (image->mags)  ResizeT( real, image->mags, npixels );
    if (image->pixels)  ResizeT( byte, image->pixels, 3 * npixels );
}
void restride_RayImage (RayImage* image)
{
    /* Spread packed rows (width ncols) out to the strided layout.
     * Rows are processed bottom-up because each destination row starts
     * at or after its source row, so later sources are never clobbered;
     * memmove covers any overlap within a single row.
     */
    uint i;
    /* This should be set to the desired stride,
     * initialized by a resize.
     */
    assert (image->stride >= image->ncols);
    if (image->stride == image->ncols)  return;
    UFor( i, image->nrows )
    {
        uint row, dst_idx, src_idx;
        row = image->nrows - i - 1;
        dst_idx = row * image->stride;
        src_idx = row * image->ncols;
        if (image->hits)
            memmove (&image->hits[dst_idx], &image->hits[src_idx],
                     image->ncols * sizeof (uint));
        if (image->mags)
            memmove (&image->mags[dst_idx], &image->mags[src_idx],
                     image->ncols * sizeof (real));
        if (image->pixels)
            memmove (&image->pixels[3 * dst_idx], &image->pixels[3 * src_idx],
                     image->ncols * 3 * sizeof (byte));
    }
}
/** Remove the stride from a RayImage to make
 * /image->cols == image->stride/.
 * Inverse of restride_RayImage(): rows are packed top-down since each
 * destination row starts at or before its source row.
 **/
void unstride_RayImage (RayImage* image)
{
    uint row;
    if (image->ncols == image->stride)  return;
    UFor( row, image->nrows )
    {
        uint dst_idx, src_idx;
        dst_idx = row * image->ncols;
        src_idx = row * image->stride;
        if (image->hits)
            memmove (&image->hits[dst_idx], &image->hits[src_idx],
                     image->ncols * sizeof (uint));
        if (image->mags)
            memmove (&image->mags[dst_idx], &image->mags[src_idx],
                     image->ncols * sizeof (real));
        if (image->pixels)
            memmove (&image->pixels[3 * dst_idx], &image->pixels[3 * src_idx],
                     image->ncols * 3 * sizeof (byte));
    }
    image->stride = image->ncols;
}
void downsample_RayImage (RayImage* image, uint inv)
{
    /* Shrink the image in place by integer factor /inv/ per axis.
     * Pixels are box-filtered over each inv*inv cell: every source byte
     * contributes its quotient (x / inv^2) immediately, while the
     * remainders are carried in /o_fracline/ so truncation error
     * accumulates instead of being dropped per sample.
     * hits/mags are point-sampled at the top-left source pixel of each
     * cell. On return the image is packed (stride == ncols).
     */
    uint row, i_nrows, i_ncols, o_nrows, o_ncols;
    uint inv2;
    byte* o_pixline;
    byte* o_fracline;
    i_nrows = image->nrows;
    i_ncols = image->ncols;
    o_nrows = i_nrows / inv;
    o_ncols = i_ncols / inv;
    inv2 = inv * inv;
    AllocTo( o_pixline, 3 * (1 + o_ncols) );
    AllocTo( o_fracline, 3 * (1 + o_ncols) );
    memset (o_pixline, 0, 3 * o_ncols * sizeof(byte));
    memset (o_fracline, 0, 3 * o_ncols * sizeof(byte));
    if (image->pixels)
    {
        UFor( row, i_nrows )
        {
            uint col;
            byte* i_pixline;
            i_pixline = &image->pixels[row * 3 * image->stride];
            UFor( col, i_ncols )
            {
                uint i, o_off;
                o_off = 3 * (col / inv);
                UFor( i, 3 )
                {
                    uint x, y;
                    x = i_pixline[3*col+i];
                    /* Quotient adds in now; remainder carries over.*/
                    y = (x % inv2) + o_fracline[o_off+i];
                    x = (x / inv2) + (y / inv2);
                    y = (y % inv2);
                    o_pixline[o_off+i] += x;
                    o_fracline[o_off+i] = y;
                }
            }
            /* Emit one finished output row per /inv/ input rows,
             * written packed into the front of the pixel buffer.
             */
            if ((row + 1) % inv == 0)
            {
                uint n;
                n = 3 * o_ncols;
                CopyT( byte, &image->pixels[(row/inv) * n], o_pixline, 0, n );
                memset (o_pixline, 0, n * sizeof(byte));
                memset (o_fracline, 0, n * sizeof(byte));
            }
        }
    }
    UFor( row, o_nrows )
    {
        uint col;
        UFor( col, o_ncols )
        {
            uint dst_idx, src_idx;
            dst_idx = row * o_ncols + col;
            src_idx = inv * (row * image->stride + col);
            if (image->hits)
                image->hits[dst_idx] = image->hits[src_idx];
            if (image->mags)
                image->mags[dst_idx] = image->mags[src_idx];
        }
    }
    free (o_pixline);
    free (o_fracline);
    image->nrows = o_nrows;
    image->ncols = o_ncols;
    /* Shrink allocations, then drop the padded stride (rows are packed).*/
    resize_RayImage (image);
    image->stride = image->ncols;
}
void cleanup_RayImage (RayImage* image)
{
    /* Free whichever buffers were allocated, and null the pointers so
     * a repeated cleanup or a later resize_RayImage() (which keys off
     * these pointers being non-null) is safe.
     */
    if (image->hits)  { free (image->hits);  image->hits = 0; }
    if (image->mags)  { free (image->mags);  image->mags = 0; }
    if (image->pixels)  { free (image->pixels);  image->pixels = 0; }
}
#endif /* #ifndef __OPENCL_VERSION__ */
void
map_vertex_normal (Point* normal,
                   const Point* vnmls,
                   const SceneElement* elem,
                   const Point* bpoint)
{
    /* Barycentric interpolation of per-vertex normals:
     *   normal = sum_i bpoint[i] * vnmls[elem->vnmls[i]]
     * The result is generally NOT unit length; callers normalize.
     */
    uint dim;
    zero_Point (normal);
    UFor( dim, NDimensions )
    {
        assert (elem->vnmls[dim] != UINT_MAX);
        follow_Point (normal, normal, &vnmls[elem->vnmls[dim]],
                      bpoint->coords[dim]);
    }
}
static
const ObjectRaySpace*
ray_to_ObjectRaySpace (Point* ret_origin,
                       Point* ret_dir,
                       const Point* origin,
                       const Point* dir,
                       const RaySpace* space,
                       uint objidx)
{
    /* Resolve /objidx/ to an object and express the ray in that
     * object's frame. Index /nobjects/ denotes the main object.
     */
    const ObjectRaySpace* object;
    Claim2( objidx ,<=, space->nobjects );
    if (objidx >= space->nobjects)
    {
        /* The main object lives in the world frame; pass through.*/
        object = &space->main;
        *ret_origin = *origin;
        *ret_dir = *dir;
    }
    else
    {
        /* Dynamic objects keep geometry local: transform the ray into
         * the object's basis instead of transforming the geometry.
         */
        object = &space->objects[objidx];
        ray_to_basis (ret_origin, ret_dir,
                      &object->orientation,
                      origin, dir,
                      &object->centroid);
    }
    return object;
}
/** Assume no solids inside one another.
 * Otherwise, we'd need to track an IOR.
 **/
static
void
refraction_ray (Point* dst, const Point* dir, const Point* normal,
                real r, bool entering, real cos_normal)
{
    /* Bend /dir/ about the oriented /normal/ for a medium change with
     * index ratio /r/ (inverted when exiting). /cos_normal/ is the
     * cosine used to scale /dir/ onto the normal.
     * NOTE(review): total internal reflection would make the sqrt
     * argument negative (d becomes NaN) — no guard here; confirm
     * callers cannot trigger it.
     */
    Point a, b;
    real d;
    if (r == 1)
    {
        /* Matched indices: no bending.*/
        *dst = *dir;
        return;
    }
    /*        dir
     * A =  -------
     *      ||N*D||
     */
    scale_Point (&a, dir, 1 / cos_normal);
    /* B = A + N */
    summ_Point (&b, &a, normal);
    /*                      1
     * d = ------------------------------------
     *     sqrt(((r1/r2)^2 * ||A||^2) - ||B||^2)
     */
    if (!entering)  r = 1 / r;
    d = r * r * dot_Point (&a, &a) - dot_Point (&b, &b);
    d = 1 / sqrt (d);
    /* dst = d*A + (1-d)(-N) */
    scale_Point (&a, &a, d);
    scale_Point (&b, normal, d-1);
    summ_Point (dst, &a, &b);
    normalize_Point (dst, dst);
}
static
uint
splitting_plane_count (const Point* origin, const Point* direct, real mag,
                       const KDTree* tree, const BBox* box)
{
    /* Count the KD-tree leaves visited while walking the ray from
     * /origin/ until either the leaf containing the point at distance
     * /mag/ or the ray exits /box/. Used only for fill_pixel's
     * splitting-plane debug visualization.
     */
    uint count = 0;
    uint node_idx, parent, destin_nodeidx;
    Point destin;
    Point invdirect;
    Ray ray;
    bool inside_box;
    ray.origin = *origin;
    ray.direct = *direct;
    follow_Ray (&destin, &ray, mag);
    /* Leaf that terminates the walk, when the endpoint is in bounds.*/
    if (inside_BBox (box, &destin))
        destin_nodeidx = find_KDTreeNode (&parent, &destin,
                                          tree->nodes);
    else
        destin_nodeidx = UINT_MAX;
#if 0
    /* Return the number of elements in the hit node.*/
    return ((destin_nodeidx == UINT_MAX) ? 0 :
            tree->nodes[destin_nodeidx].as.leaf.nelems);
#endif
    inside_box = inside_BBox (box, origin);
    reci_Point (&invdirect, direct);
    node_idx = first_KDTreeNode (&parent, &ray,
                                 tree->nodes,
                                 box, inside_box);
    /* The starting leaf counts only when the origin is in the box.*/
    if (node_idx != UINT_MAX && inside_box)
        count += 1;
    while (node_idx != UINT_MAX && node_idx != destin_nodeidx)
    {
        count += 1;
        node_idx = next_KDTreeNode (&parent, &ray, &invdirect,
                                    Max_real,
                                    node_idx, tree->nodes);
    }
    return count;
}
#ifndef __OPENCL_VERSION__
/** Get the ambient, diffuse, and specular components
 * of a pixel without considering illumination.
 **/
static void
pixel_from_Material (Color* ambient, Color* diffuse,
                     Color* specular, Color* emissive,
                     const Material* matl,
                     const BaryPoint* texpoint,
                     const Scene* scene)
{
    if (!matl)
    {
        /* If the cosine between light and normal is 1,
         * and the light intensity is 1,
         * then the resulting color value should be exactly 1.
         * Thus, diffuse + ambient = 1 by default.
         */
        set_Color (ambient, .2);
        set_Color (diffuse, .8);
        set_Color (specular, 0);
        set_Color (emissive, 0);
        return;
    }
    *ambient = matl->ambient;
    *diffuse = matl->diffuse;
    *specular = matl->specular;
    *emissive = matl->emissive;
    /* No texture coordinates means no texture mapping to do.*/
    if (!texpoint)  return;
    /* Texture mapping: blend each component toward the sampled texture
     * color by the texture's alpha.
     */
    if (matl->ambient_texture != UINT_MAX)
    {
        const Texture* tex = &scene->txtrs[matl->ambient_texture];
        Color color;
        real alpha = map_Texture (&color, tex, texpoint);
        mix_Color (ambient, ambient, &color, alpha);
    }
    if (matl->diffuse_texture != UINT_MAX)
    {
        const Texture* tex = &scene->txtrs[matl->diffuse_texture];
        Color color;
        real alpha = map_Texture (&color, tex, texpoint);
        mix_Color (diffuse, diffuse, &color, alpha);
    }
    if (matl->specular_texture != UINT_MAX)
    {
        const Texture* tex = &scene->txtrs[matl->specular_texture];
        Color color;
        real alpha = map_Texture (&color, tex, texpoint);
        mix_Color (specular, specular, &color, alpha);
    }
}
void
find_holes (TableT(uint2)* holes, const RayImage* ray_image, uint nelems)
{
    /* Collect (row, col) of every "hole": a missed pixel (hit index
     * >= nelems) whose eight neighbors all hit. Border pixels are
     * never holes since they lack a full neighborhood.
     */
    const uint* hits = ray_image->hits;
    const uint stride = ray_image->stride;
    if (ray_image->nrows < 3 || ray_image->ncols < 3)
        return;
    if (!hits) {
        DBog0("hits array is null");
        return;
    }
    for (uint row = 1; row < ray_image->nrows - 1; ++row)
    {
        for (uint col = 1; col < ray_image->ncols - 1; ++col)
        {
            bool surrounded = true;
            for (uint dr = 0; dr < 3 && surrounded; ++dr)
            {
                for (uint dc = 0; dc < 3; ++dc)
                {
                    if (dr == 1 && dc == 1)  continue;  /* center */
                    if (nelems <= hits[(row+dr-1) * stride + (col+dc-1)])
                    {
                        surrounded = false;
                        break;
                    }
                }
            }
            if (surrounded && nelems <= hits[row * stride + col])
            {
                uint2 hole;
                hole.s[0] = row;
                hole.s[1] = col;
                PushTable( *holes, hole );
            }
        }
    }
}
void
diffuse_camera_shading (Color* ret_color, const Ray* ray, const Point* normal)
{
    /* "Headlight" shading: a gray level equal to |cos| of the angle
     * between the viewing ray and the surface normal, written to every
     * color channel. Overwrites /ret_color/.
     */
    real cos_normal = dot_Point (&ray->direct, normal);
    if (cos_normal < 0)
        cos_normal = - cos_normal;
    {:for (dim ; NColors)
        ret_color->coords[dim] = cos_normal;
    }
}
void
fill_pixel (Color* ret_color,
            uint hitidx, real mag, uint objidx,
            const RayImage* image,
            const Point* origin,
            const Point* dir,
            const RaySpace* space,
            Trit front,
            uint nbounces,
            URandom* urandom)
{
    /* Shade one ray and ADD the result into /ret_color/.
     * hitidx/mag/objidx come from a cast: objidx <= space->nobjects
     * means a hit (index nobjects == the main object); anything larger
     * means the ray escaped to the sky. /front/ and /nbounces/ thread
     * the recursion state used by cast_colors() for reflection and
     * refraction.
     */
    /* Compile-time debug toggles.*/
    const bool shade_by_element = false;
    const bool color_by_element = false;
    const bool compute_bary_coords = true;
    const bool show_splitting_planes = false;
    Color color;
    const BarySimplex* simplex;
    const ObjectRaySpace* object;
    Point rel_origin, rel_dir;
    const Scene* scene;
    const SceneElement* elem;
    const Material* material = 0;
    bool hit_front;
    real cos_normal;
    Point bpoint, normal;
    BaryPoint texpoint;
    uint i;
    /* Base color: white for a hit; sky texture or black for a miss.*/
    if (objidx <= space->nobjects)
    {
        set_Color (&color, 1);
    }
    else if (space->skytxtr < space->main.scene.ntxtrs)
    {
        map_sky_Texture (&color,
                         &space->main.scene.txtrs[space->skytxtr],
                         dir);
    }
    else {
        set_Color (&color, 0);
    }
    if (show_splitting_planes && objidx > space->nobjects)
    {
        /* Debug view: tint misses by the number of KD leaves crossed.*/
        const real frac = .1;
        real red;
        uint nplanes;
        nplanes = splitting_plane_count (origin, dir, mag,
#if 1
                                         &space->main.tree,
                                         &space->main.box
#else
                                         &space->object_tree,
                                         &space->box
#endif
                                        );
        red = 1;
        UFor( i, nplanes )
            red = (1 - frac) * red;
        UFor( i, NColors )
        {
            if (i == 0)
                color.coords[i] = clamp_real (1 - red * (1 - color.coords[i]), 0, 1);
            else
                color.coords[i] = clamp_real (red * color.coords[i], 0, 1);
        }
    }
    /* Miss: nothing further to shade.*/
    if (objidx > space->nobjects)
    {
        summ_Color (ret_color, ret_color, &color);
        return;
    }
    /* Work in the hit object's local frame from here on.*/
    object = ray_to_ObjectRaySpace (&rel_origin, &rel_dir,
                                    origin, dir, space, objidx);
    simplex = &object->simplices[hitidx];
    /* Front-facing iff the ray runs against the face normal.*/
    hit_front = (0 >= dot_Point (&rel_dir, &simplex->plane.normal));
    scene = &object->scene;
    elem = &scene->elems[hitidx];
    if (elem->material != UINT_MAX)
        material = &scene->matls[elem->material];
    if (image->color_distance_on && mag < image->view_light)
    {
        real val;
        /* Distance color scale: hue cycles with distance up to
         * /view_light/, overriding the base color.
         */
        zero_Color (&color);
        val = 2 * NColors * mag / image->view_light;
        UFor( i, 2*NColors )
        {
            if (val < i+1)
            {
                uint idx1, idx2;
                idx1 = i / 2;
                idx2 = (idx1 + 1) % NColors;
                if (even_uint (i))
                {
                    color.coords[idx1] = 1;
                    color.coords[idx2] = val - i;
                }
                else
                {
                    color.coords[idx1] = i+1 - val;
                    color.coords[idx2] = 1;
                }
                break;
            }
        }
    }
    if (compute_bary_coords)
    {
        /* Barycentric coordinates of the hit point, plus interpolated
         * texture coordinates when the element has them.
         */
        Point rel_isect;
        follow_Point (&rel_isect, &rel_origin, &rel_dir, mag);
        barycentric_Point (&bpoint, &rel_isect, simplex);
        if (elem->txpts[0] < UINT_MAX)
        {
            Op_s( real, NDimensions-1, texpoint.coords , 0 );
            UFor( i, NDimensions )
            {
                assert (elem->txpts[i] < UINT_MAX);
                Op_2020s( real, NDimensions-1, texpoint.coords
                          ,+, texpoint.coords
                          ,   *, scene->txpts[elem->txpts[i]].coords
                          ,      bpoint.coords[i] );
            }
        }
    }
    /* Get the normal: bump map > vertex normals > flat face normal.*/
    if (material && material->bump_texture < UINT_MAX)
        map_bump_Texture (&normal, &scene->txtrs[material->bump_texture],
                          &texpoint);
    else if (compute_bary_coords && 0 < scene->nvnmls)
        map_vertex_normal (&normal, scene->vnmls, elem, &bpoint);
    else
        copy_Point (&normal, &simplex->plane.normal);
    /* Rotate it when necessary (dynamic objects store local normals).*/
    if (objidx != space->nobjects)
        trxfrm_Point (&normal, &object->orientation, &normal);
    /* The above cases do not give a normalized vector!*/
    normalize_Point (&normal, &normal);
    if (image->diffuse_camera_on) {
        /* Headlight-only shading: skip lights and recursion entirely.*/
        Ray ray;
        ray.origin = *origin;
        ray.direct = *dir;
        diffuse_camera_shading (ret_color, &ray, &normal);
        return;
    }
    /* Assure the normal is in our direction.*/
    cos_normal = dot_Point (dir, &normal);
    if (hit_front)
        cos_normal = - cos_normal;
    else
        negate_Point (&normal, &normal);
    if (color_by_element)
    {
        /* Debug view: pseudo-random color per element by spreading the
         * element index's bits across the three channels.
         */
        uint color_diff, x, y;
        const uint nincs = 256;
        assert (NColors == 3);
        color_diff = 0xFFFFFF / scene->nelems;
        x = color_diff * (scene->nelems - hitidx);
        y = 0;
        UFor( i, 3 )
        {
            uint j;
            UFor( j, 8 )
            {
                if (0 != (x & (1 << (i + 3*j))))
                    y |= (1 << (8*i + j));
            }
        }
        color.coords[0] *= (real) ((y & 0xFF0000) >> 16) / (nincs-1);
        color.coords[1] *= (real) ((y & 0x00FF00) >> 8) / (nincs-1);
        color.coords[2] *= (real) ((y & 0x0000FF) >> 0) / (nincs-1);
    }
    if (shade_by_element)
    {
        /* Debug view: brightness falls off with element index.*/
        real scale = 0;
        scale = 1 - (real) hitidx / scene->nelems;
        scale_Color (&color, &color, scale);
    }
    else if (image->shading_on)
    {
        /* Phong-style lighting, then lightcuts, refraction, reflection.*/
        Point isect, refldir;
        Color dscale, sscale;
        Color ambient, diffuse, specular, emissive;
        pixel_from_Material (&ambient, &diffuse,
                             &specular, &emissive,
                             material,
                             (compute_bary_coords ? &texpoint : 0),
                             scene);
        zero_Color (&dscale);
        zero_Color (&sscale);
        /* Mirror direction and the world-space intersection point.*/
        follow_Point (&refldir, dir, &normal, 2 * cos_normal);
        follow_Point (&isect, origin, dir, mag);
        {:for (light_idx ; space->nlights)
            real tscale, magtolight;
            Point tolight;
            const PointLightSource* const light = &space->lights[light_idx];
            if (!light->on)  continue;
            diff_Point (&tolight, &light->location, &isect);
            magtolight = magnitude_Point (&tolight);
            scale_Point (&tolight, &tolight, 1 / magtolight);
            /* Lambert term.*/
            tscale = dot_Point (&tolight, &normal);
            if (tscale > 0)
            {
                Ray tolight_ray;
                if (light->hemisphere)
                {
                    /* Hemisphere light: only lit from its front side.*/
                    real dot = - dot_Point (&tolight, &light->direct);
                    if (dot <= 0)  continue;
                    tscale *= dot;
                }
                tolight_ray.origin = isect;
                tolight_ray.direct = tolight;
                {
                    const real dot = dot_Point (&simplex->plane.normal, &tolight_ray.direct);
                    if (dot < 0) {
                        const real adjust = 1e2*Epsilon_real * taximag_Point (&isect);
                        /* NOTE(review): the /true ||/ makes this branch
                         * always skip the light, so the self-shadow
                         * nudge below is currently unreachable —
                         * confirm intent.
                         */
                        if (true || dot < 10 * (M_PI/180))
                            continue;
                        magtolight -= adjust;
                        follow_Point (&tolight_ray.origin, &tolight_ray.origin,
                                      &tolight_ray.direct, adjust);
                    }
                }
                /* Shadow ray: light contributes only when unobstructed.*/
                if (cast_to_light (space, &tolight_ray,
                                   hit_front ? Yes : Nil,
                                   magtolight))
                {
                    real dist_factor = 1;
                    tscale *= dist_factor;
                    /* Add diffuse portion.*/
                    follow_Color (&dscale, &dscale,
                                  &light->intensity, tscale);
                    /* Specular */
                    if (!light->diffuse && material)
                    {
                        real dot;
                        dot = dot_Point (&refldir, &tolight);
                        if (dot > 0)
                        {
                            tscale = pow (dot, material->shininess);
                            Op_20s( real, NColors, sscale.coords
                                    ,+, sscale.coords , tscale );
                        }
                    }
                }
            }
        }
        /* Combine the lighting components per channel.*/
        UFor( i, NColors )
            color.coords[i] *=
                ambient.coords[i]
                + diffuse.coords[i] * dscale.coords[i]
                + specular.coords[i] * sscale.coords[i]
                + emissive.coords[i];
        if (space->lightcuts.nodes.sz > 0)
        {
            /* Many-light contribution via the lightcut tree.*/
            Color tmp;
            RayHit hit;
            hit.isect = isect;
            negate_Point (&hit.incid, dir);
            hit.normal = normal;
            hit.front = front;
            hit.mag = mag;
            cast_LightCutTree (&tmp, &space->lightcuts,
                               &diffuse, &hit, space, urandom);
            summ_Color (&color, &color, &tmp);
        }
        if (material && material->opacity < 1)
        {
            /* Refraction: recurse along the transmitted ray, weighted
             * by transparency.
             */
            Point tmp_dir;
            Color factor;
            scale_Color (&color, &color, material->opacity);
            scale_Color (&factor, &material->transmission,
                         1 - material->opacity);
            refraction_ray (&tmp_dir, dir, &normal,
                            material->optical_density, hit_front, cos_normal);
            cast_colors (&color, space, image, &isect, &tmp_dir,
                         &factor, front == Nil ? Yes : Nil, nbounces, urandom);
        }
        if (material && material->reflective)
        {
            /* Reflection: recurse along the mirrored ray.*/
            Color factor;
            scale_Color (&factor, &material->specular, material->opacity);
            cast_colors (&color, space, image, &isect, &refldir,
                         &factor, front, nbounces, urandom);
        }
    }
    summ_Color (ret_color, ret_color, &color);
}
#endif /* #ifndef __OPENCL_VERSION__ */
static
void
test_intersections (uint* restrict ret_hit,
                    real* restrict ret_mag,
                    const Ray* restrict ray,
                    uint nelemidcs,
                    __global const uint* restrict elemidcs,
                    __global const BarySimplex* restrict simplices,
                    __global const Simplex* restrict tris,
                    Trit front)
{
    /* Brute-force the ray against the listed elements, keeping the
     * closest hit. ret_hit/ret_mag are in-out: they seed the search
     * and are only ever improved. /front/ selects face culling.
     */
    uint hit_idx = *ret_hit;
    real hit_mag = *ret_mag;
    {:for ( i ; nelemidcs )
        bool didhit;
        uint tmp_hit = elemidcs[i];
        real tmp_mag;
        /* Two hit-test formulations, chosen at compile time.*/
        if (BarycentricRayTrace)
        {
            const BarySimplex simplex = simplices[tmp_hit];
            didhit = hit_BarySimplex (&tmp_mag, ray,
                                      &simplex,
                                      front);
        }
        else
        {
            didhit = hit_Simplex (&tmp_mag, *ray, tris[tmp_hit],
                                  front);
        }
        //if (DBogTraceOn)
        //    DBog2( "Test:%u  hit?:%s", tmp_hit, didhit ? "yes" : "no" );
        if (didhit && tmp_mag < hit_mag)
        {
            hit_idx = tmp_hit;
            hit_mag = tmp_mag;
        }
    }
    *ret_hit = hit_idx;
    *ret_mag = hit_mag;
}
void
cast_Ray (uint* restrict ret_hit, real* restrict ret_mag,
          const Ray* restrict ray,
          const uint nelems,
          __global const uint* restrict elemidcs,
          __global const KDTreeNode* restrict nodes,
          __global const BarySimplex* restrict simplices,
          __global const Simplex* restrict tris,
          const BBox* restrict box,
          bool inside_box,
          Trit front)
{
    /* Trace one ray through a KD-tree: visit leaves along the ray and
     * test their elements, with the current best magnitude doubling as
     * the traversal cutoff. When KDTreeRayTrace is disabled, fall back
     * to brute force over all /nelems/ elements.
     * ret_hit/ret_mag are in-out and only improved.
     */
    Point invdirect;
    uint node_idx, parent = 0;
    uint hit_idx = *ret_hit;
    real hit_mag = *ret_mag;
    if (!KDTreeRayTrace)
    {
        test_intersections (&hit_idx, &hit_mag, ray,
                            nelems, elemidcs,
                            simplices, tris, front);
        *ret_hit = hit_idx;
        *ret_mag = hit_mag;
        return;
    }
    /* Reciprocal direction speeds up the slab tests during traversal.*/
    reci_Point (&invdirect, &ray->direct);
    {
        const BBox tmp_box = *box;
        node_idx = first_KDTreeNode (&parent, ray,
                                     nodes, &tmp_box, inside_box);
    }
    while (node_idx != UINT_MAX)
    {
        __global const KDTreeLeaf* restrict leaf;
        leaf = &nodes[node_idx].as.leaf;
        test_intersections (&hit_idx, &hit_mag, ray,
                            leaf->nelems, &elemidcs[leaf->elemidcs],
                            simplices, tris, front);
        /* hit_mag bounds the walk: leaves beyond it are skipped.*/
        node_idx = next_KDTreeNode (&parent, ray, &invdirect,
                                    hit_mag,
                                    node_idx, nodes);
    }
    *ret_hit = hit_idx;
    *ret_mag = hit_mag;
}
void
cast1_ObjectRaySpace (uint* ret_hit, real* ret_mag,
                      const Point* origin,
                      const Point* direct,
                      const ObjectRaySpace* object,
                      bool inside_box,
                      Trit front)
{
    /* Package the origin/direction pair as a Ray and trace it against
     * this object's KD-tree. ret_hit/ret_mag are in-out: they carry
     * the best hit so far and are only improved upon.
     */
    Ray ray;
    copy_Point (&ray.origin, origin);
    copy_Point (&ray.direct, direct);
    cast_Ray (ret_hit, ret_mag, &ray,
              object->nelems,
              object->tree.elemidcs, object->tree.nodes,
              object->simplices, object->elems,
              &object->box, inside_box, front);
}
void
cast_nopartition (uint* ret_hit,
                  real* ret_mag,
                  uint* ret_object,
                  const RaySpace* restrict space,
                  const Point* restrict origin,
                  const Point* restrict dir,
                  bool inside_box,
                  Trit front,
                  uint ignore_object)
{
    /* Cast against every object in turn (no object-level KD-tree),
     * keeping the globally closest hit. ret_hit/ret_mag/ret_object are
     * in-out: they seed the search and are only improved.
     * /ignore_object/ is skipped (pass UINT_MAX to test all objects).
     */
    uint i;
    uint hit_idx;
    real hit_mag;
    uint hit_object;
    hit_idx = *ret_hit;
    hit_mag = *ret_mag;
    hit_object = *ret_object;
    if (space->main.visible)
        cast1_ObjectRaySpace (&hit_idx, &hit_mag, origin, dir,
                              &space->main, inside_box, front);
    /* Index /nobjects/ denotes the main object.*/
    if (hit_idx < space->main.nelems)
        hit_object = space->nobjects;
    UFor( i, space->nobjects )
    {
        Point rel_origin, rel_dir;
        const ObjectRaySpace* object;
        bool rel_inside_box;
        /* Returns from ray cast.*/
        uint tmp_hit;
        real tmp_mag;
        if (i == ignore_object || !space->objects[i].visible)  continue;
        object = ray_to_ObjectRaySpace (&rel_origin, &rel_dir,
                                        origin, dir, space, i);
        rel_inside_box =
            inside_BBox (&object->box, &rel_origin);
        tmp_hit = UINT_MAX;
        /* Bound this cast by the best magnitude found so far rather
         * than the caller's original bound: hits beyond /hit_mag/ were
         * rejected below anyway, and the tighter bound lets the
         * KD-tree traversal in cast_Ray terminate earlier.
         */
        tmp_mag = hit_mag;
        cast1_ObjectRaySpace (&tmp_hit, &tmp_mag,
                              &rel_origin, &rel_dir,
                              object, rel_inside_box, front);
        if (tmp_mag < hit_mag)
        {
            hit_idx = tmp_hit;
            hit_mag = tmp_mag;
            hit_object = i;
        }
    }
    *ret_hit = hit_idx;
    *ret_mag = hit_mag;
    *ret_object = hit_object;
}
#ifndef __OPENCL_VERSION__
static
void
test_object_intersections (uint* ret_hit,
                           real* ret_mag,
                           uint* ret_object,
                           BitTable tested,
                           const Ray* ray,
                           uint nobjectidcs,
                           const uint* objectidcs,
                           const RaySpace* space,
                           Trit front)
{
    /* Cast the ray into each listed object (in its own frame), keeping
     * the globally closest hit. /tested/ marks objects already cast so
     * an object straddling several KD leaves is traced only once;
     * set1_BitTable reports the bit's previous value.
     * ret_hit/ret_mag/ret_object are updated in place, so /ret_mag/
     * also tightens the bound for subsequent objects.
     */
    uint i;
    UFor( i, nobjectidcs )
    {
        uint objidx;
        Point rel_origin, rel_dir;
        const ObjectRaySpace* object;
        bool rel_inside_box;
        /* Returns from ray cast.*/
        uint tmp_hit;
        real tmp_mag;
        objidx = objectidcs[i];
        if (set1_BitTable (tested, objidx))  continue;
        object = ray_to_ObjectRaySpace (&rel_origin, &rel_dir,
                                        &ray->origin, &ray->direct,
                                        space, objidx);
        rel_inside_box =
            inside_BBox (&object->box, &rel_origin);
        tmp_hit = UINT_MAX;
        tmp_mag = *ret_mag;
        cast1_ObjectRaySpace (&tmp_hit, &tmp_mag, &rel_origin, &rel_dir,
                              object, rel_inside_box, front);
        if (tmp_mag < *ret_mag)
        {
            *ret_hit = tmp_hit;
            *ret_mag = tmp_mag;
            *ret_object = objidx;
        }
    }
}
static
void
cast_partitioned (uint* ret_hit,
                  real* ret_mag,
                  uint* ret_object,
                  const RaySpace* restrict space,
                  const Point* restrict origin,
                  const Point* restrict dir,
                  bool inside_box,
                  Trit front,
                  uint ignore_object)
{
    /* Cast through the object-level KD-tree: walk its leaves along the
     * ray and defer to test_object_intersections() for the objects in
     * each leaf. The fixed-size bit table caps the object count at
     * 128 (asserted below); /ignore_object/ is pre-marked as tested so
     * it is skipped entirely.
     */
    FixDeclBitTable( tested, 128, 0 );
    uint node_idx, parent = 0;
    const KDTreeNode* restrict nodes;
    const uint* restrict elemidcs;
    Point invdirect;
    uint hit_idx;
    real hit_mag;
    uint hit_object;
    Ray ray;
    copy_Point (&ray.origin, origin);
    copy_Point (&ray.direct, dir);
    assert (space->nobjects < tested.sz);
    if (ignore_object <= space->nobjects)
        set1_BitTable (tested, ignore_object);
    nodes = space->object_tree.nodes;
    elemidcs = space->object_tree.elemidcs;
    hit_idx = *ret_hit;
    hit_mag = *ret_mag;
    hit_object = *ret_object;
    reci_Point (&invdirect, &ray.direct);
    node_idx = first_KDTreeNode (&parent, &ray, nodes,
                                 &space->box, inside_box);
    while (node_idx != UINT_MAX)
    {
        __global const KDTreeLeaf* restrict leaf;
        leaf = &nodes[node_idx].as.leaf;
        test_object_intersections (&hit_idx, &hit_mag, &hit_object,
                                   tested,
                                   &ray, leaf->nelems,
                                   &elemidcs[leaf->elemidcs],
                                   space, front);
        /* The current best magnitude bounds the traversal.*/
        node_idx = next_KDTreeNode (&parent, &ray,
                                    &invdirect,
                                    hit_mag,
                                    node_idx, nodes);
    }
    *ret_hit = hit_idx;
    *ret_mag = hit_mag;
    *ret_object = hit_object;
}
void
cast1_RaySpace (uint* ret_hit, real* ret_mag,
                uint* ret_objidx,
                const Ray* ray,
                const RaySpace* space,
                Trit front)
{
    /* Dispatch a single ray cast to the partitioned or brute-force
     * path depending on whether the object-level KD-tree is in use.
     * ret_hit/ret_mag/ret_objidx are in-out.
     */
    const bool inside_box = inside_BBox (&space->box, &ray->origin);
    if (space->partition)
        cast_partitioned (ret_hit, ret_mag, ret_objidx,
                          space, &ray->origin, &ray->direct,
                          inside_box, front, UINT_MAX);
    else
        cast_nopartition (ret_hit, ret_mag, ret_objidx,
                          space, &ray->origin, &ray->direct,
                          inside_box, front, UINT_MAX);
}
void
cast_colors (Color* ret_color,
             const RaySpace* restrict space,
             const RayImage* restrict image,
             const Point* restrict origin,
             const Point* restrict dir,
             const Color* factor,
             Trit front,
             uint nbounces,
             URandom* urandom)
{
    /* Secondary-ray entry point for reflection/refraction recursion:
     * cast the ray, shade it via fill_pixel(), scale the result by
     * /factor/, and ADD it into /ret_color/. Recursion stops once
     * /nbounces/ reaches image->nbounces_max.
     */
    Color color;
    uint hit_idx = UINT_MAX;
    real hit_mag = Max_real;
    uint hit_object = UINT_MAX;
    bool inside_box;
    if (nbounces >= image->nbounces_max)  return;
    if (space->partition)
    {
        inside_box = inside_BBox (&space->box, origin);
        cast_partitioned (&hit_idx, &hit_mag, &hit_object,
                          space, origin, dir, inside_box,
                          front,
                          UINT_MAX);
    }
    else
    {
        /* Without a partition, only the main object's box matters.*/
        inside_box = inside_BBox (&space->main.box, origin);
        cast_nopartition (&hit_idx, &hit_mag, &hit_object,
                          space, origin, dir, inside_box,
                          front,
                          UINT_MAX);
    }
    zero_Color (&color);
    fill_pixel (&color, hit_idx, hit_mag, hit_object,
                image, origin, dir, space, front, nbounces+1,
                urandom);
    prod_Color (&color, &color, factor);
    summ_Color (ret_color, ret_color, &color);
}
bool
cast_to_light (const RaySpace* restrict space,
               const Ray* restrict ray,
               Trit front,
               real magtolight)
{
    /* Shadow test: trace toward the light, bounded by its distance.
     * The light is visible iff nothing closer shortened the returned
     * magnitude (checked with a loose approximate comparison).
     */
    uint shadow_hit = UINT_MAX;
    uint shadow_obj = UINT_MAX;
    real mag = magtolight;
    cast1_RaySpace (&shadow_hit, &mag, &shadow_obj, ray, space, front);
    return approx_eql (magtolight, mag, 1, 1e2);
}
static
void
cast_record (uint* hitline,
             real* magline,
             byte* pixline,
             uint col,
             const RaySpace* restrict space,
             const RayImage* restrict image,
             const Point* restrict origin,
             const Point* restrict dir,
             bool inside_box,
             URandom* urandom)
{
    /* Cast one primary ray and record the results at column /col/ of
     * whichever per-row output buffers are non-null: hit index,
     * magnitude, and/or shaded 8-bit RGB.
     */
    uint hit_idx = UINT_MAX;
    real hit_mag = Max_real;
    uint hit_object = UINT_MAX;
    /* Primary rays may cull back faces when the image asks for it.*/
    const Trit front = image->culling_on ? Yes : May;
    if (space->partition)
        cast_partitioned (&hit_idx, &hit_mag, &hit_object,
                          space, origin, dir, inside_box,
                          front,
                          UINT_MAX);
    else
        cast_nopartition (&hit_idx, &hit_mag, &hit_object,
                          space, origin, dir, inside_box,
                          front,
                          UINT_MAX);
    if (hitline)  hitline[col] = hit_idx;
    if (magline)  magline[col] = hit_mag;
    if (pixline)
    {
        Color color;
        zero_Color (&color);
        fill_pixel (&color, hit_idx, hit_mag, hit_object,
                    image, origin, dir, space, front, 0,
                    urandom);
        /* Quantize to bytes; the 255.5 scale rounds to nearest.*/
        {:for (i ; NColors)
            pixline[3*col+i] = (byte)
                clamp_real (255.5 * color.coords[i], 0, 255.5);
        }
    }
}
#endif /* #ifndef __OPENCL_VERSION__ */
#ifndef __OPENCL_VERSION__
void
rays_to_hits_fish (RayImage* restrict image,
                   const RaySpace* space,
                   const Point* origin,
                   const PointXfrm* view_basis,
                   real view_angle)
{
    /* Render the whole image with a fisheye-style projection: pixel
     * position maps linearly to a pair of angles spanning /view_angle/
     * on each axis. Rows are traced in parallel, each thread with its
     * own random stream.
     */
#ifndef _WIN32
    uint row;
#else
    /* Signed on Windows — presumably for MSVC's OpenMP loop rules.*/
    int row;
#endif
    bool inside_box;
    const uint row_dim = UpDim;
    const uint col_dim = RightDim;
    const uint dir_dim = ForwardDim;
    uint nrows, ncols;
    real col_start, row_start;
    real col_delta, row_delta;
    nrows = image->nrows;
    ncols = image->ncols;
    /* Half-pixel offsets center the samples within their cells.*/
    row_start = - view_angle / 2;
    row_delta = view_angle / nrows;
    row_start += row_delta / 2;
    col_start = - view_angle / 2;
    col_delta = view_angle / ncols;
    col_start += col_delta / 2;
    inside_box = inside_BBox (&space->box, origin);
#pragma omp parallel
    {
        URandom urandom[1];
#ifdef _OPENMP
        /* Decorrelate the per-thread random streams.*/
        init2_URandom (urandom, omp_get_thread_num(), omp_get_num_threads());
#else
        init_URandom (urandom);
#endif
#pragma omp for schedule(dynamic)
        UFor( row, nrows )
        {
            uint col;
            real row_angle;
            uint* hitline = 0;
            real* magline = 0;
            byte* pixline = 0;
            if (image->hits)  hitline = &image->hits[row * image->stride];
            if (image->mags)  magline = &image->mags[row * image->stride];
            if (image->pixels)  pixline = &image->pixels[row * 3 * image->stride];
            row_angle = row_start + row_delta * row;
            UFor( col, ncols )
            {
                Point tdir, dir;
                real col_angle;
                col_angle = col_start + col_delta * col;
                /* Direction from the two angles, then rotated into the
                 * view basis and normalized.
                 */
                zero_Point (&dir);
                dir.coords[row_dim] = sin (row_angle);
                dir.coords[col_dim] = sin (col_angle);
                dir.coords[dir_dim] = cos (row_angle) + cos (col_angle);
                trxfrm_Point (&tdir, view_basis, &dir);
#if 0
                dir.coords[row_dim] = (tdir.coords[row_dim] * (1 + cos (row_angle))
                                       + tdir.coords[dir_dim] * sin (row_angle));
                dir.coords[col_dim] = (tdir.coords[col_dim] * (1 + cos (col_angle))
                                       + tdir.coords[dir_dim] * sin (col_angle));
                dir.coords[dir_dim] = (tdir.coords[dir_dim] * (cos (row_angle) + cos (col_angle))
                                       - tdir.coords[row_dim] * sin (row_angle)
                                       - tdir.coords[col_dim] * sin (col_angle));
#endif
                normalize_Point (&dir, &tdir);
                cast_record (hitline, magline, pixline, col,
                             space, image,
                             origin, &dir, inside_box,
                             urandom);
            }
        }
    }
}
void
rays_to_hits_fixed_plane (uint* hits, real* mags,
                          uint nrows, uint ncols, uint stride,
                          const RaySpace* space, real zpos)
{
    /* Trace the main object only, from a fixed eye point at /zpos/ on
     * the view axis, through a grid of targets spanning the object's
     * bounding-box cross-section. Writes hit indices and magnitudes
     * into /hits/ and /mags/ (rows /stride/ apart); both must be
     * non-null. Rows are traced in parallel.
     */
#ifndef _WIN32
    uint row;
#else
    int row;
#endif
    bool inside_box;
    const uint row_dim = UpDim;
    const uint col_dim = RightDim;
    const uint dir_dim = ForwardDim;
    Point origin;
    real col_start, row_start;
    real col_delta, row_delta;
    const BBox* box;
    const ObjectRaySpace* object;
    object = &space->main;
    box = &object->box;
    /* Half-cell offsets center each target within its grid cell.*/
    row_start = box->min.coords[row_dim];
    row_delta = (box->max.coords[row_dim] - row_start) / nrows;
    row_start += row_delta / 2;
    col_start = box->min.coords[col_dim];
    col_delta = (box->max.coords[col_dim] - col_start) / ncols;
    col_start += col_delta / 2;
    /* Eye point. NOTE(review): the 50s look like leftover debugging
     * magic numbers — confirm.
     */
    origin.coords[dir_dim] = zpos;
    origin.coords[row_dim] = 50;
    origin.coords[col_dim] = 50;
    /* (A zpos-only containment test previously computed here was dead:
     * it was immediately overwritten by this full box test.)
     */
    inside_box = inside_BBox (box, &origin);
#pragma omp parallel for schedule(dynamic)
    UFor( row, nrows )
    {
        uint col;
        uint* hitline;
        real* magline;
        hitline = &hits[row * stride];
        magline = &mags[row * stride];
        UFor( col, ncols )
        {
            Point dir;
            uint hit;  real mag;
            /* if (! (row == 333 && col == 322))  continue; */
            /* Aim at the cell's target point on the cross-section.*/
            zero_Point (&dir);
            dir.coords[row_dim] = row_start + row * row_delta;
            dir.coords[col_dim] = col_start + col * col_delta;
            diff_Point (&dir, &dir, &origin);
            normalize_Point (&dir, &dir);
            hit = object->nelems;
            mag = Max_real;
            cast1_ObjectRaySpace (&hit, &mag,
                                  &origin, &dir,
                                  object, inside_box, Yes);
            hitline[col] = hit;
            magline[col] = mag;
            /* if (row == 333 && col == 322)  puts (elem ? "hit" : "miss"); */
        }
    }
}
void
cast_row_orthographic (RayImage* restrict image,
                       uint row,
                       const RaySpace* restrict space,
                       const RayCastAPriori* restrict known,
                       URandom* urandom)
{
    /* Render one image row with an orthographic camera: every ray
     * shares the forward direction while the origin slides across the
     * view plane, offset so samples land at pixel centers.
     */
    uint col, ncols;
    const BBox* box;
    const Point* dir;
    uint* hitline = 0;
    real* magline = 0;
    byte* pixline = 0;
    ncols = image->ncols;
    box = &space->box;
    if (image->hits)  hitline = &image->hits[row * image->stride];
    if (image->mags)  magline = &image->mags[row * image->stride];
    if (image->pixels)  pixline = &image->pixels[row * 3 * image->stride];
    dir = &known->basis.pts[FwDim];
    UFor( col, ncols )
    {
        Point ray_origin;
        bool inside_box;
        ray_origin = known->origin;
        /* (2*row+1)/nrows - 1 maps pixel centers onto [-1, 1].*/
        follow_Point (&ray_origin, &ray_origin,
                      &known->basis.pts[UpDim],
                      known->up_scale * (-1 + (2*row+1.0) / image->nrows));
        follow_Point (&ray_origin, &ray_origin,
                      &known->basis.pts[RtDim],
                      known->rt_scale * (-1 + (2*col+1.0) / image->ncols));
        inside_box = inside_BBox (box, &ray_origin);
        cast_record (hitline, magline, pixline, col,
                     space, image,
                     &ray_origin, dir,
                     inside_box, urandom);
    }
}
void
cast_row_perspective (RayImage* image, uint row,
                      const RaySpace* restrict space,
                      const RayCastAPriori* restrict known,
                      URandom* urandom)
{
    /* Render one image row with perspective rays: every ray shares the eye
     * point in /known/ and only the direction varies per pixel.  The #if 0
     * sections are single-pixel debugging aids kept for future tracing.
     */
    uint col, ncols;
    const Point* origin;
    uint* hitline = 0;
    real* magline = 0;
    byte* pixline = 0;
    ncols = image->ncols;
        /* Each output channel is optional; null means "don't write it". */
    if (image->hits) hitline = &image->hits[row * image->stride];
    if (image->mags) magline = &image->mags[row * image->stride];
    if (image->pixels) pixline = &image->pixels[row * 3 * image->stride];
    origin = &known->origin;
    UFor( col, ncols )
    {
        Point dir;
#if 0
        /* Debug: enable verbose tracing for one specific pixel. */
        if (row == 103 && col == 61)
        {
            DBogTraceOn = true;
            DBog0( "TRACE ON" );
        }
#endif
#if 0
        /* Debug: whiteout every pixel except three of interest. */
        if (! (image->nrows - 1 - row == 31 && col == 27) &&
            ! (image->nrows - 1 - row == 31 && col == 28) &&
            ! (image->nrows - 1 - row == 31 && col == 26))
        {
            if (hitline) hitline[col] = UINT_MAX;
            if (magline) magline[col] = Max_real;
            if (pixline)
            {
                pixline[3*col+0] = 255;
                pixline[3*col+1] = 255;
                pixline[3*col+2] = 255;
            }
            continue;
        }
        fprintf (stderr, "\ncol:%u ", col);
#endif
            /* Pixel-centered direction in view space, then rotate into
             * world space and normalize.
             */
        zero_Point (&dir);
        dir.coords[UpDim] = known->up_scale * (-1 + (2*row+1.0) / image->nrows);
        dir.coords[RtDim] = known->rt_scale * (-1 + (2*col+1.0) / image->ncols);
        dir.coords[FwDim] = 1;
        trxfrm_Point (&dir, &known->basis, &dir);
        normalize_Point (&dir, &dir);
        cast_record (hitline, magline, pixline, col,
                     space, image,
                     origin, &dir, known->inside_box, urandom);
        //if (DBogTraceOn)
        // DBogTraceOn = false;
#if 0
        /* Debug: report the first missed pixel once per run. */
        {
            static bool missed = false;
            if (!missed && hitline[col] == UINT_MAX)
            {
                printf ("row:%u col:%u\n", row, col);
                missed = true;
            }
        }
#endif
    }
}
#ifdef TrivialMpiRayTrace
void
balancer_triv_sync_mpi_RayImage (RayImage* image,
                                 uint row_off, uint row_nul,
                                 uint nprocs)
{
    /* Rank-0 side of the trivial static row distribution: collect the rows
     * each worker rank computed (rank /p/ owns rows p, p+nprocs, ...).
     * Must mirror computer_triv_sync_mpi_RayImage() exactly.
     *   row_off - first row of the rendered band
     *   row_nul - number of rows in the band
     */
    uint proc, ncols;
    ncols = image->ncols;
    for (proc = 1; proc < nprocs; ++proc)
    {
        uint i;
        for (i = proc; i < row_nul; i += nprocs)
        {
            MPI_Status status;
            uint row;
            row = row_off + i;
                /* Fix: index rows by /image->stride/ as the casting code
                 * does (cast_row_*), not by /ncols/; the two differ when
                 * rows are padded.  Payload length stays /ncols/ elements.
                 */
            if (image->hits)
                MPI_Recv (&image->hits[row * image->stride],
                          ncols * sizeof(uint), MPI_BYTE,
                          proc, 1, MPI_COMM_WORLD, &status);
            if (image->mags)
                MPI_Recv (&image->mags[row * image->stride],
                          ncols * sizeof(real), MPI_BYTE,
                          proc, 1, MPI_COMM_WORLD, &status);
            if (image->pixels)
                MPI_Recv (&image->pixels[row * 3 * image->stride],
                          3 * ncols, MPI_BYTE,
                          proc, 1, MPI_COMM_WORLD, &status);
        }
    }
}
void
computer_triv_sync_mpi_RayImage (const RayImage* image,
                                 uint row_off, uint row_nul,
                                 uint myrank, uint nprocs)
{
    /* Worker side of the trivial static row distribution: send every row
     * this rank computed (rows myrank, myrank+nprocs, ...) to rank 0.
     * Must mirror balancer_triv_sync_mpi_RayImage() exactly.
     *   row_off - first row of the rendered band
     *   row_nul - number of rows in the band
     */
    uint i, ncols;
    ncols = image->ncols;
    for (i = myrank; i < row_nul; i += nprocs)
    {
        uint row;
        row = row_off + i;
            /* Fix: index rows by /image->stride/ as the casting code does
             * (cast_row_*), not by /ncols/; the two differ when rows are
             * padded.  Payload length stays /ncols/ elements.
             */
        if (image->hits)
            MPI_Send (&image->hits[row * image->stride],
                      ncols * sizeof(uint), MPI_BYTE,
                      0, 1, MPI_COMM_WORLD);
        if (image->mags)
            MPI_Send (&image->mags[row * image->stride],
                      ncols * sizeof(real), MPI_BYTE,
                      0, 1, MPI_COMM_WORLD);
        if (image->pixels)
            MPI_Send (&image->pixels[row * 3 * image->stride],
                      3 * ncols, MPI_BYTE,
                      0, 1, MPI_COMM_WORLD);
    }
}
#endif
void
setup_RayCastAPriori (RayCastAPriori* dst,
                      const RayImage* image,
                      const Point* origin,
                      const PointXfrm* view_basis,
                      const BBox* box)
{
    /* Precompute per-frame casting state shared by every ray of /image/:
     * eye point, view basis, view-plane scale factors, and whether the eye
     * sits inside the scene bounding box.
     */
    real half_fov;

    half_fov = .5 * image->hifov;
    dst->origin = *origin;
    dst->basis = *view_basis;

    if (image->perspective)
    {
            /* Perspective: scale is the tangent of half the field of view. */
        dst->up_scale = tan (half_fov);
        dst->inside_box = inside_BBox (box, origin);
    }
    else
    {
            /* Orthographic: hifov acts directly as the view height, and
             * containment is decided per-ray from each shifted origin.
             */
        dst->up_scale = half_fov;
        dst->inside_box = false;
    }

        /* Horizontal scale preserves the image's aspect ratio. */
    dst->rt_scale = dst->up_scale * ((real) image->ncols / image->nrows);
}
void
ray_from_RayCastAPriori (Ray* ray,
                         const RayCastAPriori* known,
                         uint row, uint col,
                         const RayImage* image)
{
    /* Reconstruct the single ray for pixel (row, col), matching what the
     * row-casting routines trace for the same pixel.
     */
    real up_amt, rt_amt;

        /* Signed offsets of the pixel center within the view plane,
         * each in [-scale, +scale].
         */
    up_amt = known->up_scale * (-1 + (2*row+1.0) / image->nrows);
    rt_amt = known->rt_scale * (-1 + (2*col+1.0) / image->ncols);

    ray->origin = known->origin;
    if (image->perspective)
    {
            /* Perspective: the eye is fixed; only the direction varies. */
        zero_Point (&ray->direct);
        ray->direct.coords[UpDim] = up_amt;
        ray->direct.coords[RtDim] = rt_amt;
        ray->direct.coords[FwDim] = 1;
        trxfrm_Point (&ray->direct, &known->basis, &ray->direct);
        normalize_Point (&ray->direct, &ray->direct);
    }
    else
    {
            /* Orthographic: the direction is fixed; the origin slides. */
        follow_Point (&ray->origin, &ray->origin,
                      &known->basis.pts[UpDim], up_amt);
        follow_Point (&ray->origin, &ray->origin,
                      &known->basis.pts[RtDim], rt_amt);
        ray->direct = known->basis.pts[FwDim];
    }
}
void
cast_partial_RayImage (RayImage* restrict image,
                       uint row_off,
                       uint row_nul,
                       const RaySpace* restrict space,
                       const RayCastAPriori* restrict known)
{
    /* Render rows [row_off, row_off + row_nul) of /image/, distributing
     * work across MPI ranks (when TrivialMpiRayTrace) and OpenMP threads.
     * Rank r handles rows r, r+inc, r+2*inc, ...; rank 0 then gathers
     * everything via the triv_sync helpers.
     */
#ifndef _WIN32
    uint i;
#else
    int i;  /* MSVC's OpenMP only accepts signed loop variables. */
#endif
    uint inc, nprocs, myrank;
#ifdef TrivialMpiRayTrace
    MPI_Comm_size (MPI_COMM_WORLD, (int*) &nprocs);
    MPI_Comm_rank (MPI_COMM_WORLD, (int*) &myrank);
#else
    myrank = 0;
    nprocs = 1;
#endif
#ifdef PackOpsAvail
        /* With packet tracing, each outer iteration covers a block of
         * RayPacketDimSz rows per rank.
         */
    inc = RayPacketDimSz * nprocs;
#else
    inc = nprocs;
#endif
#pragma omp parallel
    {
        URandom urandom[1];
        uint threadid = myrank;
        uint nthreads = nprocs;
#ifdef _OPENMP
        // Assume all MPI processes have the same number of threads.
        nthreads *= omp_get_num_threads();
        threadid = omp_get_thread_num() + myrank * (nthreads/nprocs);
#endif
            /* Seed each (rank, thread) pair with a distinct RNG stream. */
        init2_URandom (urandom, threadid, nthreads);
#pragma omp for schedule(dynamic)
        for (i = myrank; i < row_nul; i += inc)
        {
#ifdef PackOpsAvail
            uint j;
                /* NOTE(review): this compares RayPacketDimSz against the
                 * whole band (row_nul), not against the rows remaining at
                 * /i/; looks like it may have been meant as
                 * i + RayPacketDimSz <= row_nul - confirm.
                 */
            if (RayPacketDimSz <= row_nul)
            {
                for (j = 0; j < image->ncols; j += RayPacketDimSz)
                    cast_packet_RayImage (image, row_off + i, j, space, known);
            }
            else
            {
                uint n;
                    /* Fallback: cast the remaining rows one at a time.
                     * NOTE(review): the outer loop still advances by /inc/,
                     * so overlap with these rows should be double-checked.
                     */
                n = row_nul - i;
                UFor( j, n )
                    if (image->perspective)
                        cast_row_perspective (image, row_off + i + j, space, known, urandom);
                    else
                        cast_row_orthographic (image, row_off + i + j, space, known, urandom);
            }
#else
            if (image->perspective)
                cast_row_perspective (image, row_off + i, space, known, urandom);
            else
                cast_row_orthographic (image, row_off + i, space, known, urandom);
#endif
        }
    }
#ifdef TrivialMpiRayTrace
        /* Gather: rank 0 receives everyone else's rows; workers send. */
    if (myrank == 0)
        balancer_triv_sync_mpi_RayImage (image, row_off, row_nul, nprocs);
    else
        computer_triv_sync_mpi_RayImage (image, row_off, row_nul,
                                         myrank, nprocs);
#endif
}
void
cast_RayImage (RayImage* restrict image,
               const RaySpace* restrict space,
               const Point* restrict origin,
               const PointXfrm* restrict view_basis)
{
    /* Convenience wrapper: precompute per-frame casting state for the
     * given eye point and view basis, then render every row of /image/.
     */
    RayCastAPriori priori;
    setup_RayCastAPriori (&priori, image, origin, view_basis, &space->box);
    cast_partial_RayImage (image, 0, image->nrows, space, &priori);
}
#endif /* #ifndef __OPENCL_VERSION__ */
|
parallel-simple.c | /*
Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze
(joachim.protze@tu-dresden.de), Jonas Hahnfeld
(hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir
Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin
Schulz.
LLNL-CODE-773957
All rights reserved.
This file is part of Archer. For details, see
https://pruners.github.io/archer. Please also read
https://github.com/PRUNERS/archer/blob/master/LICENSE.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
  /* Archer (OpenMP race detector) regression test: this program is
   * intentionally race-free and must stay byte-for-byte as written so the
   * FileCheck "DONE" expectation and the detector's verdict remain valid.
   */
  int var = 0;
#pragma omp parallel num_threads(2) shared(var)
  {
    /* Only thread 1 writes var inside the region, so there is no race. */
    if (omp_get_thread_num() == 1) {
      var++;
    }
  } // implicit barrier
  /* The barrier orders the parallel increment before this serial one. */
  var++;
  fprintf(stderr, "DONE\n");
  /* Exit 0 iff both increments happened (var == 2). */
  int error = (var != 2);
  return error;
}
// CHECK: DONE
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64. whether the trail-allocated integer is
/// signed.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated
/// integer. 7 bits because it is the minimal number of bit to represent a
/// value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the
/// destructor on the trail-allocated APValue.
unsigned HasCleanup : 1;
/// Whether this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 8;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 8;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
/// The result of the noexcept(...) check (per the field name; set by the
/// owning CXXNoexceptExpr / ASTStmtReader).
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
/// Whether the requirements of this requires-expression are satisfied
/// (per the field name — TODO confirm against RequiresExpr).
unsigned IsSatisfied : 1;
/// The location of the 'requires' keyword.
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
/// Whether this co_await is compiler-generated rather than written
/// (inferred from the field name).
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
/// Whether the value should be copied (inferred from the field name).
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
/// The per-class bitfield storage for Stmt. Exactly one member is active
/// depending on the dynamic statement class; every member starts with the
/// same leading bits, so StmtBits.sClass can always be read (see
/// getStmtClass() below) to determine which member is in use.
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
/// Pointer-taking convenience overload; forwards to the reference overload.
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
/// Ordinary placement new: returns the supplied storage unchanged.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
/// Matching placement-delete overloads. All are deliberate no-ops: they
/// exist only so the compiler can pair them with the placement news above
/// if a constructor throws.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
/// Dereference casts the underlying Stmt* down to T*. A null element
/// passes through unchanged (cast_or_null).
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Stmts are allocated in the ASTContext (see operator new above) and are
// never default-constructed, copied, or moved.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
/// Construct a statement of the given class, recording the class in the
/// shared bitfields and updating the per-class statistics when enabled.
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
/// The dynamic class of this statement, read back from the bitfields.
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
/// Non-const overload implemented in terms of the const version above.
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
/// The declaration group this statement carries (one or many decls).
DeclGroupRef DG;
/// Start and end locations of the statement.
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
// Iteration directly over the contained declarations.
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
/// The location of the ';'; stored in the shared bitfields rather than
/// as a member so the node stays pointer-sized.
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
// A null statement consists of the ';' alone, so it both begins and ends
// there.
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
// A null statement has no children; both ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
// The body statements are stored as trailing objects directly after this
// node; the iterators below are raw pointers into that array.
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
//        ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
/// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined out-of-line below, after CaseStmt and DefaultStmt are complete:
// both dispatch on the concrete subclass.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allow ranges in cases statement of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
// Offsets into the trailing "Stmt *" array. The RHS slot exists only for
// GNU range cases, which shifts the substatement slot by one.
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
// The LHS/RHS slots hold Expr* values stored as Stmt*, hence the casts.
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DefaultStmt - Represents the 'default' label of a switch statement
/// (the counterpart of CaseStmt under the SwitchCase base class).
class DefaultStmt : public SwitchCase {
/// The statement following the 'default:' label.
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
/// Forward to the concrete subclass's getEndLoc(). Per classof(), a
/// SwitchCase is always either a CaseStmt or a DefaultStmt.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getEndLoc();
  if (const auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Forward to the concrete subclass's getSubStmt(). Per classof(), a
/// SwitchCase is always either a CaseStmt or a DefaultStmt.
Stmt *SwitchCase::getSubStmt() {
  if (auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getSubStmt();
  if (auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
/// Non-const overload implemented in terms of the const version.
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
/// ValueStmt covers a contiguous range of statement classes.
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
/// The declaration this label refers to.
LabelDecl *TheDecl;
/// The statement following the label.
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
/// The identifier location is stored in the shared bitfields.
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The statement the attributes apply to.
Stmt *SubStmt;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
// The attributes are stored as trailing objects directly after the node.
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing
// objects at then end but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
// The condition is stored as a "Stmt *" in the trailing-object array and
// cast back to "Expr *" on access.
Expr *getCond() {
  return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}

const Expr *getCond() const {
  return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}

void setCond(Expr *Cond) {
  getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}

Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
  return getTrailingObjects<Stmt *>()[thenOffset()];
}

void setThen(Stmt *Then) {
  getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}

// The else substatement is optional; the getters return null when no
// storage was allocated for it, and the setter requires that storage.
Stmt *getElse() {
  return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                          : nullptr;
}

const Stmt *getElse() const {
  return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                          : nullptr;
}

void setElse(Stmt *Else) {
  assert(hasElseStorage() &&
         "This if statement has no storage for an else statement!");
  getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
///   printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
  return const_cast<IfStmt *>(this)->getConditionVariable();
}

/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
/// Returns null when no condition-variable storage was allocated.
DeclStmt *getConditionVariableDeclStmt() {
  return hasVarStorage() ? static_cast<DeclStmt *>(
                               getTrailingObjects<Stmt *>()[varOffset()])
                         : nullptr;
}

const DeclStmt *getConditionVariableDeclStmt() const {
  return hasVarStorage() ? static_cast<DeclStmt *>(
                               getTrailingObjects<Stmt *>()[varOffset()])
                         : nullptr;
}

// The optional init statement; null when no storage was allocated.
// The setter requires that storage to exist.
Stmt *getInit() {
  return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                          : nullptr;
}

const Stmt *getInit() const {
  return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                          : nullptr;
}

void setInit(Stmt *Init) {
  assert(hasInitStorage() &&
         "This if statement has no storage for an init statement!");
  getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
// The 'if' keyword location lives in the base-class bit-field storage.
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

// The 'else' keyword location is kept in a trailing SourceLocation that
// only exists when else storage was allocated; otherwise an invalid
// location is returned.
SourceLocation getElseLoc() const {
  return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                          : SourceLocation();
}

void setElseLoc(SourceLocation ElseLoc) {
  assert(hasElseStorage() &&
         "This if statement has no storage for an else statement!");
  *getTrailingObjects<SourceLocation>() = ElseLoc;
}

bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

/// If this is an 'if constexpr', determine which substatement will be taken.
/// Otherwise, or if the condition is value-dependent, returns None.
Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

bool isObjCAvailabilityCheck() const;

SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
  // The statement ends with its last substatement: the else branch when
  // present, the then branch otherwise.
  if (getElse())
    return getElse()->getEndLoc();
  return getThen()->getEndLoc();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
  // Visit every trailing "Stmt *" in layout order: init and condition
  // variable (when present), then condition, "then", and "else" (when
  // present).
  return child_range(getTrailingObjects<Stmt *>(),
                     getTrailingObjects<Stmt *>() +
                         numTrailingObjects(OverloadToken<Stmt *>()));
}

const_child_range children() const {
  return const_child_range(getTrailingObjects<Stmt *>(),
                           getTrailingObjects<Stmt *>() +
                               numTrailingObjects(OverloadToken<Stmt *>()));
}

static bool classof(const Stmt *T) {
  return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Number of trailing "Stmt *": condition and body plus whichever
  // optional pieces were actually allocated.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the trailing-object array; the optional pieces precede
  // the condition, shifting the later offsets by one each.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition is stored as a "Stmt *" and cast back to "Expr *".
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // The optional init statement; null when no storage was allocated.
  // The setter requires that storage to exist.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  /// Returns null when no condition-variable storage was allocated.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  // Convenience overload: set the body and the 'switch' location together.
  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the list, so the case list ends up in reverse order of
  // addition.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Fall back to the condition's end location when the body is absent.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators: visit every trailing "Stmt *" in layout order.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Offsets into the trailing-object array; the optional condition
  // variable precedes the condition, shifting the later offsets.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Number of trailing "Stmt *": condition and body plus the optional
  // condition-variable slot when allocated.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition is stored as a "Stmt *" and cast back to "Expr *".
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  /// Returns null when no condition-variable storage was allocated.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // The 'while' keyword location lives in the base-class bit-fields.
  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators: visit every trailing "Stmt *" in layout order.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Fixed-size child array; BODY precedes COND, so children() visits the
  // body before the condition.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];

  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a do/while statement from its body, its condition, and the
  /// locations of the 'do', the 'while' and the closing ')'.
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setBody(Body);
    setCond(Cond);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The 'do' keyword location lives in the base-class bit-field storage;
  // the 'while' and ')' locations are stored directly in this node.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // The condition is held as a "Stmt *" and cast back on access.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // A do/while spans from the 'do' keyword to the closing ')'.
  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(SubExprs, SubExprs + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(SubExprs, SubExprs + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Fixed child slots; the enum order is the order children() visits them.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  /// Build a for statement; defined out-of-line. FL/LP/RP are the
  /// locations of the 'for' keyword and the parentheses.
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  // The cond/inc slots are held as "Stmt *" and cast back on access; any
  // of these accessors may return null (see the class comment).
  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // The 'for' keyword location lives in the base-class bit-fields.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;      // The label jumped to.
  SourceLocation LabelLoc;

public:
  /// Build a goto to the given label; GL is the location of the 'goto'
  /// keyword and LL the location of the label name.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' keyword location lives in the base-class bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc; // Location of the '*' (presumably in 'goto *expr';
                          // inferred from the name -- confirm against Sema).
  Stmt *Target;           // The jump-target expression, stored as a Stmt *.

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // The 'goto' keyword location lives in the base-class bit-fields.
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  // The target is held as a "Stmt *" and cast back to "Expr *" on access.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the single child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Build a 'continue' statement; CL is the location of the keyword.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location lives in the base-class bit-field storage.
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  // The statement spans exactly the 'continue' keyword.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Build a 'break' statement; BL is the location of the keyword.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The keyword location lives in the base-class bit-field storage.
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  // The statement spans exactly the 'break' keyword.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression. May be null (plain "return;").
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  // Zero or one trailing "const VarDecl *", depending on the flag above.
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return value is held as a "Stmt *" and cast back on access.
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  // The 'return' keyword location lives in the base-class bit-fields.
  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // A bare "return;" ends at the keyword itself.
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the single (optional) child is the return expression.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, laid out outputs-first then inputs (see the
  // begin_outputs/begin_inputs arithmetic below). Initialized to null
  // here; NOTE(review): presumably populated by the derived classes'
  // constructors -- confirm in GCCAsmStmt/MSAsmStmt.
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Note: both return an invalid (default-constructed) location here.
  // NOTE(review): derived classes presumably supply real locations --
  // confirm against GCCAsmStmt/MSAsmStmt, which are out of view.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike
  /// output constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators. Inputs start right after the outputs in Exprs.
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators. Outputs occupy the front of Exprs.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  // children() covers outputs then inputs only (not clobbers, which are
  // not expressions).
  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  // Output constraints first, then input constraints (see getInputConstraint).
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  /// True when the statement was written with braces (LBraceLoc was set).
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs are stored after the outputs, hence the "+ NumOutputs" offset.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents the __except handler of an SEH __try statement: a filter
/// expression together with the handler's compound statement.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  // Children[FILTER_EXPR] is the filter expression,
  // Children[BLOCK] is the handler's compound statement.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);

  /// Build an empty __except statement.
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents the __finally handler of an SEH __try statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);

  /// Build an empty __finally statement.
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a __try statement together with its handler, which is either
/// a SEHExceptStmt or a SEHFinallyStmt (see getExceptHandler /
/// getFinallyHandler).
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  // Children[TRY] is the try block, Children[HANDLER] the handler.
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  /// Build an empty __try statement.
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  // A __leave statement is a single keyword: begin and end are the same
  // location.
  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators (no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage (directly after this object): NumCaptures capture
  // initialization expressions, then the captured statement itself at
  // index NumCaptures.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source locations are delegated to the captured statement.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
cones.c | #include "cones.h"
#define CONE_RATE (2)
#define CONE_TOL (1e-7)
#define EXP_CONE_MAX_ITERS (100)
#ifdef LAPACK_LIB_FOUND
/* underscore for blas / lapack, single or double precision */
#ifdef NOBLASUNDERSCORE
#ifndef FLOAT
#define BLAS(x) d ## x
#else
#define BLAS(x) s ## x
#endif
#else
#ifndef FLOAT
#define BLAS(x) d ## x ## _
#else
#define BLAS(x) s ## x ## _
#endif
#endif
#ifdef MATLAB_MEX_FILE
typedef ptrdiff_t blasint;
#elif defined BLAS64
#include <stdint.h>
typedef int64_t blasint;
#else
typedef int blasint;
#endif
void BLAS(syevr)(char* jobz, char* range, char* uplo, blasint* n, pfloat* a, blasint* lda, pfloat* vl,
pfloat* vu, blasint* il, blasint* iu, pfloat* abstol, blasint* m, pfloat* w, pfloat* z, blasint* ldz,
blasint* isuppz, pfloat* work, blasint* lwork, blasint* iwork, blasint* liwork, blasint* info);
void BLAS(syr)(const char *uplo, const blasint *n, const pfloat *alpha, const pfloat *x, const blasint *incx,
pfloat *a, const blasint *lda);
void BLAS(axpy)(const blasint *n, const pfloat *alpha, const pfloat *dx, const blasint *incx, pfloat *dy,
const blasint *incy);
/* private data to help cone projection step */
static struct ConeData_t {
/* workspace for eigenvector decompositions: */
pfloat * Xs, *Z, *e, *work;
blasint *iwork, lwork, liwork;
}c;
#endif
static timer coneTimer;
static pfloat totalConeTime;
/*
 * Fill *boundaries with the row-index boundary sizes of the cones in A:
 * entry 0 holds the combined size of the zero (f) and linear (l) cones;
 * then one entry per SOC block, one per SD block (s[i]^2 rows each), and
 * one per primal/dual exponential cone (3 rows each). Returns the length
 * of the array; it is allocated here, so the caller must free it.
 */
idxint getConeBoundaries(Cone * k, idxint ** boundaries) {
    idxint j, pos;
    idxint total = 1 + k->qsize + k->ssize + k->ed + k->ep;
    idxint * bnd = scs_malloc(sizeof(idxint) * total);
    /* first slot: zero cone + linear cone sizes combined */
    bnd[0] = k->f + k->l;
    pos = 1;
    /* one slot per second-order cone block */
    for (j = 0; j < k->qsize; ++j) {
        bnd[pos + j] = k->q[j];
    }
    pos += k->qsize;
    /* each semidefinite block of order s[j] spans s[j]^2 rows */
    for (j = 0; j < k->ssize; ++j) {
        bnd[pos + j] = k->s[j] * k->s[j];
    }
    pos += k->ssize;
    /* every exponential cone (primal or dual) spans exactly 3 rows */
    for (j = 0; j < k->ep + k->ed; ++j) {
        bnd[pos + j] = 3;
    }
    *boundaries = bnd;
    return total;
}
/* Total number of rows of A accounted for by every cone in k:
 * f + l + sum(q) + sum(s^2) + 3*ed + 3*ep. */
idxint getFullConeDims(Cone * k) {
    idxint j;
    idxint dims = (k->f ? k->f : 0) + (k->l ? k->l : 0);
    if (k->qsize && k->q) {
        for (j = 0; j < k->qsize; ++j) {
            dims += k->q[j];
        }
    }
    if (k->ssize && k->s) {
        /* each SD block of order s[j] contributes s[j]^2 rows */
        for (j = 0; j < k->ssize; ++j) {
            dims += k->s[j] * k->s[j];
        }
    }
    dims += k->ed ? 3 * k->ed : 0;
    dims += k->ep ? 3 * k->ep : 0;
    return dims;
}
/* Sanity-check the cone specification against the data: all cone sizes must
 * be nonnegative and their total must equal the number of rows of A (d->m).
 * Returns 0 if valid, -1 (with a printed message) otherwise.
 * Fix: the error messages for the primal (ep) and dual (ed) exponential
 * cones were swapped — a bad ed reported "ep cone error" and vice versa. */
idxint validateCones(Data * d, Cone * k) {
    idxint i;
    if (k->f && k->f < 0) {
        scs_printf("free cone error\n");
        return -1;
    }
    if (k->l && k->l < 0) {
        scs_printf("lp cone error\n");
        return -1;
    }
    if (k->qsize && k->q) {
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] < 0) {
                scs_printf("soc cone error\n");
                return -1;
            }
        }
    }
    if (k->ssize && k->s) {
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] < 0) {
                scs_printf("sd cone error\n");
                return -1;
            }
        }
    }
    /* k->ed is the number of DUAL exponential cones */
    if (k->ed && k->ed < 0) {
        scs_printf("ed cone error\n");
        return -1;
    }
    /* k->ep is the number of PRIMAL exponential cones */
    if (k->ep && k->ep < 0) {
        scs_printf("ep cone error\n");
        return -1;
    }
    if (getFullConeDims(k) != d->m) {
        scs_printf("cone dimensions %i not equal to num rows in A = m = %i\n", (int) getFullConeDims(k), (int) d->m);
        return -1;
    }
    return 0;
}
/* Build a heap-allocated one-line summary of the average cone projection
 * time per iteration (caller frees), then reset the accumulated timer. */
char * getConeSummary(Info * info) {
    char * summary = scs_malloc(sizeof(char) * 64);
    /* divide accumulated time by iteration count; / 1e3 converts the timer
     * units for the "%1.2es" (seconds) format — presumably ms -> s */
    pfloat avgTime = totalConeTime / (info->iter + 1) / 1e3;
    sprintf(summary, "\tCones: avg projection time: %1.2es\n", avgTime);
    totalConeTime = 0.0;
    return summary;
}
/* Release any eigendecomposition workspace allocated by initCone.
 * Each pointer is freed only if it was actually allocated. */
void finishCone() {
#ifdef LAPACK_LIB_FOUND
    if (c.iwork)
        scs_free(c.iwork);
    if (c.work)
        scs_free(c.work);
    if (c.e)
        scs_free(c.e);
    if (c.Z)
        scs_free(c.Z);
    if (c.Xs)
        scs_free(c.Xs);
#endif
}
/* Build a human-readable, heap-allocated multi-line summary of the cone
 * sizes (caller frees). One line is appended per cone type present. */
char * getConeHeader(Cone * k) {
    char * tmp = scs_malloc(sizeof(char) * 512);
    idxint i, socVars, socBlks, sdVars, sdBlks, expPvars, expDvars;
    sprintf(tmp, "Cones:");
    if (k->f) {
        sprintf(tmp + strlen(tmp), "\tprimal zero / dual free vars: %i\n", (int) k->f);
    }
    if (k->l) {
        sprintf(tmp + strlen(tmp), "\tlinear vars: %i\n", (int) k->l);
    }
    /* second-order cones: total variables and number of blocks */
    socVars = 0;
    socBlks = 0;
    if (k->qsize && k->q) {
        socBlks = k->qsize;
        for (i = 0; i < k->qsize; i++) {
            socVars += k->q[i];
        }
        sprintf(tmp + strlen(tmp), "\tsoc vars: %i, soc blks: %i\n", (int) socVars, (int) socBlks);
    }
    /* semidefinite cones: each block of order s[i] covers s[i]^2 variables */
    sdVars = 0;
    sdBlks = 0;
    if (k->ssize && k->s) {
        sdBlks = k->ssize;
        for (i = 0; i < k->ssize; i++) {
            sdVars += k->s[i] * k->s[i];
        }
        sprintf(tmp + strlen(tmp), "\tsd vars: %i, sd blks: %i\n", (int) sdVars, (int) sdBlks);
    }
    /* exponential cones occupy 3 variables each (primal and dual) */
    if (k->ep || k->ed) {
        expPvars = k->ep ? 3 * k->ep : 0;
        expDvars = k->ed ? 3 * k->ed : 0;
        sprintf(tmp + strlen(tmp), "\texp vars: %i, dual exp vars: %i\n", (int) expPvars, (int) expDvars);
    }
    return tmp;
}
/* Returns 1 (true) when every semidefinite block has order <= 2, i.e. all
 * SD projections can be done analytically without LAPACK; 0 otherwise. */
idxint isSimpleSemiDefiniteCone(idxint * s, idxint ssize) {
    idxint j = 0;
    while (j < ssize) {
        if (s[j] >= 3) {
            return 0; /* found a block that needs a full eigendecomposition */
        }
        ++j;
    }
    return 1;
}
/* 1D Newton iteration for the scalar subproblem of the exponential cone
 * projection at a fixed dual variable rho. Finds a root of
 *   f(t) = t (t + z_hat) / rho^2 - y_hat / rho + log(t / rho) + 1
 * starting from a strictly positive t. Returns 0 if the iterate falls to
 * -z_hat or below, z_hat if it becomes nonpositive, and otherwise
 * t + z_hat once |f| < CONE_TOL or EXP_CONE_MAX_ITERS is reached. */
pfloat expNewtonOneD(pfloat rho, pfloat y_hat, pfloat z_hat) {
    pfloat t = MAX(-z_hat, 1e-6); /* keep the log argument positive */
    pfloat f, fp;
    idxint i;
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1;
        fp = (2 * t + z_hat) / rho / rho + 1 / t; /* f'(t) */
        t = t - f / fp; /* Newton step */
        if (t <= -z_hat) {
            return 0;
        } else if (t <= 0) {
            return z_hat;
        } else if ( ABS(f) < CONE_TOL) {
            break;
        }
    }
    return t + z_hat;
}
/* Given the dual variable rho, recover the candidate projection point x
 * (length 3) from the input point v (length 3). */
void expSolveForXWithRho(pfloat * v, pfloat * x, pfloat rho) {
    x[2] = expNewtonOneD(rho, v[1], v[2]); /* third component via 1D Newton */
    x[1] = (x[2] - v[2]) * x[2] / rho;
    x[0] = v[0] - rho;
}
/* Gradient with respect to the dual variable rho, used by the bisection in
 * projExpCone; also writes the candidate point into x as a side effect. */
pfloat expCalcGrad(pfloat * v, pfloat * x, pfloat rho) {
    expSolveForXWithRho(v, x, rho);
    if (x[1] <= 1e-12) {
        /* x[1] ~ 0: drop the x[1] * log(...) term to avoid log underflow */
        return x[0];
    } else {
        return x[0] + x[1] * log(x[1] / x[2]);
    }
}
/* Find initial bracketing bounds [lb, ub] on rho for the bisection in
 * projExpCone: repeatedly double ub until the gradient at ub is
 * nonpositive, keeping lb at the last value with a positive gradient.
 * NOTE(review): no iteration cap — assumes the gradient eventually turns
 * nonpositive; confirm against the maths of the projection. */
void expGetRhoUb(pfloat * v, pfloat * x, pfloat * ub, pfloat * lb) {
    *lb = 0;
    *ub = 0.125;
    while (expCalcGrad(v, x, *ub) > 0) {
        *lb = *ub;
        (*ub) *= 2;
    }
}
/* project onto the exponential cone, v has dimension *exactly* 3 */
/* Overwrites v with its projection; returns 0. The three fast paths below
 * handle membership / polar-cone / analytical cases; otherwise the
 * projection is found by bisection on the dual variable rho. */
static idxint projExpCone(pfloat * v, idxint iter) {
    idxint i;
    pfloat ub, lb, rho, g, x[3];
    pfloat r = v[0], s = v[1], t = v[2];
    pfloat tol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 / POWF((iter + 1), CONE_RATE)); */
    /* v in cl(Kexp): already in the cone, projection is v itself */
    if ((s * exp(r / s) <= t && s > 0) || (r <= 0 && s == 0 && t >= 0)) {
        return 0;
    }
    /* -v in Kexp^*: v is in the polar cone, projection is the origin */
    if ((-r < 0 && r * exp(s / r) <= -exp(1) * t) || (-r == 0 && -s >= 0 && -t >= 0)) {
        memset(v, 0, 3 * sizeof(pfloat));
        return 0;
    }
    /* special case with analytical solution */
    if (r < 0 && s < 0) {
        v[1] = 0.0;
        v[2] = MAX(v[2], 0);
        return 0;
    }
    /* iterative procedure to find projection, bisects on dual variable: */
    expGetRhoUb(v, x, &ub, &lb); /* get starting upper and lower bounds */
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        rho = (ub + lb) / 2; /* halfway between upper and lower bounds */
        g = expCalcGrad(v, x, rho); /* calculates gradient wrt dual var */
        if (g > 0) {
            lb = rho;
        } else {
            ub = rho;
        }
        if (ub - lb < tol) {
            break;
        }
    }
    /*
    #ifdef EXTRAVERBOSE
    scs_printf("exponential cone proj iters %i\n", i);
    #endif
    */
    /* x holds the projection computed at the final rho */
    v[0] = x[0];
    v[1] = x[1];
    v[2] = x[2];
    return 0;
}
/* One-time cone setup: resets the projection timer and, if any SD block
 * has order > 2, allocates the LAPACK eigendecomposition workspace in the
 * file-static struct c. Returns 0 on success, -1 on failure. */
idxint initCone(Cone * k) {
#ifdef LAPACK_LIB_FOUND
    idxint i;
    blasint nMax = 0; /* largest SD block order, sizes the workspaces */
    pfloat eigTol = 1e-8;
    blasint negOne = -1; /* lwork/liwork = -1 => LAPACK workspace-size query */
    blasint m = 0;
    blasint info;
    pfloat wkopt;
    c.Xs = NULL;
    c.Z = NULL;
    c.e = NULL;
    c.work = NULL;
    c.iwork = NULL;
#endif
    totalConeTime = 0.0;
#ifdef EXTRAVERBOSE
    scs_printf("initCone\n");
#ifdef MATLAB_MEX_FILE
    mexEvalString("drawnow;");
#endif
#endif
    if (k->ssize && k->s) {
        /* all blocks of order <= 2 are projected analytically: no LAPACK */
        if (isSimpleSemiDefiniteCone(k->s, k->ssize)) {
            return 0;
        }
#ifdef LAPACK_LIB_FOUND
        /* eigenvector decomp workspace */
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] > nMax) {
                nMax = (blasint) k->s[i];
            }
        }
        c.Xs = scs_calloc(nMax * nMax, sizeof(pfloat));
        c.Z = scs_calloc(nMax * nMax, sizeof(pfloat));
        c.e = scs_calloc(nMax, sizeof(pfloat));
        /* workspace-size query: the optimal lwork is returned in wkopt and
         * the optimal liwork in c.liwork (passed as a 1-element iwork) */
        BLAS(syevr)("Vectors", "All", "Upper", &nMax, c.Xs, &nMax, NULL, NULL, NULL, NULL,
            &eigTol, &m, c.e, c.Z, &nMax, NULL, &wkopt, &negOne, &(c.liwork), &negOne, &info);
        if (info != 0) {
            scs_printf("FATAL: syevr failure, info = %i\n", (int) info);
            return -1;
        }
        c.lwork = (blasint) (wkopt + 0.01); /* 0.01 for int casting safety */
        c.work = scs_malloc(c.lwork * sizeof(pfloat));
        c.iwork = scs_malloc(c.liwork * sizeof(blasint));
        /* bail out if any of the five allocations above failed */
        if (!c.Xs || !c.Z || !c.e || !c.work || !c.iwork) {
            return -1;
        }
#else
        scs_printf("FATAL: Cannot solve SDPs with > 2x2 matrices without linked blas+lapack libraries\n");
        scs_printf("Edit scs.mk to point to blas+lapack libray locations\n");
        return -1;
#endif
    }
#ifdef EXTRAVERBOSE
    scs_printf("initCone complete\n");
#ifdef MATLAB_MEX_FILE
    mexEvalString("drawnow;");
#endif
#endif
    return 0;
}
/* Analytically project the 2x2 matrix X (4 entries, off-diagonals at X[1]
 * and X[2]) onto the PSD cone, overwriting X. The off-diagonal is first
 * symmetrized to b = (X[1] + X[2]) / 2, then the eigenvalues l1 >= l2 of
 * the symmetrized matrix decide the projection. Always returns 0.
 * Fix: when b == 0 and the diagonal entries have mixed signs, the original
 * code divided by b and produced NaNs; the diagonal case is now handled
 * explicitly by clamping each diagonal entry (= eigenvalue) at zero. */
idxint project2By2Sdc(pfloat *X) {
    pfloat a, b, d, l1, l2, x1, x2, rad;
    a = X[0];
    b = 0.5 * (X[1] + X[2]);
    d = X[3];
    if (b == 0.0) {
        /* diagonal matrix: eigenvalues are a and d, project each to >= 0;
         * this also avoids the division by b below */
        X[0] = MAX(a, 0);
        X[1] = 0;
        X[2] = 0;
        X[3] = MAX(d, 0);
        return 0;
    }
    rad = SQRTF((a - d) * (a - d) + 4 * b * b);
    /* l1 >= l2 always, since rad >= 0 */
    l1 = 0.5 * (a + d + rad);
    l2 = 0.5 * (a + d - rad);
    if (l2 >= 0) { /* both eigenvalues nonnegative: already PSD, just symmetrize */
        X[1] = b;
        X[2] = b;
        return 0;
    }
    if (l1 <= 0) { /* both nonpositive: projection is the zero matrix */
        X[0] = 0;
        X[1] = 0;
        X[2] = 0;
        X[3] = 0;
        return 0;
    }
    /* l1 pos, l2 neg: keep only the positive eigenpair; (x1, x2) is the
     * unit eigenvector for l1 (b != 0 here, so the divisions are safe) */
    x1 = 1 / SQRTF(1 + (l1 - a) * (l1 - a) / b / b);
    x2 = x1 * (l1 - a) / b;
    X[0] = l1 * x1 * x1;
    X[1] = l1 * x1 * x2;
    X[2] = X[1];
    X[3] = l1 * x2 * x2;
    return 0;
}
static idxint projSemiDefiniteCone(pfloat *X, idxint n, idxint iter) {
    /* project onto the positive semi-definite cone */
    /* X is an n x n matrix, overwritten with its projection; returns 0 on
     * success, -1 when LAPACK is required but not linked. Orders n <= 2
     * are handled analytically; larger orders use the workspace that
     * initCone allocated in the file-static struct c. */
#ifdef LAPACK_LIB_FOUND
    idxint i, j;
    blasint one = 1;
    blasint m = 0; /* number of eigenvalues found in the requested interval */
    blasint nb = (blasint) n;
    pfloat * Xs = c.Xs;
    pfloat * Z = c.Z;
    pfloat * e = c.e;
    pfloat * work = c.work;
    blasint * iwork = c.iwork;
    blasint lwork = c.lwork;
    blasint liwork = c.liwork;
    pfloat eigTol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 / POWF(iter + 1, CONE_RATE)); */
    pfloat onef = 1.0;
    pfloat zero = 0.0;
    blasint info;
    pfloat vupper;
#endif
    if (n == 0) {
        return 0;
    }
    if (n == 1) {
        /* scalar case: clamp at zero */
        if (X[0] < 0.0) {
            X[0] = 0.0;
        }
        return 0;
    }
    if (n == 2) {
        return project2By2Sdc(X);
    }
#ifdef LAPACK_LIB_FOUND
    memcpy(Xs, X, n * n * sizeof(pfloat));
    /* Xs = X + X', save div by 2 for eigen-recomp */
    for (i = 0; i < n; ++i) {
        BLAS(axpy)(&nb, &onef, &(X[i]), &nb, &(Xs[i * n]), &one);
    }
    /* upper bound on the positive eigenvalues (norm of the matrix) */
    vupper = MAX(calcNorm(Xs, n * n), 0.001);
    /* Solve eigenproblem, reuse workspaces */
    /* "VInterval" requests only eigenvalues in (zero, vupper], i.e. the
     * positive part needed for the projection; m receives how many */
    BLAS(syevr)("Vectors", "VInterval", "Upper", &nb, Xs, &nb, &zero, &vupper,
        NULL, NULL, &eigTol, &m, e, Z, &nb, NULL, work, &lwork, iwork, &liwork, &info);
    if (info != 0) {
        scs_printf("WARN: LAPACK syevr error, info = %i, attempting to continue...\n", info);
#ifdef EXTRAVERBOSE
        scs_printf("syevr input parameter dump:\n");
        scs_printf("nb = %li\n", (long) nb);
        scs_printf("lwork = %li\n", (long) lwork);
        scs_printf("liwork = %li\n", (long) liwork);
        scs_printf("vupper = %f\n", vupper);
        scs_printf("eigTol = %e\n", eigTol);
        printArray(Xs, n * n, "Xs");
        printArray(X, n * n, "X");
        printArray(e, m, "e");
        printArray(Z, m * n, "Z");
#endif
        /* return -1; */
    }
    /* rebuild X = sum_i (e[i]/2) z_i z_i' over the positive eigenpairs;
     * the /2 undoes the earlier doubling Xs = X + X' */
    memset(X, 0, n * n * sizeof(pfloat));
    for (i = 0; i < m; ++i) {
        pfloat a = e[i] / 2;
        BLAS(syr)("Lower", &nb, &a, &(Z[i * n]), &one, X, &nb);
    }
    /* fill in upper half */
    for (i = 0; i < n; ++i) {
        for (j = i + 1; j < n; ++j) {
            X[i + j * n] = X[j + i * n];
        }
    }
#else
    scs_printf("FAILURE: solving SDP with > 2x2 matrices, but no blas/lapack libraries were linked!\n");
    scs_printf("scs will return nonsense!\n");
    /* NOTE(review): scales only n of the n*n entries of X with NAN —
     * possibly intended to be n * n; the call returns -1 regardless */
    scaleArray(X, NAN, n);
    return -1;
#endif
    return 0;
}
/* outward facing cone projection routine, iter is outer algorithm iteration, if iter < 0 then iter is ignored
   warm_start contains guess of projection (can be set to NULL) */
/* Projects x in place, cone by cone, advancing `count` through the rows of
 * each cone block. Returns 0 on success, -1 if an SD or exp projection
 * fails. Accumulates the elapsed time into totalConeTime. */
idxint projDualCone(pfloat *x, Cone * k, const pfloat * warm_start, idxint iter) {
    idxint i;
    /* free-cone rows need no projection: skip the first k->f entries */
    idxint count = (k->f ? k->f : 0);
#ifdef EXTRAVERBOSE
    timer projTimer;
    tic(&projTimer);
#endif
    tic(&coneTimer);

    if (k->l) {
        /* project onto positive orthant */
        for (i = count; i < count + k->l; ++i) {
            if (x[i] < 0.0)
                x[i] = 0.0;
            /*x[i] = (x[i] < 0.0) ? 0.0 : x[i]; */
        }
        count += k->l;
#ifdef EXTRAVERBOSE
        scs_printf("pos orthant proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }

    if (k->qsize && k->q) {
        /* project onto SOC */
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] == 0) {
                continue;
            }
            if (k->q[i] == 1) {
                /* 1-dimensional SOC degenerates to the nonnegative ray */
                if (x[count] < 0.0)
                    x[count] = 0.0;
            } else {
                pfloat v1 = x[count]; /* cone "height" component */
                pfloat s = calcNorm(&(x[count + 1]), k->q[i] - 1);
                pfloat alpha = (s + v1) / 2.0;
                if (s <= v1) { /* already inside the cone: do nothing */
                } else if (s <= -v1) {
                    /* in the polar cone: projection is the origin */
                    memset(&(x[count]), 0, k->q[i] * sizeof(pfloat));
                } else {
                    /* general case: rescale onto the cone boundary */
                    x[count] = alpha;
                    scaleArray(&(x[count + 1]), alpha / s, k->q[i] - 1);
                }
            }
            count += k->q[i];
        }
#ifdef EXTRAVERBOSE
        scs_printf("SOC proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }

    if (k->ssize && k->s) {
        /* project onto PSD cone */
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] == 0) {
                continue;
            }
            if (projSemiDefiniteCone(&(x[count]), k->s[i], iter) < 0) return -1;
            count += (k->s[i]) * (k->s[i]); /* each block spans s[i]^2 rows */
        }
#ifdef EXTRAVERBOSE
        scs_printf("SD proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }

    if (k->ep) {
        pfloat r, s, t;
        idxint idx;
        /*
         * exponential cone is not self dual, if s \in K
         * then y \in K^* and so if K is the primal cone
         * here we project onto K^*, via Moreau
         * \Pi_C^*(y) = y + \Pi_C(-y)
         */
        scaleArray(&(x[count]), -1, 3 * k->ep); /* x = -x; */
#ifdef OPENMP
#pragma omp parallel for private(r,s,t,idx)
#endif
        for (i = 0; i < k->ep; ++i) {
            idx = count + 3 * i;
            /* save -y, project it onto Kexp, then subtract to get the
             * Moreau identity result y + proj(-y) (signs folded in) */
            r = x[idx];
            s = x[idx + 1];
            t = x[idx + 2];
            if (projExpCone(&(x[idx]), iter) < 0) return -1;
            x[idx] -= r;
            x[idx + 1] -= s;
            x[idx + 2] -= t;
        }
        count += 3 * k->ep;
#ifdef EXTRAVERBOSE
        scs_printf("EP proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }

    if (k->ed) {
        /* exponential cone: */
        /* dual-exp variables are projected directly onto Kexp, one
         * 3-dimensional block at a time */
#ifdef OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < k->ed; ++i) {
            if (projExpCone(&(x[count + 3 * i]), iter) < 0) return -1;
        }
        count += 3 * k->ed;
#ifdef EXTRAVERBOSE
        scs_printf("ED proj time: %1.2es\n", tocq(&projTimer) / 1e3);
        tic(&projTimer);
#endif
    }
    /* project onto OTHER cones */
    totalConeTime += tocq(&coneTimer);
    return 0;
}
|
HybridRepSetReader.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////
/** @file
*
* derived from SplineSetReader
*/
#ifndef QMCPLUSPLUS_HYBRIDREP_READER_H
#define QMCPLUSPLUS_HYBRIDREP_READER_H
#include <Numerics/Quadrature.h>
#include <Numerics/Bessel.h>
#include <QMCWaveFunctions/BsplineFactory/HybridRepCenterOrbitals.h>
#include "OhmmsData/AttributeSet.h"
#include <config/stdlib/math.hpp>
//#include <QMCHamiltonians/Ylm.h>
//#define PRINT_RADIAL
namespace qmcplusplus
{
template<typename ST, typename LT>
struct Gvectors
{
typedef TinyVector<ST, 3> PosType;
typedef std::complex<ST> ValueType;
const LT& Lattice;
std::vector<PosType> gvecs_cart; //Cartesian.
std::vector<ST> gmag;
const size_t NumGvecs;
/** Construct Cartesian G-vectors for the subrange [first, last) of the
 *  input integer g-vectors. Each g-vector is shifted by HalfG * 0.5
 *  before being mapped through the lattice; the magnitude of each
 *  Cartesian vector is cached in gmag.
 */
Gvectors(const std::vector<TinyVector<int, 3>>& gvecs_in,
         const LT& Lattice_in,
         const TinyVector<int, 3>& HalfG,
         size_t first,
         size_t last)
    : Lattice(Lattice_in), NumGvecs(last - first)
{
  gvecs_cart.resize(NumGvecs);
  gmag.resize(NumGvecs);
// entries are independent, safe to fill in parallel
#pragma omp parallel for
  for (size_t ig = 0; ig < NumGvecs; ig++)
  {
    TinyVector<ST, 3> gvec_shift;
    gvec_shift     = gvecs_in[ig + first] + HalfG * 0.5;
    gvecs_cart[ig] = Lattice.k_cart(gvec_shift);
    gmag[ig]       = std::sqrt(dot(gvecs_cart[ig], gvecs_cart[ig]));
  }
}
template<typename YLM_ENGINE, typename VVT>
void calc_Ylm_G(const size_t ig, YLM_ENGINE& Ylm, VVT& YlmG) const
{
PosType Ghat(0.0, 0.0, 1.0);
if (gmag[ig] > 0)
Ghat = gvecs_cart[ig] / gmag[ig];
Ylm.evaluateV(Ghat[0], Ghat[1], Ghat[2], YlmG.data());
}
template<typename VVT>
inline void calc_jlm_G(const int lmax, ST& r, const size_t ig, VVT& j_lm_G) const
{
bessel_steed_array_cpu(lmax, gmag[ig] * r, j_lm_G.data());
for (size_t l = lmax; l > 0; l--)
for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++)
j_lm_G[lm] = j_lm_G[l];
}
template<typename PT, typename VT>
inline void calc_phase_shift(const PT& RSoA, const size_t ig, VT& phase_shift_real, VT& phase_shift_imag) const
{
const ST* restrict px = RSoA.data(0);
const ST* restrict py = RSoA.data(1);
const ST* restrict pz = RSoA.data(2);
ST* restrict v_r = phase_shift_real.data();
ST* restrict v_i = phase_shift_imag.data();
const ST& gv_x = gvecs_cart[ig][0];
const ST& gv_y = gvecs_cart[ig][1];
const ST& gv_z = gvecs_cart[ig][2];
#pragma omp simd aligned(px, py, pz, v_r, v_i)
for (size_t iat = 0; iat < RSoA.size(); iat++)
qmcplusplus::sincos(px[iat] * gv_x + py[iat] * gv_y + pz[iat] * gv_z, v_i + iat, v_r + iat);
}
template<typename PT>
ValueType evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos)
{
assert(cG.size() == NumGvecs);
std::complex<ST> val(0.0, 0.0);
for (size_t ig = 0; ig < NumGvecs; ig++)
{
ST s, c;
qmcplusplus::sincos(dot(gvecs_cart[ig], pos), &s, &c);
ValueType pw0(c, s);
val += cG[ig] * pw0;
}
return val;
}
template<typename PT>
void evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos, ValueType& phi, ValueType& d2phi)
{
assert(cG.size() == NumGvecs);
d2phi = phi = 0.0;
for (size_t ig = 0; ig < NumGvecs; ig++)
{
ST s, c;
qmcplusplus::sincos(dot(gvecs_cart[ig], pos), &s, &c);
ValueType pw0(c, s);
phi += cG[ig] * pw0;
d2phi += cG[ig] * pw0 * (-dot(gvecs_cart[ig], gvecs_cart[ig]));
}
}
double evaluate_KE(const Vector<std::complex<double>>& cG)
{
assert(cG.size() == NumGvecs);
double KE = 0;
for (size_t ig = 0; ig < NumGvecs; ig++)
KE += dot(gvecs_cart[ig], gvecs_cart[ig]) * (cG[ig].real() * cG[ig].real() + cG[ig].imag() * cG[ig].imag());
return KE / 2.0;
}
};
/** General HybridRepSetReader to handle any unitcell
 *
 * Extends SplineSetReader with the hybrid (B-spline + atomic orbital)
 * representation: plane-wave coefficients of each band are additionally
 * projected onto Y_lm channels around every atomic center and stored as
 * 1D radial splines.
 */
template<typename SA>
struct HybridRepSetReader : public SplineSetReader<SA>
{
  typedef SplineSetReader<SA> BaseReader;
  using BaseReader::bspline;
  using BaseReader::mybuilder;
  using BaseReader::rotate_phase_i;
  using BaseReader::rotate_phase_r;
  using typename BaseReader::DataType;
  HybridRepSetReader(EinsplineSetBuilder* e) : BaseReader(e) {}
  /** initialize basic parameters of atomic orbitals
   *
   * Reads the smoothing scheme/function from the XML input, validates the
   * per-center cutoff/spline parameters in AtomicCentersInfo (filling in
   * defaults where allowed) and populates bspline->AtomicCenters.
   * Aborts the run when a required parameter is missing or inconsistent.
   */
  void initialize_hybridrep_atomic_centers() override
  {
    OhmmsAttributeSet a;
    std::string scheme_name("Consistent");
    std::string s_function_name("LEKS2018");
    a.add(scheme_name, "smoothing_scheme");
    a.add(s_function_name, "smoothing_function");
    a.put(mybuilder->XMLRoot);
    // assign smooth_scheme
    if (scheme_name == "Consistent")
      bspline->smooth_scheme = SA::smoothing_schemes::CONSISTENT;
    else if (scheme_name == "SmoothAll")
      bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHALL;
    else if (scheme_name == "SmoothPartial")
      bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHPARTIAL;
    else
      APP_ABORT("initialize_hybridrep_atomic_centers wrong smoothing_scheme name! Only allows Consistent, SmoothAll or "
                "SmoothPartial.");
    // assign smooth_function
    if (s_function_name == "LEKS2018")
      bspline->smooth_func_id = smoothing_functions::LEKS2018;
    else if (s_function_name == "coscos")
      bspline->smooth_func_id = smoothing_functions::COSCOS;
    else if (s_function_name == "linear")
      bspline->smooth_func_id = smoothing_functions::LINEAR;
    else
      APP_ABORT(
          "initialize_hybridrep_atomic_centers wrong smoothing_function name! Only allows LEKS2018, coscos or linear.");
    app_log() << "Hybrid orbital representation uses " << scheme_name << " smoothing scheme and " << s_function_name
              << " smoothing function." << std::endl;
    bspline->set_info(*(mybuilder->SourcePtcl), mybuilder->TargetPtcl, mybuilder->Super2Prim);
    auto& centers = bspline->AtomicCenters;
    auto& ACInfo = mybuilder->AtomicCentersInfo;
    // load atomic center info only when it is not initialized
    if (centers.size() == 0)
    {
      bool success = true;
      app_log() << "Reading atomic center info for hybrid representation" << std::endl;
      for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++)
      {
        const int my_GroupID = ACInfo.GroupID[center_idx];
        // cutoff_radius is mandatory for every center
        if (ACInfo.cutoff[center_idx] < 0)
        {
          app_error() << "Hybrid orbital representation needs parameter 'cutoff_radius' for atom " << center_idx
                      << std::endl;
          success = false;
        }
        if (ACInfo.inner_cutoff[center_idx] < 0)
        {
          // default inner_cutoff: 0.3 inside the cutoff, floored at zero
          const double inner_cutoff = std::max(ACInfo.cutoff[center_idx] - 0.3, 0.0);
          app_log() << "Hybrid orbital representation setting 'inner_cutoff' to " << inner_cutoff << " for group "
                    << my_GroupID << " as atom " << center_idx << std::endl;
          // overwrite the inner_cutoff of all the atoms of the same species
          for (int id = 0; id < ACInfo.Ncenters; id++)
            if (my_GroupID == ACInfo.GroupID[id])
              ACInfo.inner_cutoff[id] = inner_cutoff;
        }
        else if (ACInfo.inner_cutoff[center_idx] > ACInfo.cutoff[center_idx])
        {
          app_error() << "Hybrid orbital representation 'inner_cutoff' must be smaller than 'spline_radius' for atom "
                      << center_idx << std::endl;
          success = false;
        }
        if (ACInfo.cutoff[center_idx] > 0)
        {
          if (ACInfo.lmax[center_idx] < 0)
          {
            app_error() << "Hybrid orbital representation needs parameter 'lmax' for atom " << center_idx << std::endl;
            success = false;
          }
          if (ACInfo.spline_radius[center_idx] < 0 && ACInfo.spline_npoints[center_idx] < 0)
          {
            // derive a default radial grid: spacing at most 0.02 and a few
            // points beyond the cutoff; applied to every atom of this species
            app_log() << "Parameters 'spline_radius' and 'spline_npoints' for group " << my_GroupID << " as atom "
                      << center_idx << " are not specified." << std::endl;
            const double delta = std::min(0.02, ACInfo.cutoff[center_idx] / 4.0);
            const int n_grid_point = std::ceil((ACInfo.cutoff[center_idx] + 1e-4) / delta) + 3;
            for (int id = 0; id < ACInfo.Ncenters; id++)
              if (my_GroupID == ACInfo.GroupID[id])
              {
                ACInfo.spline_npoints[id] = n_grid_point;
                ACInfo.spline_radius[id] = (n_grid_point - 1) * delta;
              }
            app_log() << " Based on default grid point distance " << delta << std::endl;
            app_log() << " Setting 'spline_npoints' to " << ACInfo.spline_npoints[center_idx] << std::endl;
            app_log() << " Setting 'spline_radius' to " << ACInfo.spline_radius[center_idx] << std::endl;
          }
          else
          {
            if (ACInfo.spline_radius[center_idx] < 0)
            {
              app_error() << "Hybrid orbital representation needs parameter 'spline_radius' for atom " << center_idx
                          << std::endl;
              success = false;
            }
            if (ACInfo.spline_npoints[center_idx] < 0)
            {
              app_error() << "Hybrid orbital representation needs parameter 'spline_npoints' for atom " << center_idx
                          << std::endl;
              success = false;
            }
          }
          // check maximally allowed cutoff_radius
          double max_allowed_cutoff = ACInfo.spline_radius[center_idx] -
              2.0 * ACInfo.spline_radius[center_idx] / (ACInfo.spline_npoints[center_idx] - 1);
          if (success && ACInfo.cutoff[center_idx] > max_allowed_cutoff)
          {
            app_error() << "Hybrid orbital representation requires cutoff_radius<=" << max_allowed_cutoff
                        << " calculated by spline_radius-2*spline_radius/(spline_npoints-1) for atom " << center_idx
                        << std::endl;
            success = false;
          }
        }
        else
        {
          // no atomic regions for this atom type
          ACInfo.spline_radius[center_idx] = 0.0;
          ACInfo.spline_npoints[center_idx] = 0;
          ACInfo.lmax[center_idx] = 0;
        }
      }
      if (!success)
        BaseReader::myComm->barrier_and_abort("initialize_hybridrep_atomic_centers Failed to initialize atomic centers "
                                              "in hybrid orbital representation!");
      // all parameters validated: build one AtomicOrbitals object per center
      for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++)
      {
        AtomicOrbitals<DataType> oneCenter(ACInfo.lmax[center_idx]);
        oneCenter.set_info(ACInfo.ion_pos[center_idx], ACInfo.cutoff[center_idx], ACInfo.inner_cutoff[center_idx],
                           ACInfo.spline_radius[center_idx], ACInfo.non_overlapping_radius[center_idx],
                           ACInfo.spline_npoints[center_idx]);
        centers.push_back(oneCenter);
      }
    }
  }
  /** initialize construct atomic orbital radial functions from plane waves
   *
   * For band iorb, projects the plane-wave coefficients cG onto spherical
   * harmonic channels around every atomic center and fits 1D radial splines.
   * The G-vector sum is distributed over MPI ranks; per-center contributions
   * are accumulated in thread-local buffers under OpenMP and reduced at the
   * end, then a final MPI reduction collects the full radial data.
   * @param cG plane-wave coefficients of the band (broadcast to all ranks)
   * @param band_group_comm communicator of the ranks sharing this band
   * @param iorb band index being transformed
   */
  inline void create_atomic_centers_Gspace(Vector<std::complex<double>>& cG,
                                           Communicate& band_group_comm,
                                           int iorb) override
  {
    band_group_comm.bcast(rotate_phase_r);
    band_group_comm.bcast(rotate_phase_i);
    band_group_comm.bcast(cG);
    //distribute G-vectors over processor groups
    const int Ngvecs = mybuilder->Gvecs[0].size();
    const int Nprocs = band_group_comm.size();
    const int Ngvecgroups = std::min(Ngvecs, Nprocs);
    Communicate gvec_group_comm(band_group_comm, Ngvecgroups);
    std::vector<int> gvec_groups(Ngvecgroups + 1, 0);
    FairDivideLow(Ngvecs, Ngvecgroups, gvec_groups);
    const int gvec_first = gvec_groups[gvec_group_comm.getGroupID()];
    const int gvec_last = gvec_groups[gvec_group_comm.getGroupID() + 1];
    // prepare Gvecs Ylm(G)
    typedef typename EinsplineSetBuilder::UnitCellType UnitCellType;
    Gvectors<double, UnitCellType> Gvecs(mybuilder->Gvecs[0], mybuilder->PrimCell, bspline->HalfG, gvec_first,
                                         gvec_last);
    // if(band_group_comm.isGroupLeader()) std::cout << "print band=" << iorb << " KE=" << Gvecs.evaluate_KE(cG) << std::endl;
    std::vector<AtomicOrbitals<DataType>>& centers = bspline->AtomicCenters;
    app_log() << "Transforming band " << iorb << " on Rank 0" << std::endl;
    // collect atomic centers by group
    std::vector<int> uniq_species;
    for (int center_idx = 0; center_idx < centers.size(); center_idx++)
    {
      auto& ACInfo = mybuilder->AtomicCentersInfo;
      const int my_GroupID = ACInfo.GroupID[center_idx];
      int found_idx = -1;
      for (size_t idx = 0; idx < uniq_species.size(); idx++)
        if (my_GroupID == uniq_species[idx])
        {
          found_idx = idx;
          break;
        }
      if (found_idx < 0)
        uniq_species.push_back(my_GroupID);
    }
    // construct group list
    std::vector<std::vector<int>> group_list(uniq_species.size());
    for (int center_idx = 0; center_idx < centers.size(); center_idx++)
    {
      auto& ACInfo = mybuilder->AtomicCentersInfo;
      const int my_GroupID = ACInfo.GroupID[center_idx];
      for (size_t idx = 0; idx < uniq_species.size(); idx++)
        if (my_GroupID == uniq_species[idx])
        {
          group_list[idx].push_back(center_idx);
          break;
        }
    }
    // one species group at a time: all centers of a group share lmax,
    // spline radius and radial grid
    for (int group_idx = 0; group_idx < group_list.size(); group_idx++)
    {
      const auto& mygroup = group_list[group_idx];
      const double spline_radius = centers[mygroup[0]].getSplineRadius();
      const int spline_npoints = centers[mygroup[0]].getSplineNpoints();
      const int lmax = centers[mygroup[0]].getLmax();
      const double delta = spline_radius / static_cast<double>(spline_npoints - 1);
      const int lm_tot = (lmax + 1) * (lmax + 1);
      const size_t natoms = mygroup.size();
      // policy 1 vectorizes the inner loop over atoms (chosen when
      // natoms >= lm_tot); policy 0 vectorizes over lm channels
      const int policy = lm_tot > natoms ? 0 : 1;
      std::vector<std::complex<double>> i_power(lm_tot);
      // rotate phase is introduced here.
      std::complex<double> i_temp(rotate_phase_r, rotate_phase_i);
      // i_power[lm] = (rotation phase) * i^l, constant within each l shell
      for (size_t l = 0; l <= lmax; l++)
      {
        for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++)
          i_power[lm] = i_temp;
        i_temp *= std::complex<double>(0.0, 1.0);
      }
      std::vector<Matrix<double>> all_vals(natoms);
      std::vector<std::vector<aligned_vector<double>>> vals_local(spline_npoints * omp_get_max_threads());
      VectorSoaContainer<double, 3> myRSoA(natoms);
      for (size_t idx = 0; idx < natoms; idx++)
      {
        // real and imaginary parts stored side by side: lm_tot * 2 columns
        all_vals[idx].resize(spline_npoints, lm_tot * 2);
        all_vals[idx] = 0.0;
        myRSoA(idx) = centers[mygroup[idx]].getCenterPos();
      }
#pragma omp parallel
      {
        const size_t tid = omp_get_thread_num();
        const size_t nt = omp_get_num_threads();
        // zero this thread's accumulation buffers, one set per grid point;
        // the layout depends on which index the SIMD loop runs over
        for (int ip = 0; ip < spline_npoints; ip++)
        {
          const size_t ip_idx = tid * spline_npoints + ip;
          if (policy == 1)
          {
            vals_local[ip_idx].resize(lm_tot * 2);
            for (size_t lm = 0; lm < lm_tot * 2; lm++)
            {
              auto& vals = vals_local[ip_idx][lm];
              vals.resize(natoms);
              std::fill(vals.begin(), vals.end(), 0.0);
            }
          }
          else
          {
            vals_local[ip_idx].resize(natoms * 2);
            for (size_t iat = 0; iat < natoms * 2; iat++)
            {
              auto& vals = vals_local[ip_idx][iat];
              vals.resize(lm_tot);
              std::fill(vals.begin(), vals.end(), 0.0);
            }
          }
        }
        // process plane waves in tiles of 32 so Ylm and phase factors are
        // computed once per tile and reused for every radial grid point
        const size_t size_pw_tile = 32;
        const size_t num_pw_tiles = (Gvecs.NumGvecs + size_pw_tile - 1) / size_pw_tile;
        aligned_vector<double> j_lm_G(lm_tot, 0.0);
        std::vector<aligned_vector<double>> phase_shift_r(size_pw_tile);
        std::vector<aligned_vector<double>> phase_shift_i(size_pw_tile);
        std::vector<aligned_vector<double>> YlmG(size_pw_tile);
        for (size_t ig = 0; ig < size_pw_tile; ig++)
        {
          phase_shift_r[ig].resize(natoms);
          phase_shift_i[ig].resize(natoms);
          YlmG[ig].resize(lm_tot);
        }
        SoaSphericalTensor<double> Ylm(lmax);
#pragma omp for
        for (size_t tile_id = 0; tile_id < num_pw_tiles; tile_id++)
        {
          const size_t ig_first = tile_id * size_pw_tile;
          const size_t ig_last = std::min((tile_id + 1) * size_pw_tile, Gvecs.NumGvecs);
          for (size_t ig = ig_first; ig < ig_last; ig++)
          {
            const size_t ig_local = ig - ig_first;
            // calculate phase shift for all the centers of this group
            Gvecs.calc_phase_shift(myRSoA, ig, phase_shift_r[ig_local], phase_shift_i[ig_local]);
            Gvecs.calc_Ylm_G(ig, Ylm, YlmG[ig_local]);
          }
          for (int ip = 0; ip < spline_npoints; ip++)
          {
            double r = delta * static_cast<double>(ip);
            const size_t ip_idx = tid * spline_npoints + ip;
            for (size_t ig = ig_first; ig < ig_last; ig++)
            {
              const size_t ig_local = ig - ig_first;
              // calculate spherical bessel function
              Gvecs.calc_jlm_G(lmax, r, ig, j_lm_G);
              for (size_t lm = 0; lm < lm_tot; lm++)
                j_lm_G[lm] *= YlmG[ig_local][lm];
              const double cG_r = cG[ig + gvec_first].real();
              const double cG_i = cG[ig + gvec_first].imag();
              if (policy == 1)
              {
                // accumulate (cG * j_lm) * e^{iG.R} with SIMD over atoms
                for (size_t lm = 0; lm < lm_tot; lm++)
                {
                  double* restrict vals_r = vals_local[ip_idx][lm * 2].data();
                  double* restrict vals_i = vals_local[ip_idx][lm * 2 + 1].data();
                  const double* restrict ps_r_ptr = phase_shift_r[ig_local].data();
                  const double* restrict ps_i_ptr = phase_shift_i[ig_local].data();
                  double cG_j_r = cG_r * j_lm_G[lm];
                  double cG_j_i = cG_i * j_lm_G[lm];
#pragma omp simd aligned(vals_r, vals_i, ps_r_ptr, ps_i_ptr)
                  for (size_t idx = 0; idx < natoms; idx++)
                  {
                    const double ps_r = ps_r_ptr[idx];
                    const double ps_i = ps_i_ptr[idx];
                    // complex multiply written out on real/imag parts
                    vals_r[idx] += cG_j_r * ps_r - cG_j_i * ps_i;
                    vals_i[idx] += cG_j_i * ps_r + cG_j_r * ps_i;
                  }
                }
              }
              else
              {
                // accumulate (cG * e^{iG.R}) * j_lm with SIMD over lm
                for (size_t idx = 0; idx < natoms; idx++)
                {
                  double* restrict vals_r = vals_local[ip_idx][idx * 2].data();
                  double* restrict vals_i = vals_local[ip_idx][idx * 2 + 1].data();
                  const double* restrict j_lm_G_ptr = j_lm_G.data();
                  double cG_ps_r = cG_r * phase_shift_r[ig_local][idx] - cG_i * phase_shift_i[ig_local][idx];
                  double cG_ps_i = cG_i * phase_shift_r[ig_local][idx] + cG_r * phase_shift_i[ig_local][idx];
#pragma omp simd aligned(vals_r, vals_i, j_lm_G_ptr)
                  for (size_t lm = 0; lm < lm_tot; lm++)
                  {
                    const double jlm = j_lm_G_ptr[lm];
                    vals_r[lm] += cG_ps_r * jlm;
                    vals_i[lm] += cG_ps_i * jlm;
                  }
                }
              }
            }
          }
        }
        // reduce thread-local buffers into all_vals, applying the
        // 4*pi*i^l (plus rotation) phase factor per lm channel
#pragma omp for collapse(2)
        for (int ip = 0; ip < spline_npoints; ip++)
          for (size_t idx = 0; idx < natoms; idx++)
          {
            double* vals = all_vals[idx][ip];
            for (size_t tid = 0; tid < nt; tid++)
              for (size_t lm = 0; lm < lm_tot; lm++)
              {
                double vals_th_r, vals_th_i;
                const size_t ip_idx = tid * spline_npoints + ip;
                if (policy == 1)
                {
                  vals_th_r = vals_local[ip_idx][lm * 2][idx];
                  vals_th_i = vals_local[ip_idx][lm * 2 + 1][idx];
                }
                else
                {
                  vals_th_r = vals_local[ip_idx][idx * 2][lm];
                  vals_th_i = vals_local[ip_idx][idx * 2 + 1][lm];
                }
                const double real_tmp = 4.0 * M_PI * i_power[lm].real();
                const double imag_tmp = 4.0 * M_PI * i_power[lm].imag();
                vals[lm] += vals_th_r * real_tmp - vals_th_i * imag_tmp;
                vals[lm + lm_tot] += vals_th_i * real_tmp + vals_th_r * imag_tmp;
              }
          }
      }
      //app_log() << "Building band " << iorb << " at center " << center_idx << std::endl;
      for (size_t idx = 0; idx < natoms; idx++)
      {
        // reduce all_vals
        band_group_comm.reduce_in_place(all_vals[idx].data(), all_vals[idx].size());
        if (!band_group_comm.isGroupLeader())
          continue;
        // group leader fits one 1D radial spline per lm channel
#pragma omp parallel for
        for (int lm = 0; lm < lm_tot; lm++)
        {
          auto& mycenter = centers[mygroup[idx]];
          aligned_vector<double> splineData_r(spline_npoints);
          UBspline_1d_d* atomic_spline_r;
          for (size_t ip = 0; ip < spline_npoints; ip++)
            splineData_r[ip] = all_vals[idx][ip][lm];
          // last flag selects the boundary condition for lm==0 and lm>3
          atomic_spline_r = einspline::create(atomic_spline_r, 0.0, spline_radius, spline_npoints, splineData_r.data(),
                                              ((lm == 0) || (lm > 3)));
          if (!bspline->is_complex)
          {
            mycenter.set_spline(atomic_spline_r, lm, iorb);
            einspline::destroy(atomic_spline_r);
          }
          else
          {
            // complex case: also fit the imaginary part; real/imag splines
            // are stored at interleaved band indices 2*iorb, 2*iorb+1
            aligned_vector<double> splineData_i(spline_npoints);
            UBspline_1d_d* atomic_spline_i;
            for (size_t ip = 0; ip < spline_npoints; ip++)
              splineData_i[ip] = all_vals[idx][ip][lm + lm_tot];
            atomic_spline_i = einspline::create(atomic_spline_i, 0.0, spline_radius, spline_npoints,
                                                splineData_i.data(), ((lm == 0) || (lm > 3)));
            mycenter.set_spline(atomic_spline_r, lm, iorb * 2);
            mycenter.set_spline(atomic_spline_i, lm, iorb * 2 + 1);
            einspline::destroy(atomic_spline_r);
            einspline::destroy(atomic_spline_i);
          }
        }
      }
#ifdef PRINT_RADIAL
      // NOTE(review): this debug path references 'center_idx' and 'mycenter'
      // which are not in scope at this point, and calls .real()/.imag() on
      // plain doubles; it will not compile if PRINT_RADIAL is defined and
      // needs updating before use.
      char fname[64];
      sprintf(fname, "band_%d_center_%d_pw.dat", iorb, center_idx);
      FILE* fout_pw = fopen(fname, "w");
      sprintf(fname, "band_%d_center_%d_spline_v.dat", iorb, center_idx);
      FILE* fout_spline_v = fopen(fname, "w");
      sprintf(fname, "band_%d_center_%d_spline_g.dat", iorb, center_idx);
      FILE* fout_spline_g = fopen(fname, "w");
      sprintf(fname, "band_%d_center_%d_spline_l.dat", iorb, center_idx);
      FILE* fout_spline_l = fopen(fname, "w");
      fprintf(fout_pw, "# r vals(lm)\n");
      fprintf(fout_spline_v, "# r vals(lm)\n");
      fprintf(fout_spline_g, "# r grads(lm)\n");
      fprintf(fout_spline_l, "# r lapls(lm)\n");
      // write to file for plotting
      for (int ip = 0; ip < spline_npoints - 1; ip++)
      {
        double r = delta * static_cast<double>(ip);
        mycenter.SplineInst->evaluate_vgl(r, mycenter.localV, mycenter.localG, mycenter.localL);
        fprintf(fout_pw, "%15.10lf ", r);
        fprintf(fout_spline_v, "%15.10lf ", r);
        fprintf(fout_spline_g, "%15.10lf ", r);
        fprintf(fout_spline_l, "%15.10lf ", r);
        for (int lm = 0; lm < lm_tot; lm++)
        {
          fprintf(fout_pw, "%15.10lf %15.10lf ", all_vals[center_idx][ip][lm].real(),
                  all_vals[center_idx][ip][lm].imag());
          fprintf(fout_spline_v, "%15.10lf %15.10lf ", mycenter.localV[lm * mycenter.Npad + iorb * 2],
                  mycenter.localV[lm * mycenter.Npad + iorb * 2 + 1]);
          fprintf(fout_spline_g, "%15.10lf %15.10lf ", mycenter.localG[lm * mycenter.Npad + iorb * 2],
                  mycenter.localG[lm * mycenter.Npad + iorb * 2 + 1]);
          fprintf(fout_spline_l, "%15.10lf %15.10lf ", mycenter.localL[lm * mycenter.Npad + iorb * 2],
                  mycenter.localL[lm * mycenter.Npad + iorb * 2 + 1]);
        }
        fprintf(fout_pw, "\n");
        fprintf(fout_spline_v, "\n");
        fprintf(fout_spline_g, "\n");
        fprintf(fout_spline_l, "\n");
      }
      fclose(fout_pw);
      fclose(fout_spline_v);
      fclose(fout_spline_g);
      fclose(fout_spline_l);
#endif
    }
  }
};
} // namespace qmcplusplus
#endif
|
pf3dbench.h | extern rcomplex *t0, *t2, *t0_sav, *t2_sav, *tN_new;
/* Shared globals and entry points of the pf3dbench benchmark.
 * NOTE(review): groupings below are inferred from names only -- confirm
 * against the defining .c files. */
extern rcomplex *t0_big, *t0_big_sav;
extern real *thetb_sav, *thetbig;
extern long nbig;
extern int nxy_mx;
extern int num_thr;
extern double *rnd;
extern double tot_tim;
/* scratch and communication buffers */
extern real *iptmp, *optmp;
extern rcomplex *sndbuf, *rcvbuf;
extern rcomplex *afftbuf;
extern double complex *tmp_dbcom;
extern rcomplex *bigbuf;
extern rcomplex *t0DevPtr;
/* grid / decomposition sizes */
extern int nrows, ncols, nxfull, nyfull, nzfull;
extern int chunksize_x, chunksize_y;
/* setup, timing and allocation helpers */
extern void parm_init(void);
extern double wsecond(double offset);
extern double second(double oldsec);
extern void *wmalloc(size_t nbytes);
/* wave-array lifecycle and copy utilities */
extern rcomplex *make_wave(double tvar_lo, double tvar_hi);
extern void free_wave(rcomplex *tvar);
extern void copy_wave(rcomplex *tvar, rcomplex *tvar_sav);
extern void t0_fixup(void);
extern void copy_arr2d(real *arr_old, real *arr_new);
extern void copy_carr2d(rcomplex *arr_old, rcomplex *arr_new);
extern void copy_arr3d(real *arr_old, real *arr_new);
extern void copy_carr3d(rcomplex *arr_old, rcomplex *arr_new);
extern void copy_to_tN(rcomplex * restrict tN, rcomplex * restrict tN_new);
extern void copy_from_tN(rcomplex * restrict tN_new, rcomplex * restrict tN);
/* benchmark setup/teardown and state reset */
extern void do_init(int nxl_in, int nyl_in, int nzl_in, int nthr_in);
extern void do_cleanup(void);
extern void reset_light(void);
extern void reset_thetb(void);
extern void reset_damp(void);
extern int max_threads(void);
extern void getref(int maxnum);
/* declared for OpenMP 4.5 device offload as well as the host */
#ifdef _OPENMP
#pragma omp declare target
#endif
void copy_tN_pre(rcomplex * restrict tN, rcomplex * restrict tN_new);
void copy_to_tN_omp45(rcomplex * restrict tvar, rcomplex * restrict tN_new);
void copy_from_tN_omp45(rcomplex * restrict tN_new, rcomplex * restrict tvar);
#ifdef _OPENMP
#pragma omp end declare target
#endif
|
DRB064-outeronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized.
The inner loop has loop carried true data dependence.
However, the loop is not parallelized so no race condition.
*/
#include <stdio.h>

int n=100, m=100;
double b[100][100];
/* Initialize the global matrix so that b[i][j] = i * j. Returns 0.
 * NOTE(review): the inner 'parallel for' opens a nested parallel region
 * inside the outer one; with OpenMP nesting disabled (the usual default)
 * it runs on a one-thread team. Kept as-is because this file is a
 * DataRaceBench case and its pragmas are part of the benchmark. */
int init()
{
  int i,j,k; /* k is unused, kept for fidelity with the benchmark source */
#pragma omp parallel for private(i ,j )
  for (i = 0; i < n; i++) {
#pragma omp parallel for private(j )
    for (j = 0; j < m; j++) {
      b[i][j] = i * j;
    }
  }
  return 0;
}
/* Kernel under test (DRB064): only the outermost i-loop is parallelized.
 * The j-loop carries a true dependence (b[i][j] reads b[i][j-1]) but is
 * executed sequentially within each thread, so there is no data race. */
void foo(int n, int m)
{
  int i,j;
#pragma omp parallel for private(i ,j )
  for (i=0;i<n;i++)
    for (j=1;j<m;j++) // Be careful about bounds of j
      b[i][j]=b[i][j-1];
}
/* Print every element of the global n x m matrix b, one value per line.
 * Returns 0 on success (int return kept for interface compatibility).
 * Fix: removed the declared-but-unused local 'k'. Requires <stdio.h>
 * for printf (added at the top of the file). */
int print()
{
  int i, j;
  for (i = 0; i < n; i++) {
    for (j = 0; j < m; j++) {
      printf("%lf\n", b[i][j]);
    }
  }
  return 0;
}
/* Drive the DRB064 scenario: fill b, apply the kernel, dump the matrix. */
int main()
{
  const int rows = 100;
  const int cols = 100;
  init();
  foo(rows, cols);
  print();
  return 0;
}
|
GB_unaryop__abs_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_int16
// op(A') function: GB_tran__abs_uint64_int16
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Casts each of the anz entries of Ax (int16_t) to uint64_t and applies ABS.
// ABS on an unsigned type is the identity (see GB_OP above: z = x), so the
// operator reduces to the cast; negative inputs wrap modulo 2^64, matching
// the generated GraphBLAS semantics for this type pair. The loop is split
// statically across nthreads OpenMP threads. This file is auto-generated:
// real changes belong in the generator template, not here.
GrB_Info GB_unop__abs_uint64_int16
(
    uint64_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transposes A while casting int16_t -> uint64_t and applying the (identity)
// ABS operator. The actual loop body lives in the shared template
// GB_unaryop_transpose.c, instantiated through the GB_* macros defined above;
// Rowcounts/Iter/A_slice/naslice carry the phase-2 slicing state produced by
// the caller. Auto-generated file: edit the generator, not this code.
GrB_Info GB_tran__abs_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
init_a.c | #define N 1000
#include <stdio.h>
/* Fill the N x N matrix so that a[row][col] == 2*row + 3*col.
 * The two 'omp parallel for' pragmas are read by smecc/Par4All as a request
 * for 2D OpenCL work-items, so both stay attached to their loops. */
void init_array(int a[N][N]) {
#pragma omp parallel for
  for (int row = 0; row < N; row++) {
#pragma omp parallel for
    for (int col = 0; col < N; col++) {
      a[row][col] = 3 * col + 2 * row;
    }
  }
}
/* Allocate the matrix, run the (possibly OpenCL-offloaded) initializer and
 * print one sample element; expected output is 2*27 + 3*42 = 180. */
int main() {
  int a[N][N];
  /* smecy directive: map the next call to OpenCL with 'a' as an output */
#pragma smecy map(OpenCL) arg(a, out)
  init_array(a);
  printf("a[27][42] = %d\n", a[27][42]);
  return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * Y is normalized in place by the carry handling (glibc idiom), so the
 * caller's Y may be modified. Returns 1 if the difference is negative,
 * otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into Y's microsecond field when X has fewer usecs. */
  if (x->tv_usec < y->tv_usec)
  {
    const int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry surplus whole seconds out of the microsecond difference. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    const int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the usec difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) {
for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(8*t3+Nx+4,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),64*t4+62),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_binop__lt_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lt_bool
// A.*B function (eWiseMult): GB_AemultB__lt_bool
// A*D function (colscale): GB_AxD__lt_bool
// D*A function (rowscale): GB_DxB__lt_bool
// C+=B function (dense accum): GB_Cdense_accumB__lt_bool
// C+=b function (dense accum): GB_Cdense_accumb__lt_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_bool
// C=scalar+B GB_bind1st__lt_bool
// C=scalar+B' GB_bind1st_tran__lt_bool
// C=A+scalar GB_bind2nd__lt_bool
// C=A'+scalar GB_bind2nd_tran__lt_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense matrices; no accumulator.
// "+" here is this file's operator: cij = (aij < bij) on bool.
// The loop body lives in the included template; this wrapper only
// supplies the types/operator via the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__lt_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// GB_DISABLE is set when this operator/type is compiled out
// (GxB_NO_LT etc.); callers then fall back to the generic path.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using
// the LT_BOOL operator as the accumulator. The slice arrays describe
// how B's entries were pre-partitioned across ntasks parallel tasks.
GrB_Info GB_Cdense_accumB__lt_bool
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// actual work is in the shared template, specialized by the
// GB_* macros (GB_BINOP, GB_GETB, ...) defined earlier in this file
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense
// matrix C, with the LT_BOOL operator as the accumulator.
GrB_Info GB_Cdense_accumb__lt_bool
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block above always returns.
// Harmless artifact of the code generator; do not hand-edit (file is
// auto-generated).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, where the
// "multiply" is cij = (aij < djj). The *_is_pattern flags tell the
// template to ignore the values of that operand (structure only).
GrB_Info GB_AxD__lt_bool
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C's value array, viewed with this operator's output type (bool)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with
// cij = (dii < bij). Mirror image of GB_AxD__lt_bool above.
GrB_Info GB_DxB__lt_bool
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C's value array, viewed with this operator's output type (bool)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns), with
// cij = (aij < bij) where both entries are present. The C_to_* maps
// and TaskList come from the symbolic phase; GB_FREE_ALL (defined just
// above) releases the per-matrix slice workspaces on exit.
GrB_Info GB_AaddB__lt_bool
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces; allocated inside the template as needed,
// freed by GB_FREE_ALL below (NULL-safe)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns),
// with cij = (aij < bij). Same task/workspace structure as
// GB_AaddB__lt_bool, but driven by the emult template.
GrB_Info GB_AemultB__lt_bool
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces; freed by GB_FREE_ALL (NULL-safe)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x < bij) to every entry of B, with the
// scalar x bound as the first operand. Bb is B's bitmap (NULL if B is
// full); positions absent from the bitmap are left untouched in Cx.
GrB_Info GB_bind1st__lt_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *out = (bool *) Cx_output ;
    const bool xval = (*((bool *) x_input)) ;
    const bool *src = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only transform entries present in B's bitmap
        if (GBB (Bb, p))
        {
            out [p] = (xval < src [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij < y) to every entry of A, with the
// scalar y bound as the second operand. Ab is A's bitmap (NULL if A is
// full); positions absent from the bitmap are left untouched in Cx.
GrB_Info GB_bind2nd__lt_bool
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *out = (bool *) Cx_output ;
    const bool *src = (bool *) Ax_input ;
    const bool yval = (*((bool *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only transform entries present in A's bitmap
        if (GBB (Ab, p))
        {
            out [p] = (src [p] < yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x < aij) via the
// GB_CAST_OP macro defined just above. The GB_ATYPE redefinitions
// around the template are required because the macro ordering matters:
// GB_unop_transpose.c reads GB_ATYPE for its input values.
GrB_Info GB_bind1st_tran__lt_bool
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to its file-wide definition for code below
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij < y) via the
// GB_CAST_OP macro defined just above. No GB_ATYPE swap is needed here
// because A is already the first operand of the binary op.
GrB_Info GB_bind2nd_tran__lt_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
opencl_7z_fmt_plug.c | /*
* This software is Copyright (c) 2015-2017 magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
/*
* We've seen one single sample where we could not trust the padding check
* (early rejection). To be able to crack such hashes, define this to 0.
* This hits performance in some cases.
*/
#define TRUST_PADDING 0
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sevenzip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sevenzip);
#else
#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "options.h"
#include "aes.h"
#include "crc32.h"
#include "unicode.h"
#include "dyna_salt.h"
#include "lzma/LzmaDec.h"
#include "lzma/Lzma2Dec.h"
#include "memdbg.h"
#define FORMAT_LABEL "7z-opencl"
#define FORMAT_NAME "7-Zip"
#define FORMAT_TAG "$7z$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA256 AES OPENCL"
#define BENCHMARK_COMMENT " (512K iterations)"
#define BENCHMARK_LENGTH -1000
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PLAINTEXT_LENGTH ((55-8)/2) // 23, rar3 uses 22
#define UNICODE_LENGTH (2 * PLAINTEXT_LENGTH)
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt*)
#define SALT_ALIGN sizeof(struct custom_salt*)
// Host-side mirrors of the structs used by the OpenCL kernels; layout
// must match the kernel source (7z_kernel.cl) exactly.
// Candidate password as UTF-16LE code units, as sent to the GPU.
typedef struct {
uint32_t length;
uint16_t v[PLAINTEXT_LENGTH];
} sevenzip_password;
// Per-candidate result: derived AES key, current iteration round, and
// the early-reject flag set by the padding check on the GPU.
typedef struct {
uint32_t key[32/4];
uint32_t round;
uint32_t reject;
} sevenzip_hash;
// Salt/ciphertext context shared by all candidates in a batch.
typedef struct {
size_t length;
size_t unpacksize;
uint32_t iterations;
//uint32_t salt_size;
//uint8_t salt[16];
uint8_t data[32]; /* Last two blocks of data */
} sevenzip_salt;
// SHA-256 running state kept on the GPU between split-kernel calls.
typedef struct {
cl_uint total[2];
cl_uint state[8];
cl_uchar buffer[64];
} SHA256_CTX;
// Full per-candidate state carried across the iterated hash loop.
typedef struct {
cl_ulong t;
SHA256_CTX ctx;
cl_uint len;
cl_ushort buffer[PLAINTEXT_LENGTH];
} sevenzip_state;
static int *cracked;
static int any_cracked;
static int new_keys;
// Parsed form of one "$7z$..." ciphertext; variable-length (data[] is
// a flexible-array-style tail), managed through the dyna_salt machinery.
static struct custom_salt {
dyna_salt dsalt;
size_t length; /* used in decryption */
size_t unpacksize; /* used in padding check */
size_t crc_len; /* used in CRC calculation */
int NumCyclesPower; /* log2 of the KDF iteration count */
int SaltSize; /* salt length in bytes (0 in this format; see valid()) */
int ivSize; /* AES IV length in bytes, at most 16 */
int type; /* compression type field from the hash (0..2 or 128) */
unsigned char iv[16];
unsigned char salt[16];
unsigned int crc;
unsigned char props[LZMA_PROPS_SIZE]; /* LZMA decoder properties */
unsigned char data[1]; /* encrypted data follows (over-allocated) */
} *cur_salt;
static struct fmt_tests sevenzip_tests[] = {
/* CRC checks passes for this hash (4 bytes of padding) */
{"$7z$128$19$0$1122$8$a264c94f2cd72bec0000000000000000$725883103$112$108$64749c0963e20c74602379ca740165b9511204619859d1914819bc427b7e5f0f8fc67f53a0b53c114f6fcf4542a28e4a9d3914b4bc76baaa616d6a7ec9efc3f051cb330b682691193e6fa48159208329460c3025fb273232b82450645f2c12a9ea38b53a2331a1d0858813c8bf25a831", "openwall"},
/* LZMA before CRC (9 bytes of padding) */
{"$7z$1$19$0$1122$8$732b59fd26896e410000000000000000$2955316379$192$183$7544a3a7ec3eb99a33d80e57907e28fb8d0e140ec85123cf90740900429136dcc8ba0692b7e356a4d4e30062da546a66b92ec04c64c0e85b22e3c9a823abef0b57e8d7b8564760611442ecceb2ca723033766d9f7c848e5d234ca6c7863a2683f38d4605322320765938049305655f7fb0ad44d8781fec1bf7a2cb3843f269c6aca757e509577b5592b60b8977577c20aef4f990d2cb665de948004f16da9bf5507bf27b60805f16a9fcc4983208297d3affc4455ca44f9947221216f58c337f$232$5d00000100", "password"},
/* CRC checks passes for this hash (no padding) */
{"$7z$0$19$0$1122$8$d1f50227759415890000000000000000$1412385885$112$112$5e5b8b734adf52a64c541a5a5369023d7cccb78bd910c0092535dfb013a5df84ac692c5311d2e7bbdc580f5b867f7b5dd43830f7b4f37e41c7277e228fb92a6dd854a31646ad117654182253706dae0c069d3f4ce46121d52b6f20741a0bb39fc61113ce14d22f9184adafd6b5333fb1", "password"},
/* This requires LZMA (no padding) */
{"$7z$1$19$0$1122$8$5fdbec1569ff58060000000000000000$2465353234$112$112$58ba7606aafc7918e3db7f6e0920f410f61f01e9c1533c40850992fee4c5e5215bc6b4ea145313d0ac065b8ec5b47d9fb895bb7f97609be46107d71e219544cfd24b52c2ecd65477f72c466915dcd71b80782b1ac46678ab7f437fd9f7b8e9d9fad54281d252de2a7ae386a65fc69eda$176$5d00000100", "password"},
/* Length checks */
#if DEBUG
{"$7z$128$19$0$1122$8$94fb9024fdd3e6c40000000000000000$3965424295$112$99$1127828817ff126bc45ff3c5225d9d0c5d00a52094909674e6ed3dc431546d9a672738f2fa07556340d604d2efd2901b9d2ac2c0686c25af9c520c137b16c50c54df8703fd0b0606fa721ad70aafb9c4e3b288ef49864e6034021969b4ce11e3b8e269a92090ccf593c6a0da06262116", ""},
{"$7z$128$19$0$1122$8$6fd059d516d5490f0000000000000000$460747259$112$99$af163eb5532c557efca78fbb448aa04f348cd258c94233e6669f4e5025f220274c244d4f2347a7512571d9b6015a1e1a90e281983b743da957437b33092eddb55a5bc76f3ab6c7dbabb001578d1043285f5fa791fd94dd9779b461e44cbfe869f891007335b766774ccee3813ec8cd57", "&"},
{"$7z$128$19$0$1122$8$6d4a12af68d83bfe0000000000000000$993697592$112$99$7c308faa36b667599ee4418435ab621884c5c115ee3b70be454fe99236422f4f2d5cd9c8fcfbe6b6b0805ee602ce8488a08f7ea14a4f5c0c060fc685bff187720a402b23a5cfe3c9c5a5ae07f91209031b8f9804ac10459e15a0158031f6c58e507401ec6e1e6de8f64d94201159432b", "&'"},
{"$7z$128$19$0$1122$8$7527d758a59181830000000000000000$3917710544$112$99$61a9ca9e835bd0f2dc474b34d5d89bcf8cd1bb071a984ee1dcf224174a60bcee140fcf2fde8927fe4f3f4eb4a2cc39faff73f1898ae25cc92bd02939f4317ebb173bf3b6f01eef183163ddd533ad5c076f87341bd8b86d8460c68fc390aa8df89fc4076bdfd24e157f6c07e105c07612", "&'("},
{"$7z$128$19$0$1122$8$68928bade860a2b80000000000000000$3235890186$112$99$4b685a569c3aed78d217bae9ec64fa06b614df55c1cb0d160563d87efe38813accb38dd7037f86cebc91751c2488769c7398dfefaf491c024f2d640dcb388a56404cd5ac475ba16b5f8206fa45d5923b3a0c8dd0f24460ccee0d93bea03ad58b8a8db502a55ba1775560b3d194f342f7", "&'()"},
{"$7z$128$19$0$1122$8$81931b9ba0b069820000000000000000$3094344848$112$99$fdbb2622143d25b13992b1467ce9edce4e3df8ca07535735b76e8abcb0791e384a1d5547483e19c3bd6e5a0742d29c403cfc8b3a003b285e80b350ea9157600eb91c49b329903de9ec9b17d1c95b0e136b579e165a6e80550464fa99830bfd9ee58fc14516b614ff9f84ec80e6880a36", "&'()*"},
{"$7z$128$19$0$1122$8$ccf696913989510d0000000000000000$1238556212$112$99$647264fbc665e73ecfe3ef7055fef0d91cb86833d6df08b2f7a3c1c89cf7cdaa09a802c8bfb2e5c6b55143a315df74d841b349fc8b43613d0f87cc90325fd56fc17ee08df7ce76cdc9cda61bd4d5632e20af3db16e921c755174f291c0aa6581844def4547380e2dd4a574435d17e1e8", "&'()*+"},
{"$7z$128$19$0$1122$8$d618bd3ec8bafd800000000000000000$1349785$112$99$6514e2e7468e6f0ed63796cfc0588ac2d75f024c4a0fa03778bd252d316d03e48a08ffcc0011725ad4f867e9a9666630dff4f352c59bcbadb94b9d0e2c42d653b80f480005ce868a0b1a075b2e00abd743de0867d69cdc8b56c7f9770537d50e6bb11eb0d2d7d8b6af5dd8ecb50ab553", "&'()*+,"},
{"$7z$128$19$0$1122$8$1c1586d191f190890000000000000000$642253888$112$99$f55cf9ab802b10a83471abe9319711ae79906cd6921365167c389470a3a8a72b0d877379daae2c24ea2258e8586f12d5036aff9ddc8e26861467b0843ffb72e4410c2be76ec111d37f875c81b244ed172f1f4765a220d830a9615787e9d07f8582146556e9c566b64897a47d18a82b36", "&'()*+,-"},
{"$7z$128$19$0$1122$8$0df03cbdbc73e22a0000000000000000$3194757927$112$99$df53e9d8b4e02cf2962ad87912021508a36910c399a7abc4a3a5423fa2184816af7172418eb4763924ec8b099b7ca95abdc6faac9aaa6e181ffa60b7e8bdb2bf576536ca69152e3b6b97302c796bbc9dec78db6ba7a4a58e68f8ee28f27dea26bd4f848dc3a3315e97e1463b5c171ce5", "&'()*+,-."},
{"$7z$128$19$0$1122$8$7785351cf9fe5dfa0000000000000000$1304801610$112$99$7b35280384726da8521fee0786ef43e0aa621394a6f015b65cbd7f1329f43c4543b8a451a0007c03a3ce3f61e639c54ede3e580600b113777822b6d562390d14ed236e5bac3d3af63ae23015148a95e7ccbc9eea653b52c606ca09ec51fd2b0c4cfc2b760fccc1fe0ccdd9ee3fcb8129", "&'()*+,-./"},
{"$7z$128$19$0$1122$8$70eb7f4b821cf5310000000000000000$3381356868$112$99$c26db2cb89df1237f323d92044726d03cfc7ba83115e789243c3b2570ae674d8356a23e004b103638b1ea9fe6ff5db844a1ddcaaed8a71a8d8e343f73868b4acafd34d493345439b0e0be87d2cf52eb4cceaafcff0dfaf9cf25080693ede267460320e1282b869a5f0b6c8789e769640", "&'()*+,-./0"},
{"$7z$128$19$0$1122$8$2ac0f1307794d8e10000000000000000$2871514580$112$99$4783d91fa72c377310654e961120e71ecdd27ec2e67366e83291daefcea03514ca9ecea031fcbd25c0759c1f242219e673cee093ef361664f18dacf85ca0620fd7092477ceeff7c548df0a475ce93278a564fe4ddb4ee2e4695cbe417a792e822204390ca5a530208a8ed51bc01f79e6", "&'()*+,-./01"},
{"$7z$128$19$0$1122$8$5bc4988c71cba8b70000000000000000$2815498089$112$99$0e4368dde66925e2bfac9a450291f8f817beaa891f08c4d2735d20b3147df581e2f3c53abfe2b0971186ac39280eb354ca5989f9043ad0288302d0ac59a3c8fa99d26c9619b81d22996f24eec1dba361afdd5e50060c2599a40a00c83c4ee0bc4ebe6e3126a64a743af95d9b22ee5867", "&'()*+,-./012"},
{"$7z$128$19$0$1122$8$33ab0ad513b7d6910000000000000000$107430285$112$99$f9f1195a4210eadc5b23f046f81c8cfaec3b90d8b6b67893f10bd9bedd0d859d0695bca5ce315cecbc2910dce27e4c1a1416675d841901c8d84846360b1919ebcba91143713c6b755758d3db64d39344da18222341818220cc43f3ee3a91cbc288f1aafe377b53def310d3b83d32aee3", "&'()*+,-./0123"},
{"$7z$128$19$0$1122$8$dd490a165c1b90f90000000000000000$2897354864$112$99$51efe41b67875503acebe2e199cb542a279520b468a61ba67b54612e317a84e95879a34eaad82124798f32c19f9c0786e8faaac768da5f6b2c91e3ba9f97a03a992c18b5b9b21a5f2b67ae9daeef37ec115f44bfb8b10ac3cb7862b6c024413a2ee801aa674df05e8b56bd8654f279f5", "&'()*+,-./01234"},
{"$7z$128$19$0$1122$8$9077cb191a5969b40000000000000000$3637063426$112$99$1e74746c59bdfe6b3f3d957493c9d5b92ba358f97e19d30e20443cb2fbac0501e07a162344ac7cf7cfa727c70a2bcf52593accc5c2c070c2331863ac76da5ad2f5de374292a87c6af67ab561f9cf71ae472ed1267d481c250f5b4d82d0ec0b2b8531db1fe4637c3f4e3a08de1b9b5418", "&'()*+,-./012345"},
{"$7z$128$19$0$1122$8$adc090d27b0343d30000000000000000$1147570982$112$99$ac14b9dc3751cfe6c1c719ceef3d73946fff2b0f924e06cd3177883df770e5505551bcf5598277801f46584a4f41530f50007c776d2bb91fd160148042275dfe4e420ff72244409f59c687a5bb2d0fc1bb29138689094fe40bb0f22785c63c631cd05abf4f7f3c9b6832e192e103d2f1", "&'()*+,-./0123456"},
{"$7z$128$19$0$1122$8$8dee69dc35517a2a0000000000000000$87427823$112$99$ea36cf8b577a0b5f31115f8550987f05f174b347a8a6433a08c013ecd816c8ecaad163c62db9bae6c57ace3c2a6ce0b36f78ad4723328cc022906400eed55e0e3685a5e8e6b369df780ee72f3d25ccd49d7f40d013052e080723dd4c0b1c75302c884ea956e3b6fd27261eb8c49dea51", "&'()*+,-./01234567"},
{"$7z$128$19$0$1122$8$200ce603d6f355f10000000000000000$3012105149$112$99$0ae42342f52172ad921178a25df3666e34e5a217d0afb3655088806f821d374bf522c197e59b131dbc574d4c936472f59f8892f69e47724ea52ecc5dc7d3ed734c557c9698a6f01519039714c065ad25008003c93cb7f694ee07267d5fcdebab5d149d5404023a0112faec2264d33ff6", "&'()*+,-./012345678"},
{"$7z$128$19$0$1122$8$a5007fc77fa5cc0b0000000000000000$1082728565$112$99$32c404c9633e9c61b76556e169695248008c51ca8f7f0f79c4a271ac6eb1d905a2622132f2f6988f9f3f5e375c592ec63d92d7b183b5801b149595ed440b23a083633de9f1cb5b6ac3238b7523b23141e686e6cbe9d4d3a28fc6489e902c17aeff6cd4cb516bef5cd5c6def78cb88ad4", "&'()*+,-./0123456789"},
{"$7z$128$19$0$1122$8$fd531c4e580be9a60000000000000000$1843420503$112$99$704289830b1add1c8ee6fd622ecf5b8da01988580bdb52f6269cc61c21838849d3a04299eaee15e0cae0eff9f6c3c82f71e434b3aa1c0ca824b90438c1c983130218acd128d9186e5dc2d19a8db602a0382cb60dadb4641b46fe532b799d29a4b882beaa9217f48ddccc99578617f8a0", "&'()*+,-./0123456789:"},
{"$7z$128$19$0$1122$8$7f94a95f71c1b0df0000000000000000$141406606$112$99$1a510a6fda9788b4f4b2274ea929044c00b61b23946bc417ead90ad64dcc9a55378f9ab74f7d693a5dcf455c00f82f6c2a885b664f4ab10c9969026714ce2773030f1c5872ca3948cd612e21b321826c2a561104d57a3ba2055f03aa9cc264821544ec4bccc41f4ac76aab97accb8f9c", "&'()*+,-./0123456789:;"},
{"$7z$128$19$0$1122$8$e24e93c7a9ebde080000000000000000$718561925$112$99$580bf36388526c932c22e3227b51774b6963a9c5b96fc8e2ac70a4302864fa88f50e7c00d9a79e0bca0f07a236e51200dc23435b7680e6fa99b19d790ac093af615a972f8b232686c21279234a2582f9714c5a1a2d326084158eba3e81b4f8ad40784d84baa8ddbed19f1c6603156d2c", "&'()*+,-./0123456789:;<"},
#if PLAINTEXT_LENGTH > 23
{"$7z$128$19$0$1122$8$6fbd519735b131710000000000000000$1248418560$112$99$cc9e3c97073d7fd37f04d4e6983b386e3ac00f6292dedb0f566dccf22cdbbb55fee8669edade383e96aa0a740e2b42aa7fddbe5831cac10828c624ee03a1a256c6e777c3d714c55296cb815c509a252b9426fe8d4566c944efe3fac5ea94910e55a390aef2c729a031e832c406049810", "&'()*+,-./0123456789:;<="},
{"$7z$128$19$0$1122$8$3ce1b899fc03d9c30000000000000000$1452122600$112$99$d4be60d5ab390713c7189f0dd808227c01f15f71fcf4bbccce6cb9238d6418c115eff59784d96ff8944575710a5799c7bcb761e8f1bfb7646a0e8fac3728ba4cca44fb82e5dd9f87bb26828566af64374b512fa094d35af8d743bded88b6257ec98a99b50dd225d4608b283bf035ac08", "&'()*+,-./0123456789:;<=>"},
{"$7z$128$19$0$1122$8$656e2285aabed25b0000000000000000$3885982465$112$99$77f2871e556e7f5278a9e896e91cd386ca8935128957d31fdce0603ea0e71c08b908a4c2d9f2d279757ced848be9482067c9d7935c88e5233aaa94a101d29908f7f015646758029d2078d25d0886bb9f0cdc0dd5136d72e90ceeea678564b199866dd8c9e5fe927102ee2dcf1cd4167f", "&'()*+,-./0123456789:;<=>?"},
{"$7z$128$19$0$1122$8$44ffefa48fa5a5b00000000000000000$1011653568$112$99$5d2504a1eb819218b9ad552e377d37e811ffccb64a554f404d982d209edfafb893b679cc881bbcbc606e67ffa055f712d7f140b554769511bc00321765830ea7c5db810fa2000ae7f4250b74aa61d881db66ae6f30e4c8e71887960c117b268d9934b8b5d52d4abdcb42b0e4ff40b805", "&'()*+,-./0123456789:;<=>?@"},
{"$7z$128$19$0$1122$8$b6e089dd0c52b6b80000000000000000$1229766981$112$99$49a8334d64d9cc7d710fe3b9c35f5d7cb0ec44d5db8a90966fbee93f85fdeeeca859c55519addb20c4628c9204dd24d1169b34dc53a2a685440fae7ed6748c172a8e9dcc42c8dffe60196818ad17a6f9314fcfd4d97cab3c18cf279df344e00fd04eaff32f29cbfcdb6832cfb69fe351", "&'()*+,-./0123456789:;<=>?@A"},
#endif /* PLAINTEXT_LENGTH > 23 */
#endif /* DEBUG */
{NULL}
};
static sevenzip_password *inbuffer;
static sevenzip_hash *outbuffer;
static sevenzip_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt;
static cl_kernel sevenzip_init, sevenzip_final, sevenzip_aes;
#define insize (sizeof(sevenzip_password) * global_work_size)
#define outsize (sizeof(sevenzip_hash) * global_work_size)
#define statesize (sizeof(sevenzip_state) * global_work_size)
#define saltsize sizeof(sevenzip_salt)
#define cracked_size (sizeof(*cracked) * global_work_size)
static struct fmt_main *self;
#define HASH_LOOPS 0x4000
#define LOOP_COUNT ((1 << currentsalt.iterations) / HASH_LOOPS)
#define STEP 0
#define SEED 16
static int split_events[] = { 2, -1, -1 };
static const char *warn[] = {
"xfer: ", ", init: ", ", crypt: ", ", final: ", ", aes: ", ", xfer: "
};
// This file contains auto-tuning routine(s). It has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
size_t s;
s = autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_init);
s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel));
s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_final));
s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_aes));
return s;
}
/* Allocate host buffers and device buffers for one autotuned
 * global_work_size, and bind the (salt/in/out) buffers to the four
 * kernels' arguments. Called by the autotuner via opencl_init_auto_setup;
 * released by release_clobj(). Note: insize/outsize/cracked_size are
 * macros that read global_work_size. */
static void create_clobj(size_t global_work_size, struct fmt_main *self)
{
cl_int cl_error;
// host-side staging buffers; inbuffer/cracked zeroed so untouched
// slots are benign
inbuffer = (sevenzip_password*) mem_calloc(1, insize);
outbuffer = (sevenzip_hash*) mem_alloc(outsize);
cracked = mem_calloc(1, cracked_size);
// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_salt =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, saltsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem salt");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
// wire the buffers to each kernel's fixed argument slots
HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 0, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 1, sizeof(mem_salt),
&mem_salt), "Error while setting mem_salt kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 2, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_aes, 0, sizeof(mem_salt),
&mem_salt), "Error while setting mem_salt kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_aes, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
}
/* Release the device buffers and host staging buffers created by
 * create_clobj(). Uses `cracked` as the "was create_clobj run" flag,
 * so it is safe to call when nothing was allocated. */
static void release_clobj(void)
{
	if (!cracked)
		return;

	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(cracked);
}
/* Format teardown: undo reset()'s work (buffers, kernels, program).
 * `autotuned` doubles as the "was reset() completed" flag, so calling
 * done() without a prior reset() is a no-op. */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(sevenzip_init), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(sevenzip_final), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(sevenzip_aes), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

	autotuned--;
}
/* One-time format init: remember our fmt_main, prepare the chosen GPU,
 * and warm up CRC32. In UTF-8 mode, each of the 23 UTF-16 code units
 * may take up to 3 input bytes, hence the 3x (capped at 125) cap. */
static void init(struct fmt_main *_self)
{
CRC32_t crc;
self = _self;
opencl_prepare_dev(gpu_id);
// NOTE(review): `crc` is never used afterwards — presumably
// CRC32_Init has a global side effect (building the CRC table);
// confirm before removing.
CRC32_Init(&crc);
if (options.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
/* Per-database (re)initialization: on first call, build the OpenCL
 * program, create the four kernels of the split pipeline
 * (init -> loop x N -> final -> aes), then run the shared autotuner,
 * which calls create_clobj()/release_clobj() as it probes sizes.
 * Subsequent calls are no-ops (guarded by `autotuned`). */
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
cl_int cl_error;
snprintf(build_opts, sizeof(build_opts),
"-DPLAINTEXT_LENGTH=%d -DHASH_LOOPS=%d",
PLAINTEXT_LENGTH, HASH_LOOPS);
opencl_init("$JOHN/kernels/7z_kernel.cl",
gpu_id, build_opts);
sevenzip_init = clCreateKernel(program[gpu_id], "sevenzip_init",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// the iterated SHA-256 loop kernel is the autotuner's split kernel
crypt_kernel = clCreateKernel(program[gpu_id], "sevenzip_loop",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
sevenzip_final = clCreateKernel(program[gpu_id], "sevenzip_final",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
sevenzip_aes = clCreateKernel(program[gpu_id], "sevenzip_aes",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, HASH_LOOPS, split_events,
warn, 2, self,
create_clobj, release_clobj,
sizeof(sevenzip_state), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1 << 19, 0, 15000000000ULL);
}
}
/*
 * Validate one "$7z$..." ciphertext line without crashing on malformed
 * input. Parses a strtokm()-tokenized copy; both exit paths free it.
 * Field order: type $ NumCyclesPower $ salt_len $ salt $ iv_len $ iv $
 * crc $ data_len $ unpacksize $ data [ $ crc_len $ coder_props ].
 * Returns 1 if the line is well-formed and supported, else 0.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int type, len, NumCyclesPower;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL)
goto err;
if (strlen(p) > 3 || !isdec(p))
goto err;
type = atoi(p);
if (strlen(p) == 0 || type < 0 || type > 128) /* Compression type */
goto err;
if (type > 2 && type != 128) /* none, LZMA or LZMA2 */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* NumCyclesPower */
goto err;
if (strlen(p) > 2)
goto err;
if (!isdec(p))
goto err;
NumCyclesPower = atoi(p);
if (NumCyclesPower > 24 || NumCyclesPower < 1)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len != 0) /* salt length, we currently only support it in CPU format */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv length */
goto err;
if (strlen(p) > 2)
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > 16) /* iv length */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv */
goto err;
if (!ishexlc(p))
goto err;
/* any hex digits beyond the declared IV length must be zero padding */
if (strlen(p) / 2 > len && strcmp(p+len*2, "0000000000000000"))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* crc */
goto err;
if (!isdecu(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* data length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* unpacksize */
goto err;
if (!isdec(p)) /* no way to validate, other than atoi() works for it */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* data */
goto err;
if (strlen(p) / 2 != len) /* validates data_len atoi() */
goto err;
if (!ishexlc(p))
goto err;
/* compressed entries (LZMA/LZMA2) carry two extra fields */
if (type && type != 128) {
if ((p = strtokm(NULL, "$")) == NULL) /* CRC len */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* Coder props */
goto err;
if (!ishexlc(p))
goto err;
if (type == 1 && strlen(p) != 10)
goto err;
else if (type == 2 && strlen(p) != 2)
goto err;
}
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/*
 * Parse a valid()-checked ciphertext into a heap-allocated custom_salt of
 * variable size (the `data` blob is appended after the struct). Because
 * the format uses FMT_DYNA_SALT, what we hand back is a persistent
 * one-pointer cell (`ptr`) holding the address of the new salt.
 * The dsalt fields define which bytes participate in salt comparison.
 */
static void *get_salt(char *ciphertext)
{
struct custom_salt cs;
struct custom_salt *psalt;
static void *ptr;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
if (!ptr)
ptr = mem_alloc_tiny(sizeof(struct custom_salt*),
sizeof(struct custom_salt*));
memset(&cs, 0, sizeof(cs));
ctcopy += TAG_LENGTH;
p = strtokm(ctcopy, "$");
cs.type = atoi(p);
p = strtokm(NULL, "$");
cs.NumCyclesPower = atoi(p);
p = strtokm(NULL, "$");
cs.SaltSize = atoi(p);
p = strtokm(NULL, "$"); /* salt */
p = strtokm(NULL, "$");
cs.ivSize = atoi(p);
p = strtokm(NULL, "$"); /* iv */
for (i = 0; i < cs.ivSize; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$"); /* crc */
cs.crc = atou(p); /* unsigned function */
p = strtokm(NULL, "$");
cs.length = atoll(p);
/* NOTE(review): malloc() result is not checked here — presumably relying
 * on the project's OOM policy; confirm. */
psalt = malloc(sizeof(struct custom_salt) + cs.length - 1);
memcpy(psalt, &cs, sizeof(cs));
p = strtokm(NULL, "$");
psalt->unpacksize = atoll(p);
p = strtokm(NULL, "$"); /* data */
for (i = 0; i < psalt->length; i++)
psalt->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
if (cs.type && cs.type != 128) {
p = strtokm(NULL, "$"); /* CRC length */
psalt->crc_len = atoi(p);
p = strtokm(NULL, "$"); /* Coder properties */
for (i = 0; p[i * 2] ; i++)
psalt->props[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
MEM_FREE(keeptr);
psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, length);
psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, length, data, psalt->length);
psalt->dsalt.salt_alloc_needs_free = 1;
memcpy(ptr, &psalt, sizeof(void*));
return ptr;
}
/*
 * Select the active salt for subsequent crypt_all() calls and push the
 * GPU-side portion (last 32 data bytes, lengths, iteration count) to the
 * device. A change of iteration count forces the KDF kernels to re-run
 * (new_keys = 1) even if the queued keys did not change.
 *
 * Fix: `&currentsalt` had been mangled to the mojibake `¤tsalt`
 * (HTML entity `&curren;`), which does not compile.
 */
static void set_salt(void *salt)
{
cur_salt = *((struct custom_salt **)salt);
//memcpy(currentsalt.salt, cur_salt->salt, cur_salt->SaltSize);
//currentsalt.salt_size = cur_salt->SaltSize;
if (currentsalt.iterations != cur_salt->NumCyclesPower)
new_keys = 1;
if (cur_salt->length >= 32)
memcpy(currentsalt.data, cur_salt->data + cur_salt->length - 32, 32);
currentsalt.length = cur_salt->length;
currentsalt.unpacksize = cur_salt->unpacksize;
currentsalt.iterations = cur_salt->NumCyclesPower;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt,
CL_FALSE, 0, saltsize, &currentsalt, 0, NULL, NULL),
"Transfer salt to gpu");
}
/* Wipe the whole host-side key input buffer (lengths and key material). */
static void clear_keys(void)
{
memset(inbuffer, 0, insize);
}
/*
 * Queue one candidate password: convert to UTF-16LE (honoring --encoding)
 * and store byte length plus raw key material in the input buffer slot.
 */
static void sevenzip_set_key(char *key, int index)
{
UTF16 c_key[PLAINTEXT_LENGTH + 1];
int length = strlen(key);
/* Convert password to utf-16-le format (--encoding aware) */
length = enc_to_utf16(c_key, PLAINTEXT_LENGTH,
(UTF8*)key, length);
/* A non-positive return signals truncation/invalid input; fall back to
 * the length of what actually got converted. */
if (length <= 0)
length = strlen16(c_key);
length *= 2; /* UTF-16 code units -> bytes */
inbuffer[index].length = length;
memcpy(inbuffer[index].v, c_key, length);
new_keys = 1;
}
/*
 * Return the queued plaintext for `index`, converted back from the stored
 * UTF-16LE bytes to the target encoding.
 * NOTE(review): utf16_to_enc() presumably returns a static/managed buffer
 * the caller must not free — confirm against unicode.c.
 */
static char *get_key(int index)
{
UTF16 c_key[PLAINTEXT_LENGTH + 1];
int length = inbuffer[index].length;
memcpy(c_key, inbuffer[index].v, length);
c_key[length / 2] = 0; /* NUL-terminate (length is in bytes) */
return (char*)utf16_to_enc(c_key);
}
/*
 * Deterministic total order on salts: cost (NumCyclesPower) first, then
 * salt bytes, then IV. Needed so inter-salt session restore is stable.
 */
static int salt_compare(const void *x, const void *y)
{
int c;
const struct custom_salt *s1 = *((struct custom_salt**)x);
const struct custom_salt *s2 = *((struct custom_salt**)y);
// we had to make the salt order deterministic, so that intersalt-restore works
if (s1->NumCyclesPower != s2->NumCyclesPower)
return (s1->NumCyclesPower - s2->NumCyclesPower);
c = memcmp(s1->salt, s2->salt, 16);
if (c) return c;
return memcmp(s1->iv, s2->iv, 16);
}
/* Allocator callbacks handed to the LZMA SDK (ISzAlloc); `p` is unused. */
static void *SzAlloc(void *p, size_t size) { return mem_alloc(size); }
/* Fix: semicolon belongs inside the braces (MEM_FREE is a statement-like
 * macro), and the stray `;` after the function body is not valid C. */
static void SzFree(void *p, void *address) { MEM_FREE(address); }
/*
 * CPU-side verification of one GPU-derived AES key.
 * Steps: optional early reject from GPU padding check, AES-256-CBC
 * decrypt of (a prefix of) the packed data, optional LZMA/LZMA2
 * decompression, then CRC32 comparison against the stored CRC.
 * Returns 1 on match, 0 otherwise.
 */
static int sevenzip_decrypt(sevenzip_hash *derived)
{
unsigned char *out = NULL;
AES_KEY akey;
unsigned char iv[16];
/* union lets us read the 4 CRC bytes back as one host-order integer */
union {
unsigned char crcc[4];
unsigned int crci;
} _crc_out;
unsigned char *crc_out = _crc_out.crcc;
unsigned int ccrc;
CRC32_t crc;
size_t crc_len = cur_salt->unpacksize;
/* For compressed entries, estimate how many AES blocks are needed to
 * cover crc_len bytes of decompressed output (ratio heuristic). */
size_t aes_len = cur_salt->crc_len ?
(cur_salt->crc_len * 11 + 150) / 160 * 16 : crc_len;
/*
 * Early rejection (only decrypt last 16 bytes). We don't seem to
 * be able to trust this, see #2532, so we only do it for truncated
 * hashes (it's the only thing we can do!).
 */
if ((TRUST_PADDING || cur_salt->type == 0x80) && derived->reject)
return 0;
if (cur_salt->type == 0x80) /* We only have truncated data */
return 1;
/* Complete decryption, or partial if possible */
aes_len = MIN(aes_len, cur_salt->length);
out = mem_alloc(aes_len);
memcpy(iv, cur_salt->iv, 16);
AES_set_decrypt_key((unsigned char*)derived->key, 256, &akey);
AES_cbc_encrypt(cur_salt->data, out, aes_len, &akey, iv, AES_DECRYPT);
/* Optional decompression before CRC */
if (cur_salt->type == 1) { /* LZMA */
ISzAlloc st_alloc = {SzAlloc, SzFree};
ELzmaStatus status;
size_t in_size = aes_len;
uint8_t *new_out;
SRes rc;
size_t out_size = cur_salt->crc_len;
new_out = mem_alloc(out_size);
if ((rc = LzmaDecode(new_out, &out_size, out, &in_size,
cur_salt->props, LZMA_PROPS_SIZE,
LZMA_FINISH_ANY, &status,
&st_alloc)) == SZ_OK &&
out_size == cur_salt->crc_len) {
MEM_FREE(out);
out = new_out; /* CRC the decompressed stream instead */
crc_len = cur_salt->crc_len;
} else {
MEM_FREE(new_out);
goto exit_bad;
}
}
else if (cur_salt->type == 2) { /* LZMA2: single props byte */
Byte prop = cur_salt->props[0];
ISzAlloc st_alloc = {SzAlloc, SzFree};
ELzmaStatus status;
size_t in_size = aes_len;
uint8_t *new_out;
SRes rc;
size_t out_size = cur_salt->crc_len;
new_out = mem_alloc(out_size);
if ((rc = Lzma2Decode((Byte*)new_out, &out_size, out, &in_size,
prop, LZMA_FINISH_ANY, &status,
&st_alloc)) == SZ_OK &&
out_size == cur_salt->crc_len) {
MEM_FREE(out);
out = new_out;
crc_len = cur_salt->crc_len;
} else {
MEM_FREE(new_out);
goto exit_bad;
}
}
/* CRC test */
CRC32_Init(&crc);
CRC32_Update(&crc, out, crc_len);
CRC32_Final(crc_out, crc);
ccrc = _crc_out.crci; /* computed CRC */
#if !ARCH_LITTLE_ENDIAN
ccrc = JOHNSWAP(ccrc);
#endif
if (ccrc == cur_salt->crc)
goto exit_good;
exit_bad:
MEM_FREE(out);
return 0;
exit_good:
MEM_FREE(out);
return 1;
}
/*
 * Run the KDF pipeline (init -> 2^NumCyclesPower/HASH_LOOPS loop passes ->
 * final) on the GPU for `count` queued keys, then verify candidates on
 * the CPU via sevenzip_decrypt(). The KDF is skipped entirely when the
 * keys and iteration count are unchanged (new_keys == 0).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t *lws = local_work_size ? &local_work_size : NULL;
//fprintf(stderr, "%s(%d) lws %zu gws %zu\n", __FUNCTION__, count, local_work_size, global_work_size);
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
if (ocl_autotune_running || new_keys) {
int i;
/* NOTE(review): global_work_size is only recomputed on this path;
 * the AES kernel below relies on the value from the last KDF run
 * when new_keys == 0 — confirm that's intended. */
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
// Run 1st kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_init, 1,
NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]),
"Run init kernel");
// Run loop kernel (split so the watchdog/UI stays responsive)
for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id],
crypt_kernel, 1, NULL, &global_work_size, lws, 0,
NULL, multi_profilingEvent[2]),
"Run loop kernel");
BENCH_CLERROR(clFinish(queue[gpu_id]),
"Error running loop kernel");
opencl_process_event();
}
// Run final kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_final, 1,
NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]),
"Run final loop kernel");
}
new_keys = 0;
if (TRUST_PADDING || cur_salt->type == 0x80) {
// Run AES kernel (only for truncated hashes)
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_aes, 1,
NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[4]),
"Run AES kernel");
}
// Read the result back (blocking read doubles as a sync point)
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[5]),
"Copy result back");
if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
/* decrypt and check */
if ((cracked[index] = sevenzip_decrypt(&outbuffer[index])))
{
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
return count;
}
/* True iff any candidate in the last crypt_all() batch was cracked;
 * the per-index work already happened there, so this is a flag read. */
static int cmp_all(void *binary, int count)
{
(void)binary;
(void)count;
return any_cracked;
}
/* Per-candidate result, precomputed in crypt_all(). */
static int cmp_one(void *binary, int index)
{
(void)binary;
return cracked[index];
}
/* Full verification was already done CPU-side in sevenzip_decrypt(),
 * so there is nothing left to check here. */
static int cmp_exact(char *source, int index)
{
(void)source;
(void)index;
return 1;
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = *((struct custom_salt **)salt);
return (unsigned int)(1 << my_salt->NumCyclesPower);
}
static unsigned int padding_size(void *salt)
{
struct custom_salt *my_salt;
my_salt = *((struct custom_salt **)salt);
return my_salt->length - my_salt->unpacksize;
}
static unsigned int compression_type(void *salt)
{
struct custom_salt *my_salt;
my_salt = *((struct custom_salt **)salt);
return my_salt->type;
}
/*
 * Format descriptor wiring the functions above into John's format API.
 * First brace group: static parameters (sizes, flags, tunable-cost names,
 * signatures, self tests); second: method table.
 */
struct fmt_main fmt_opencl_sevenzip = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_DYNA_SALT | FMT_HUGE_INPUT,
/* tunable cost names, in the order the callbacks below report them */
{
"iteration count",
"padding size",
"compression type",
},
{ FORMAT_TAG },
sevenzip_tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{
iteration_count,
padding_size,
compression_type,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
salt_compare,
set_salt,
sevenzip_set_key,
get_key,
clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <string>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
/*!
 * \brief Parameters of the Reshape operator. `shape` (with the special
 * values 0/-1/-2/-3/-4, see InferReshapeShape) is the current interface;
 * `target_shape`/`keep_highest` are the deprecated legacy interface.
 */
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
mxnet::TShape target_shape;
bool keep_highest;
mxnet::Tuple<int> shape;
bool reverse;
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape)
.set_default(mxnet::Tuple<int>())
.describe("The target shape");
DMLC_DECLARE_FIELD(reverse)
.set_default(false)
.describe("If true then the special values are inferred from right to left");
DMLC_DECLARE_FIELD(target_shape)
.set_default(mxnet::TShape(0, -1))
.describe("(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest).set_default(false)
.describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
// Equality is needed so the param can be used as a cache/map key.
bool operator==(const ReshapeParam &other) const {
return this->target_shape == other.target_shape &&
this->keep_highest == other.keep_highest &&
this->shape == other.shape &&
this->reverse == other.reverse;
}
};
/*!
 * \brief Expand the user-supplied reshape spec into a concrete shape.
 * Special values in `shape`:
 *   0  copy the corresponding dim from `dshape`
 *  -1  infer this dim from the remaining size (at most one allowed)
 *  -2  copy all remaining source dims
 *  -3  merge the next two source dims into one
 *  -4  split one source dim into the following two spec values
 * With `reverse`, the spec is applied right-to-left.
 */
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape, bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;
size_t src_idx = 0;  // cursor into the source dims consumed so far
int inf_idx = -1;    // position of the single -1 (inferred) dim, if any
if (reverse) {
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1);
src_idx++;
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len-1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
// unknown (-1) input dims propagate as unknown output
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
if (inf_idx >= 0) {
// resolve the single -1: total size divided by the product of the rest
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp) new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
/*!
 * \brief Back-propagate a fully known output shape into `in`, filling in
 * at most one unknown input dim from out.Size(). Returns true when `in`
 * ends up (or already was) fully known.
 *
 * Fix: if no unknown dim is found — e.g. the ndim of `*in` itself is
 * unknown (-1), so the loop never runs — `zero_axis` stayed -1 and
 * `(*in)[-1]` was an out-of-bounds write. Bail out with false instead.
 */
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
if (shape_is_known(*in) && shape_is_known(out)) {
return true;
} else if (!shape_is_known(out)) {
return false;
} else {
int zero_axis = -1;
int known_dim_size_prod = 1;
for (int i = 0; i < in->ndim(); i++) {
if (!mxnet::dim_size_is_known(*in, i)) {
if (zero_axis != -1)
return false;  // more than 1 zero found.
else
zero_axis = i;
} else {
known_dim_size_prod *= (*in)[i];
}
}
if (zero_axis == -1)
return false;  // nothing to infer (e.g. ndim of *in unknown)
(*in)[zero_axis] = out.Size() / known_dim_size_prod;
return true;
}
}
/*!
 * \brief Shape inference for Reshape. Handles the modern `shape` spec,
 * the deprecated `target_shape` path, and (when neither is given) pure
 * reverse inference from an already-known output shape.
 */
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape &dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;  // dims marked 0 in target_shape are to be inferred
index_t inf_idx = 0;
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
// temporarily set to 1 so oshape.Size() is the known-dims product
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
return shape_is_known((*out_attrs)[0])
&& ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
/*!
 * \brief Shape inference for Flatten: keep dim 0, collapse all remaining
 * dims into one.
 *
 * Fix: the collapsed-dim accumulator was a plain `int`, which overflows
 * for inputs with more than 2^31-1 elements per leading dim; accumulate
 * in index_t instead (interface unchanged).
 */
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape &dshape = (*in_attrs)[0];
if (!shape_is_known(dshape)) return false;
index_t target_dim = 1;
for (int i = 1; i < dshape.ndim(); ++i) {
target_dim *= dshape[i];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
return true;
}
/*! \brief Parameters of Transpose; empty `axes` means reverse all axes. */
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes;
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
// Equality is needed so the param can be used as a cache/map key.
bool operator==(const TransposeParam &other) const {
return this->axes == other.axes;
}
};
/*!
 * \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
 * \param in input tensor
 * \param out output tensor
 * \param row shape of dim 0 of input
 * \param col shape of dim 1 of input
 * \tparam DType Data type
 * \tparam is_addto accumulate into `out` (+=) instead of overwriting it
 */
template<typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
// ensure cache line hits and prevent cache miss for any configuration
// L1 cache size to be utilized = 32kb = 2^15
// Largest size of a single unit of any dtype <= 8 byte = 2^3
// Number of elements - (2^15/2^3) = 2^12
// Block-size - 2^6 v 2^6 (64 v 64)
// But we could leverage unrolling of for loops (for parallelization)
// Block-size - 2^5 v 2^5 (32 v 32) with potential 4 pragma for loop unrolled
// blocksize * blocksize * num_threads = cache_size / dtype_size
// Instead of explicit unroll, let compiler figure out optimal unroll factor
const index_t blocksize = 32;
// collapse 2 parallelizes 2 for loops
// inner 2 for loops aren't parallelized to prevent cache miss
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
for (index_t i = 0; i < row; i += blocksize) {
for (index_t j = 0; j < col; j += blocksize) {
// transpose the block
for (index_t a = j; (a < blocksize + j) && (a < col); ++a) {
for (index_t b = i; (b < blocksize + i) && (b < row); ++b) {
if (!is_addto) {
out[a * row + b] = in[b * col + a];
} else {
out[a * row + b] += in[b * col + a];
}
}
}
}
}
}
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i) return false;
}
return true;
}
/*!
 * \brief Dispatch a transpose of `src` into `ret` along `axes`.
 * Fast paths: pseudo-2D GPU transpose, identity permutation (flat copy),
 * cache-blocked 2D CPU transpose; otherwise mshadow transpose for 3-6D.
 * \tparam is_addto accumulate into `ret` (+=) instead of overwriting
 */
template<typename xpu, bool is_addto = false>
void TransposeImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(src.type_flag_, ret.type_flag_);
// zero-size tensor, no need to compute
if (src.shape_.Size() == 0U) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
// This transpose can be used only if there exist n and m such that:
// params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
// Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
if (isPseudo2DTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
transpose_pseudo2D<DType, is_addto>(ret, src, axes, s);
});
return;
}
#endif
// Special handle the identity case
if (IsIdentityTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
if (!is_addto) {
// Use memcpy to accelerate the speed
Copy(out, in, s);
} else {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
s, ret.Size(), out.dptr_, in.dptr_);
}
});
return;
}
// Handle the general transpose case, one rank per switch arm (mshadow
// transpose needs the rank at compile time via axes.get<N>()).
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
switch (axes.ndim()) {
case 2: {
Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
} else {
LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
"in GPU has been covered by transpose_pseudo2D."
" Report an issue in Github.";
}
break;
}
case 3: {
Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<3>());
} else {
out += transpose(in, axes.get<3>());
}
break;
}
case 4: {
Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<4>());
} else {
out += transpose(in, axes.get<4>());
}
break;
}
case 5: {
Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<5>());
} else {
out += transpose(in, axes.get<5>());
}
break;
}
case 6: {
Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<6>());
} else {
out += transpose(in, axes.get<6>());
}
break;
}
default:
LOG(FATAL) << "Transpose support at most 6 dimensions";
break;
}
});
}
// matrix transpose
/*!
 * \brief FCompute entry point for Transpose: normalize `axes` (empty
 * means reverse all axes, negatives are canonicalized) and forward to
 * TransposeImpl with the request type mapped to the is_addto template.
 */
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
if (req[0] == kNullOp) {
return;
}
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK(req[0] == kWriteTo || req[0] == kAddTo)
<< "Transpose only supports kNullOp, kWriteTo and kAddTo";
mxnet::TShape axes;
if (param.axes.ndim() == 0) {
// default: fully reversed axis order
axes = mxnet::TShape(inputs[0].ndim(), -1);
for (int i = 0; i < axes.ndim(); ++i) {
axes[i] = axes.ndim() - 1 - i;
}
} else {
axes = common::CanonicalizeAxes(param.axes);
}
if (req[0] == kAddTo) {
TransposeImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes);
} else {
TransposeImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes);
}
}
/*!
 * \brief Bidirectional shape inference for Transpose: `ret` is the output
 * shape permuted forward from the input, `get` is the input shape
 * recovered backward from the output; both are assigned so partially
 * known shapes propagate in either direction.
 */
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& shp = (*in_attrs)[0];
mxnet::TShape& out_shp = (*out_attrs)[0];
CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions";
if (shp.ndim() == -1 && out_shp.ndim() == -1)
return false;  // none of the shapes is known
if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
CHECK_EQ(out_shp.ndim(), shp.ndim());
mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
if (param.axes.ndim() == 0) {
// default permutation: reversed axes
for (int i = 0; i < shp.ndim(); ++i) {
ret[i] = shp[shp.ndim()-1-i];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[shp.ndim()-1-i] = out_shp[i];
}
} else {
CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
for (int i = 0; i < shp.ndim(); ++i) {
CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
ret[i] = shp[param.axes[i]];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[param.axes[i]] = out_shp[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
return shape_is_known(ret);
}
/*! \brief Parameter of expand_dims: the insertion position of the new axis. */
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
int axis;
DMLC_DECLARE_PARAMETER(ExpandDimParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Position where new axis is to be inserted. Suppose that "
"the input `NDArray`'s dimension is `ndim`, the range of "
"the inserted axis is `[-ndim, ndim]`");
}
bool operator==(const ExpandDimParam &other) const {
return this->axis == other.axis;
}
// Serialize fields back to their string form (used for caching/FFI).
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s;
axis_s << axis;
(*dict)["axis"] = axis_s.str();
}
};
/*!
 * \brief Bidirectional shape inference for expand_dims: forward inserts a
 * size-1 axis at `axis`; backward strips it to recover the input shape.
 * Works when either side is unknown (ndim inferred from the other side).
 */
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
if (!mxnet::ndim_is_known(in_attrs->at(0)) && !mxnet::ndim_is_known(out_attrs->at(0))) {
return false;
}
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& oshape = (*out_attrs)[0];
int indim = ishape.ndim();
bool unknown_ishape = false;
if (-1 == indim) {
// infer the input rank from the (known) output rank
indim = oshape.ndim() - 1;
unknown_ishape = true;
}
int axis = param.axis;
if (axis < 0) {
axis += indim + 1;
}
CHECK(axis >= 0 && axis <= indim)
<< "axis must be in the range [" << -indim << ", " << indim << "] ("
<< param.axis << " provided)";
// forward: input dims with a 1 spliced in at `axis`
mxnet::TShape ret(indim + 1, -1);
for (int i = 0; i < axis; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i]);
}
ret[axis] = 1;
for (int i = axis+1; i < indim+1; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i-1]);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
// backward: output dims with the inserted axis removed
ret = mxnet::TShape(indim, -1);
for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently MKLDNN only supports step = 1 or step has no value
// (i.e. every specified step must be absent or exactly 1).
inline bool SupportMKLDNNSlice(const SliceParam& param) {
const int nsteps = param.step.ndim();
if (nsteps == 0) {
return true;
}
for (int axis = 0; axis < nsteps; ++axis) {
const auto& step = param.step[axis];
if (step.has_value() && step.value() != 1) {
return false;
}
}
return true;
}
/*!
 * \brief Storage-type inference for Slice forward.
 * Dense input: prefer the MKLDNN FComputeEx path when available, else the
 * plain FCompute. CSR input keeps CSR only when every step is 1 (or
 * unset); everything else falls back to dense.
 */
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
const auto& in_stype = in_attrs->at(0);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const auto dispatch_ex = DispatchMode::kFComputeEx;
// If step = 1, no need to fallback; otherwise fallback to dense
bool trivial_step = false;
if (param.step.ndim() == 0U) {
trivial_step = true;
} else if (param.step.ndim() == 1U
&& (!param.step[0].has_value() || param.step[0].value() == 1)) {
trivial_step = true;
}
if (in_stype == kDefaultStorage) {
#if MXNET_USE_MKLDNN == 1
if (dev_mask == Context::kCPU && MKLDNNEnvSet()
&& SupportMKLDNNSlice(param)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, dispatch_ex);
}
#endif
if (!dispatched) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
}
if (!dispatched && in_stype == kCSRStorage && trivial_step) {
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
// slice the indptr of a csr
// Kernel: out[i] = in[i] - *base, i.e. rebase a copied indptr window so
// the first sliced row starts at offset 0.
struct SliceCsrIndPtr {
template<typename IType>
MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
}
};
/*
 * a wrapper to launch SliceCsrIndPtr kernel.
 * slice [src[begin] .. src[end]) and store in dst[0, end - begin)
 * The base pointer passed to the kernel is src+begin itself, so dst
 * becomes zero-based.
 */
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
const IType* src, IType* dst) {
using namespace mshadow;
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
int indptr_len = end - begin + 1;
Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
/*
 * Slice a CSR NDArray for first dimension
 * Copies a rebased indptr window, then (if any nonzeros remain) the
 * corresponding contiguous ranges of indices and values. nnz/offset are
 * staged through single-element host copies so this works for gpu too.
 */
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out) {
using namespace mshadow;
using namespace mxnet_op;
using namespace csr;
nnvm::dim_t begin_row = begin[0];
nnvm::dim_t end_row = end[0];
nnvm::dim_t indptr_len = end_row - begin_row + 1;
out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
// assume idx indptr share the same type
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
Stream<xpu> *s = ctx.get_stream<xpu>();
// nnz of the slice = last entry of the rebased indptr
RType nnz = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
// return csr zeros if nnz = 0
if (nnz == 0) {
out.set_aux_shape(kIdx, Shape1(0));
return;
}
// copy indices and values
out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
out.CheckAndAllocData(Shape1(nnz));
IType* in_idx = in.aux_data(kIdx).dptr<IType>();
IType* out_idx = out.aux_data(kIdx).dptr<IType>();
DType* in_data = in.data().dptr<DType>();
DType* out_data = out.data().dptr<DType>();
// starting position of the slice within the source arrays
RType offset = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
});
});
});
}
/*!
 * \brief slice a CSRNDArray for two dimensions
 * Per-row kernel; `out_indptr` must already hold the per-row output
 * offsets (computed by a prior pass) before this assignment runs.
 */
struct SliceDimTwoCsrAssign {
/*!
 * \brief This function slices a CSRNDArray on axis one between begin_col and end_col
 * \param i loop index
 * \param out_idx output csr ndarray column indices
 * \param out_data output csr ndarray data
 * \param out_indptr output csr ndarray row index pointer
 * \param in_idx input csr ndarray column indices
 * \param in_data input csr ndarray data
 * \param in_indptr input csr ndarray row index pointer
 * \param begin_col begin column indice
 * \param end_col end column indice
 */
template<typename IType, typename RType, typename DType>
MSHADOW_XINLINE static void Map(int i,
IType* out_idx, DType* out_data,
const RType* out_indptr,
const IType* in_idx, const DType* in_data,
const RType* in_indptr,
const int begin_col, const int end_col) {
RType ind = out_indptr[i];
for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) {
// indices of CSRNDArray are in ascending order per row
if (in_idx[j] >= end_col) {
break;
} else if (in_idx[j] >= begin_col) {
// shift columns so the slice is zero-based
out_idx[ind] = in_idx[j] - begin_col;
out_data[ind] = in_data[j];
ind++;
}
}
}
};
/*
 * Slice a CSR NDArray for two dimensions
 * (forward declaration only; the cpu and gpu implementations live elsewhere)
 */
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out);
template<typename xpu>
void SliceCsrImpl(const SliceParam ¶m, const OpContext& ctx,
const NDArray &in, OpReqType req, const NDArray &out) {
if (req == kNullOp) return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0) s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
/*!
 * \brief Slice dispatch for non-default storage inputs.
 * Only CSR storage is supported; any other storage type is a fatal error.
 * \param attrs   operator attributes holding the parsed SliceParam
 * \param ctx     operator context
 * \param inputs  single input NDArray
 * \param req     write request for the single output
 * \param outputs single output NDArray
 */
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  auto in_stype = inputs[0].storage_type();
  if (in_stype == kCSRStorage) {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  } else {
    // fixed: add the missing space before the storage-type value
    LOG(FATAL) << "Slice not implemented for storage type " << in_stype;
  }
}
/*!
 * \brief Resolve begin/end/step slice parameters into absolute per-axis
 *        indices for a tensor of shape `dshape`.
 * Negative begin/end values are wrapped around the axis length; missing
 * values default to the whole axis (direction-aware for negative steps);
 * axes beyond param_begin.ndim() are left unsliced.
 * \return true if any sliced axis is empty (begin == end), i.e. the slice is
 *         zero-sized; false otherwise.
 */
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                          common::StaticArray<index_t, ndim>* begin,
                          common::StaticArray<index_t, ndim>* end,
                          common::StaticArray<index_t, ndim>* step) {
  // set to true once any axis yields an empty range (b == e)
  bool zero_size_shape = false;
  CHECK_NE(dshape.ndim(), 0U);
  CHECK_LE(param_begin.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_LE(param_end.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_EQ(param_begin.ndim(), param_end.ndim())
    << "begin and end must have the same length";
  CHECK_EQ(ndim, dshape.ndim())
    << "Static array size=" << ndim
    << " is not equal to data shape ndim=" << dshape.ndim();
  if (param_step.ndim() > 0) {
    CHECK_EQ(param_step.ndim(), param_begin.ndim())
      << "step and begin must have the same length";
  }
  for (int i = 0; i < param_begin.ndim(); ++i) {
    index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
    CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
    index_t b = 0, e = 0;
    const index_t len = dshape[i];
    if (len > 0) {
      // defaults cover the whole axis; negative step iterates backwards
      b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
      e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
      // wrap negative indices around the axis length
      if (b < 0) {
        b += len;
      }
      if (e < 0 && param_end[i].has_value()) {
        e += len;
      }
      // move the begin and end to correct position for calculating dim size
      b = (b < 0 && s > 0) ? 0 : b;
      b = (b > len - 1 && s < 0) ? len - 1 : b;
      // if the start value lead to empty tensor under step s, use -1 for indication
      b = (b < 0 || b > len - 1) ? -1 : b;
      e = e > -1 ? e : -1;
      e = e > len ? len : e;
    } else if (len == 0) {
      b = 0;
      e = 0;
    }
    (*begin)[i] = b;
    (*end)[i] = e;
    (*step)[i] = s;
    // checking begin==end
    if (b == e) {
      zero_size_shape = true;
    }
  }
  // axes not mentioned in the parameters are taken whole
  for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
    (*begin)[i] = 0;
    (*end)[i] = dshape[i];
    (*step)[i] = 1;
  }
  return zero_size_shape;
}
/*!
 * \brief Write the output extent of sliced axis i into *oshape, given the
 *        resolved begin b, end e and step s (s != 0; b == -1 marks an empty
 *        start position).
 */
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i, const index_t b,
                                    const index_t e, const index_t s,
                                    mxnet::TShape* oshape) {
  // unknown input extent -> unknown output extent
  if (!mxnet::dim_size_is_known(dshape, i)) {
    (*oshape)[i] = -1;
    return;
  }
  index_t sz = 0;
  if (b >= 0 && e != b) {
    if (s > 0 && e > b) {
      sz = (e - b - 1) / s + 1;       // ceil((e - b) / s)
    } else if (s < 0 && e < b) {
      sz = (b - e - 1) / (-s) + 1;    // ceil((b - e) / -s)
    }
  }
  (*oshape)[i] = sz;
}
/*!
 * \brief Shape inference for the slice operator.
 * Starts from the input shape and shrinks every sliced axis according to the
 * begin/end/step parameters.
 * \return true when both input and output shapes are fully known.
 */
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  mxnet::TShape oshape = dshape;  // unsliced axes keep the input extent
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(dshape) && shape_is_known(oshape);
}
/*! \brief Forward slice kernel; specialized for cpu and gpu below. */
template<int ndim, int req, typename xpu>
struct slice_forward;
template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
  // One thread per output element: i indexes the flattened output tensor.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t in_last = dshape[ndim-1];
    const index_t out_last = oshape[ndim-1];
    const index_t last_step = step[ndim-1];
    const index_t last_begin = begin[ndim-1];
    const index_t col = i % out_last;
    // map the remaining output coordinates back to the input row
    index_t row = 0;
    index_t row_stride = 1;
    index_t rem = i / out_last;
    #pragma unroll
    for (int d = ndim - 2; d >= 0; --d) {
      row += row_stride * ((rem % oshape[d]) * step[d] + begin[d]);
      rem /= oshape[d];
      row_stride *= dshape[d];
    }
    KERNEL_ASSIGN(out[i], req,
                  data[row * in_last + col * last_step + last_begin]);
  }
};
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
  // One thread per output row: i indexes rows of the flattened-2D output.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t in_last = dshape[ndim-1];
    const index_t out_last = oshape[ndim-1];
    const index_t last_step = step[ndim-1];
    const index_t last_begin = begin[ndim-1];
    // map output row i back to the corresponding input row (loop-invariant,
    // so computed once instead of per column)
    index_t row = 0;
    index_t row_stride = 1;
    index_t rem = i;
    #pragma unroll
    for (int d = ndim - 2; d >= 0; --d) {
      row += row_stride * ((rem % oshape[d]) * step[d] + begin[d]);
      rem /= oshape[d];
      row_stride *= dshape[d];
    }
    index_t dst = i * out_last;
    const index_t src_row = row * in_last;
    for (index_t col = 0; col < out_last; ++col) {
      KERNEL_ASSIGN(out[dst++], req, data[src_row + col * last_step + last_begin]);
    }
  }
};
/*!
 * \brief Forward pass of the dense slice operator.
 * Resolves begin/end/step against the input shape and launches the
 * slice_forward kernel (one thread per output row on cpu, one thread per
 * output element on gpu).
 */
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (out.Size() == 0) return;  // empty slice: nothing to compute
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        size_t num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // the gpu kernel handles one element per thread
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*! \brief Kernel writing a value tensor into a strided slice of the output;
 *         specialized for cpu and gpu below. */
template<int ndim, int req, typename xpu>
struct slice_assign;
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
  // One thread per value row: i indexes rows of the flattened-2D value tensor.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t out_last = oshape[ndim-1];
    const index_t val_last = vshape[ndim-1];
    const index_t last_step = step[ndim-1];
    const index_t last_begin = begin[ndim-1];
    // map value row i to the destination row in the flattened-2D output
    // (loop-invariant, so computed once instead of per column)
    index_t row = 0;
    index_t row_stride = 1;
    index_t rem = i;
    #pragma unroll
    for (int d = ndim - 2; d >= 0; --d) {
      row += row_stride * ((rem % vshape[d]) * step[d] + begin[d]);
      rem /= vshape[d];
      row_stride *= oshape[d];
    }
    index_t src = i * val_last;
    const index_t dst_row = row * out_last;
    for (index_t col = 0; col < val_last; ++col) {
      KERNEL_ASSIGN(out[dst_row + col * last_step + last_begin], req, val[src++]);
    }
  }
};
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
  // One thread per value element: i indexes the flattened value tensor.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t out_last = oshape[ndim-1];
    const index_t val_last = vshape[ndim-1];
    const index_t last_step = step[ndim-1];
    const index_t last_begin = begin[ndim-1];
    const index_t col = i % val_last;
    // map the remaining value coordinates to the destination row
    index_t row = 0;
    index_t row_stride = 1;
    index_t rem = i / val_last;
    #pragma unroll
    for (int d = ndim - 2; d >= 0; --d) {
      row += row_stride * ((rem % vshape[d]) * step[d] + begin[d]);
      rem /= vshape[d];
      row_stride *= oshape[d];
    }
    KERNEL_ASSIGN(out[row * out_last + col * last_step + last_begin], req, val[i]);
  }
};
/*!
 * \brief Backward pass of the dense slice operator.
 * Zero-fills igrad first (for kWriteTo) so positions outside the slice get
 * zero gradient, then scatters ograd into the sliced region with the
 * slice_assign kernel.
 */
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];   // gradient w.r.t. the slice output
  const TBlob& igrad = outputs[0];  // gradient w.r.t. the slice input
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0) return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // the gpu kernel handles one element per thread
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Shape inference for _slice_assign.
 * The output shape equals the data shape; the value (rhs) shape is derived
 * from the slice parameters and assigned/checked onto the second input.
 */
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector *in_attrs,
                               mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      // use index_t (not int) to avoid narrowing large extents;
      // keeps this consistent with SliceOpShape
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
/*!
 * \brief Forward pass of _slice_assign: out = data, then out[slice] = val.
 * With kWriteTo the data input is first copied into the output; with
 * kWriteInplace the output already aliases the data. The value tensor is
 * then scattered into the sliced region with the slice_assign kernel.
 */
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp) return;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& val = inputs[1];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // materialize the unmodified data into the output first
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
  }
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = val.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // the gpu kernel handles one element per thread
          num_threads *= val.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), val.dptr<DType>(),
            out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*! \brief Parameters of _slice_assign_scalar: write `scalar` into the region
 *         selected by begin/end/step. */
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;  // value broadcast over the sliced region
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
  mxnet::Tuple<dmlc::optional<index_t>> step;
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(0)
    .describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin)
    .describe("starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end)
    .describe("ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
    .describe("step for the slice operation, supports negative values.");
  }
};
/*!
 * \brief Shape inference for _slice_assign_scalar: the output shape is the
 *        (fully known) input shape.
 */
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                    mxnet::ShapeVector *in_attrs,
                                    mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  if (!shape_is_known(dshape)) {
    return false;  // cannot infer until the input shape is fully known
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
/*! \brief Kernel broadcasting a scalar into a strided slice of the output;
 *         one thread per sliced row (i indexes the flattened-2D slice view). */
template<int ndim>
struct slice_assign_scalar {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t out_last = oshape[ndim-1];
    const index_t val_last = vshape[ndim-1];
    const index_t last_step = step[ndim-1];
    const index_t last_begin = begin[ndim-1];
    // map slice row i to the destination row in the flattened-2D output
    // (loop-invariant, so computed once instead of per column)
    index_t row = 0;
    index_t row_stride = 1;
    index_t rem = i;
    #pragma unroll
    for (int d = ndim - 2; d >= 0; --d) {
      row += row_stride * ((rem % vshape[d]) * step[d] + begin[d]);
      rem /= vshape[d];
      row_stride *= oshape[d];
    }
    const index_t dst_row = row * out_last;
    for (index_t col = 0; col < val_last; ++col) {
      KERNEL_ASSIGN(out[dst_row + col * last_step + last_begin], req, val);
    }
  }
};
/*!
 * \brief Forward pass of _slice_assign_scalar: out = data, then
 *        out[slice] = scalar.
 * With kWriteTo the data input is first copied into the output; with
 * kWriteInplace the output already aliases the data. The scalar is then
 * broadcast over the sliced region.
 */
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  // vshape describes the sliced region the scalar is written into
  mxnet::TShape vshape = data.shape_;
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      // fixed: use index_t (not int) to avoid narrowing large extents,
      // consistent with SliceOpShape
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
          out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
          out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}
/*! \brief Parameters of slice_axis: slice [begin, end) along a single axis. */
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;                      // axis to slice; negative counts from the end
  index_t begin;                 // inclusive start; negative wraps around
  dmlc::optional<index_t> end;   // exclusive end; absent means "to the end"
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
    .describe("The beginning index along the axis to be sliced, "
              " supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
    .describe("The ending index along the axis to be sliced, "
              " supports negative indexes.");
  }
};
/*!
 * \brief Resolve and validate slice_axis parameters against the input shape.
 * Normalizes a negative axis and negative begin/end indices; an absent end
 * means "to the end of the axis". A zero-sized axis yields begin = end = 0.
 * \param param  user parameters (axis, begin, optional end)
 * \param ishape input shape
 * \param axis   out: normalized axis in [0, ndim)
 * \param begin  out: normalized non-negative begin index
 * \param end    out: normalized end index with begin < end <= axis size
 */
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();  // normalize negative axis
  }
  // fixed typo in the error message: "Recieved" -> "Received"
  CHECK(*axis < ishape.ndim() && *axis >= 0) <<
    "Transformed axis must be smaller than the source ndim and larger than zero! Received axis=" <<
    param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;  // wrap negative begin
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;  // absent end: slice to the end of the axis
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;  // wrap negative end
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end))
      << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  } else {
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0)
    << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
/*!
 * \brief Shape inference for slice_axis: the output keeps every input extent
 *        except the sliced axis, which becomes end - begin.
 */
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    // sliced axis extent unknown: propagate and report not-done
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  // copy the input shape, then shrink only the sliced axis
  mxnet::TShape oshape(ishape);
  oshape[axis] = static_cast<index_t>(end - begin);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Forward pass of slice_axis.
 * The tensors are flattened to a 2-D view when the sliced axis is last,
 * otherwise to a 3-D view via FlatTo3D(axis), and mshadow's slice<1>
 * expression extracts [begin, end) along that dimension.
 */
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // sliced axis is the last one: 2-D view suffices
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> in =
          inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> out =
          outputs[0].FlatTo2D<xpu, DType>(s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> in =
          inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> out =
          outputs[0].FlatTo3D<xpu, DType>(axis, s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  }
}
// Backward pass of slice_axis: writes (kWriteTo, after zeroing) or
// accumulates (kAddTo) ograd into the [begin, end) range of igrad along the
// sliced axis; positions outside the slice get zero gradient.
// (The previous comment said "broadcast" — this is the slice_axis gradient.)
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;  // zero-sized gradient: nothing to do
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    // sliced axis is the last one: 2-D view suffices
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> ograd =
          inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> igrad =
          outputs[0].FlatTo2D<xpu, DType>(s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> ograd =
          inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> igrad =
          outputs[0].FlatTo3D<xpu, DType>(axis, s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  }
}
/*! \brief Parameters of slice_like: which axes follow the second input's shape. */
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;  // empty tuple means: slice on all axes
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
    .describe("List of axes on which input data will be sliced according to the "
              "corresponding size of the second input. By default will slice on "
              "all axes. Negative axes are supported.");
  }
};
/*!
 * \brief Shape inference for slice_like.
 * Without axes, the output shape is the second input's shape (ndims must
 * match). With axes, only the listed axes take the second input's extent;
 * the rest keep the first input's extent.
 */
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (param.axes.ndim() == 0) {
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_axis performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      // fixed: added the missing leading space so the message reads
      // "size 5 exceeds" instead of "size 5exceeds"
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << " exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();  // normalize negative axis
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << " exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
/*!
 * \brief Build begin/end/step tuples so that slicing `dshape` with them
 *        yields `fshape` on the requested axes (whole axis elsewhere).
 * \param dshape     shape of the data to slice (first input)
 * \param fshape     shape of the shape-like reference (second input)
 * \param axes       axes to slice; empty means all axes
 * \param param_begin out: begin indices (0 on sliced axes, unset elsewhere)
 * \param param_end   out: end indices (fshape extent on sliced axes)
 * \param param_step  out: step (1 on sliced axes)
 */
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    // no axes given: slice every axis down to fshape's extent
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();  // normalize negative axis
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      // fixed message: this bound is against the second (shape-like) input
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
/*!
 * \brief Forward pass of slice_like.
 * Derives begin/end/step from the second input's shape and reuses the
 * slice_forward kernel.
 */
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // the gpu kernel handles one element per thread
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
            num_threads, out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Backward pass of slice_like.
 * The second input only provides a shape, so its gradient is zero-filled;
 * the first input's gradient is ograd scattered back into the sliced region
 * via the slice_assign kernel.
 */
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // positions outside the slice receive zero gradient
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // the gpu kernel handles one element per thread
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*! \brief Parameters of the clip operator: values are clamped to [a_min, a_max]. */
struct ClipParam : public dmlc::Parameter<ClipParam> {
  real_t a_min, a_max;  // lower / upper clamp bounds
  DMLC_DECLARE_PARAMETER(ClipParam) {
    DMLC_DECLARE_FIELD(a_min)
    .describe("Minimum value");
    DMLC_DECLARE_FIELD(a_max)
    .describe("Maximum value");
  }
};
/*! \brief Elementwise kernel clamping each input value into [a_min, a_max]. */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType v = datas[i];
    DType result = v;
    // check the upper bound first, matching the original evaluation order
    if (v > a_max) {
      result = a_max;
    } else if (v < a_min) {
      result = a_min;
    }
    out[i] = result;
  }
};
/*! \brief Elementwise clip gradient: pass the gradient through only where the
 *         input was strictly inside [a_min, a_max]. */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType v = datas[i];
    const bool clamped = (v > a_max) || (v < a_min);
    out[i] = clamped ? DType(0) : grad[i];
  }
};
/*!
 * \brief Forward pass of the dense clip operator: launches the elementwise
 *        clip kernel over the whole output.
 */
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  CHECK_EQ(data.type_flag_, out.type_flag_);
  Stream<xpu>* s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(
        s, out.Size(), out.dptr<DType>(), data.dptr<DType>(), param.a_min, param.a_max);
  });
}
/*!
 * \brief Clip on non-default (sparse) storage.
 * Validates that input and output agree on dtype and storage type, then
 * applies the dense Clip implementation through UnaryOp::MapToFCompute.
 */
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
            const OpContext& ctx,
            const std::vector<NDArray>& inputs,
            const std::vector<OpReqType>& req,
            const std::vector<NDArray>& outputs) {
  const NDArray& in = inputs[0];
  const NDArray& out = outputs[0];
  CHECK_EQ(in.dtype(), out.dtype());
  CHECK_EQ(in.storage_type(), out.storage_type());
  CHECK_NE(in.storage_type(), kDefaultStorage);
  UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
/*!
 * \brief Backward pass of clip: launches the clip_grad kernel, zeroing the
 *        gradient wherever the forward input was clamped.
 */
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  const TBlob& ograd = inputs[0];  // incoming gradient
  const TBlob& data = inputs[1];   // forward input values
  const TBlob& igrad = outputs[0];
  CHECK_EQ(ograd.type_flag_, igrad.type_flag_);
  Stream<xpu>* s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(igrad.type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(
        s, igrad.Size(), igrad.dptr<DType>(), ograd.dptr<DType>(), data.dptr<DType>(),
        param.a_min, param.a_max);
  });
}
/*!
 * \brief The parameters of the repeat operator include
 * the number of repetitions and axis (optional).
 * The parameters will be later used to deduce the
 * output ndarray shape in the bool RepeatOpShape() function.
 */
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
  // Number of repetitions per element; validated non-negative in GetRepeatParams.
  int repeats = 1;
  // Optional axis; when absent the input is flattened before repeating.
  dmlc::optional<int> axis;
  DMLC_DECLARE_PARAMETER(RepeatParam) {
    DMLC_DECLARE_FIELD(repeats)
      .describe("The number of repetitions for each element.");
    DMLC_DECLARE_FIELD(axis)
      .set_default(dmlc::optional<int>())
      .describe("The axis along which to repeat values."
                " The negative numbers are interpreted counting from the backward."
                " By default, use the flattened input array,"
                " and return a flat output array.");
  }
};
/*!
* \brief Helper function for getting user input params for the operator repeat.
* Sanity check the user input values.
*/
/*!
 * \brief Extract and validate the repeat operator's parameters.
 * Fails (CHECK) on negative repeats or an axis outside [-ndim, ndim).
 */
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
                            int* repeats, dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (static_cast<bool>(*axisOpt)) {
    const int ndim = ishape.ndim();
    // Normalize a negative axis before bounds-checking it.
    int ax = axisOpt->value();
    if (ax < 0) ax += ndim;
    CHECK(ax >= 0 && ax < ndim) << "axis = " << axisOpt->value() << " out of bounds";
  }
}
/*!
 * \brief Shape inference for repeat. With an axis, that dimension is scaled
 * by `repeats`; without one, the output is a flat array of size
 * ishape.Size() * repeats. Zero repeats yields an empty 1-d shape.
 */
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  // Zero repetitions: return an empty 1-dim, 0-size array.
  if (repeats == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
    return true;
  }
  if (static_cast<bool>(axisOpt)) {
    // Repeating along a given axis multiplies that dimension by repeats.
    int ax = axisOpt.value();
    if (ax < 0) ax += ishape.ndim();
    mxnet::TShape oshape(ishape.ndim(), -1);
    for (int d = 0; d < ishape.ndim(); ++d) {
      oshape[d] = (d == ax) ? repeats * ishape[d] : ishape[d];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  } else {
    // No axis: the result is flattened to 1-d of size in.Size() * repeats.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, ishape.Size() * repeats));
  }
  return shape_is_known(out_attrs->at(0));
}
/*! \brief Type inference for repeat: the dtype flows whichever way is known. */
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  if (in_type != -1) {
    // Known input dtype: propagate forward to the output.
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if ((*out_attrs)[0] != -1) {
    // Otherwise infer the input dtype backwards from the output.
    TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  }
  return true;
}
/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
 * of operator repeat.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
/*!
 * \brief Build the (input, broadcast-target) shape pair that lets
 * broadcast_to implement repeat: a size-1 axis is inserted after the repeat
 * axis and broadcast to `repeats`.
 * \return pair of mxnet::TShape's — first the reshaped input shape, second
 * the shape to broadcast to.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    const int ndim = ishape.ndim();
    int ax = axisOpt.value();
    if (ax < 0) ax += ndim;
    CHECK(ax >= 0 && ax < ishape.ndim()) << "Invalid input of axis";
    // One extra size-1 axis right after `ax` in the input view...
    mxnet::TShape rshape(ndim + 1, 1);
    // ...which becomes `repeats` wide in the broadcast target.
    mxnet::TShape bshape(ndim + 1, 1);
    for (int d = 0; d <= ax; ++d) {
      rshape[d] = ishape[d];
      bshape[d] = ishape[d];
    }
    rshape[ax + 1] = 1;
    bshape[ax + 1] = repeats;
    for (int d = ax + 1; d < ndim; ++d) {
      rshape[d + 1] = ishape[d];
      bshape[d + 1] = ishape[d];
    }
    return std::make_pair(rshape, bshape);
  }
  // No axis given: view the input as a (Size(), 1) column and broadcast the
  // trailing axis to (Size(), repeats).
  mxnet::TShape rshape(2, 1);
  mxnet::TShape bshape(2, 1);
  rshape[0] = ishape.Size();
  rshape[1] = 1;
  bshape[0] = rshape[0];
  bshape[1] = repeats;
  return std::make_pair(rshape, bshape);
}
/*!
 * \brief Forward pass of repeat: views input and output through the shapes
 * produced by ReshapeInputOutputForRepeatOp, then lets BroadcastCompute
 * materialize each element `repeats` times. No data is copied for the views.
 */
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const TBlob& iTBlob = inputs[0];
  const mxnet::TShape& ishape = iTBlob.shape_;
  if (!shape_is_known(ishape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (0 == repeats) return;  // empty output: nothing to compute
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = \
    ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
  // reshaped input tblob (aliases the input's buffer)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
    inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob (aliases the output's buffer)
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
    outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule: the forward broadcast becomes a sum-reduction
 * over the inserted repeat axis.
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  // Note: shapes are derived from the *input* gradient's target shape
  // (outputs[0]), mirroring the forward reshape.
  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (0 == repeats) return;
  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
    ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
  // reshaped output grad tblob (reduction target, shape rshapes.first)
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
    outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob (incoming gradient, shape rshapes.second)
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
    inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // Sum the incoming gradient over the broadcast (repeat) axis.
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
    ctx, newInputs, req, newOutputs, rshapes.first);
}
/*! \brief Parameter of the tile operator: per-dimension repetition counts. */
struct TileParam : public dmlc::Parameter<TileParam> {
  mxnet::Tuple<int> reps;
  DMLC_DECLARE_PARAMETER(TileParam) {
    DMLC_DECLARE_FIELD(reps)
      .describe("The number of times for repeating the tensor a. Each dim size of reps"
                " must be a positive integer."
                " If reps has length d, the result will have dimension of max(d, a.ndim);"
                " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
                " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
  }
};
/*!
 * \brief Shape inference for tile: aligns ishape and reps at the trailing
 * dimension and multiplies matching entries; a missing entry on either side
 * behaves like 1.
 */
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector *in_attrs,
                        mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  const mxnet::Tuple<int>& reps = param.reps;
  // Empty reps: output is identical to input.
  if (reps.ndim() == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return true;
  }
  mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  // Walk right-to-left; at least one of i1/i2 is valid at every position.
  for (int i = oshape.ndim() - 1; i >= 0; --i) {
    if (i1 >= 0 && i2 >= 0) {
      oshape[i] = ishape[i1--] * reps[i2--];
    } else {
      oshape[i] = (i1 >= 0) ? ishape[i1--] : reps[i2--];
    }
  }
  // If reps contains 0s, oshape is a zero-size shape.
  // Need to distinguish between np_shape mode and legacy mode.
  if (!Imperative::Get()->is_np_shape()) {
    common::ConvertToNumpyShape(&oshape);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*! \brief Type inference for tile: propagate the dtype in whichever direction is known. */
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int known_in = (*in_attrs)[0];
  if (known_in != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, known_in);
  } else if ((*out_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
/*!
 * \brief Build the (input, broadcast-target) shape pair that lets
 * broadcast_to implement tile. For every aligned (dim, rep) pair the views
 * carry two slots [rep-slot, dim-slot]: the input view holds 1 in the
 * rep-slot, the broadcast target holds `rep` there.
 * \return pair of mxnet::TShape's — first the reshaped input shape, second
 * the shape to broadcast to.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
    const mxnet::TShape& ishape,
    const mxnet::Tuple<int>& reps) {
  // Nothing to tile when reps is empty.
  if (reps.ndim() == 0) {
    return std::make_pair(ishape, ishape);
  }
  const int pairs = std::max(ishape.ndim(), reps.ndim());
  mxnet::TShape bshape(pairs * 2, 1);
  mxnet::TShape rshape(pairs * 2, 1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  for (int k = pairs - 1; k >= 0; --k) {
    // Odd slot carries the original dimension (1 once exhausted).
    rshape[2 * k + 1] = bshape[2 * k + 1] = (i1 >= 0) ? ishape[i1--] : 1;
    // Even slot: 1 in the input view, the repetition count in the target.
    rshape[2 * k] = 1;
    bshape[2 * k] = (i2 >= 0) ? reps[i2--] : 1;
  }
  return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
* If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
/*!
 * \brief Forward pass of tile: views input/output through the interleaved
 * shapes from ReshapeInputOutputForTileOp and delegates to BroadcastCompute.
 */
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& ishape = inputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the numbers in reps is zero, the output is empty: return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
  // reshaped input tblob (aliases the input's buffer)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob (aliases the output's buffer)
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule: the forward broadcast becomes a sum-reduction
 * over the repetition slots.
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  // Shapes derive from the input-gradient target (outputs[0]), mirroring forward.
  const mxnet::TShape& oshape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the numbers in reps is zero, there is no gradient to propagate
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
  // reshaped output grad tblob (reduction target)
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob (incoming gradient)
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // Sum the incoming gradient over the broadcast (repetition) slots.
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
    ctx, newInputs, req, newOutputs, rshapes.first);
}
/*! \brief Parameter of the reverse operator: the set of axes to flip. */
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
  mxnet::Tuple<int> axis;
  DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("The axis which to reverse elements.");
  }
};
#define REVERSE_MAX_DIM 10U
/*!
 * \brief Kernel that reverses a tensor along a set of axes by remapping each
 * linear input index to its mirrored linear output index.
 */
struct reverse {
  /*!
   * \brief Map linear index `idx` to the linear index with each reversed
   * dimension's coordinate x replaced by (stride - 1 - x).
   * \param nreversedim number of axes being reversed
   * \param stride_ per-reversed-axis dimension sizes
   * \param trailing_ per-reversed-axis products of trailing dimension sizes
   */
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t * stride_,
                                              const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      // Decompose the index around axis i: low | x | high.
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      // Mirror coordinate x within its axis and reassemble.
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                             const index_t * stride_,
                             const index_t * trailing_) {
    // Stage stride/trailing in shared memory for the whole block.
    // NOTE(review): the first REVERSE_MAX_DIM threads each copy one entry,
    // which reads REVERSE_MAX_DIM elements from stride_/trailing_ even when
    // nreversedim is smaller — assumes the device buffers are at least that
    // large; confirm against the launcher's workspace allocation.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                                  const index_t * stride_,
                                  const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
/*!
 * \brief Forward pass of reverse: computes, for every axis to flip, the axis
 * size (stride_) and the number of elements trailing it (trailing_), then
 * launches the `reverse` kernel. On GPU the two arrays are first copied into
 * a device workspace.
 */
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const mxnet::TShape& ishape = inputs[0].shape_;
  std::vector<index_t> stride_(param.axis.ndim());
  std::vector<index_t> trailing_(param.axis.ndim());
  index_t reverse_index = 0;
  for (int axis : param.axis) {
    CHECK_LT(axis, ishape.ndim());
    stride_[reverse_index] = ishape[axis];
    // trailing = product of all dimension sizes after `axis`.
    trailing_[reverse_index] = 1;
    for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
      trailing_[reverse_index] *= ishape[i2];
    }
    reverse_index++;
  }
#ifdef __CUDACC__
  // Device workspace: stride_ followed by trailing_, reverse_index entries each.
  mshadow::Tensor<xpu, 1, uint8_t> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
      mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
  auto stride_workspace = workspace.dptr_;
  auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
  // Async copies are ordered with the kernel launch on the same stream.
  cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
                  stride_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
  cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
                  trailing_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
    inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
    reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
  });
#else
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
    inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
    stride_.data(), trailing_.data());
  });
#endif
}
/*! \brief Parameters of the stack operator: insertion axis and input count. */
struct StackParam : public dmlc::Parameter<StackParam> {
  int axis;
  int num_args;
  DMLC_DECLARE_PARAMETER(StackParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(0)
    .describe("The axis in the result array along which the input arrays are stacked.");
    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
    .describe("Number of inputs to be stacked.");
  }
};
/*!
 * \brief Shape inference for stack: all inputs must agree on one shape;
 * the output gains one extra axis of length num_args at position `axis`.
 */
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  // Merge every input shape into one common shape (fails if they conflict).
  mxnet::TShape dshape;
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;
  // Fix: use a signed `int` counter to match ndim()'s int return type,
  // consistent with the loop above (the original `index_t` counter caused a
  // signed/unsigned comparison in builds where index_t is unsigned).
  for (int i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i-1];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Forward pass of stack: flattens the output to 3-D
 * (leading, num_args, trailing) around the stack axis, views each input as
 * (leading, 1, trailing), and concatenates them along the middle axis.
 */
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, outputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
    Tensor<xpu, 3, DType> out;
    // leading/trailing: products of output dims before/after the stack axis.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= outputs[0].shape_[i];
    }
    for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
      trailing *= outputs[0].shape_[i];
    }
    size_t mid = outputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
    for (size_t i = 0; i < inputs.size(); ++i) {
      Shape<3> dshape = Shape3(leading, 1, trailing);
      data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    // Concatenate along axis 1 of the 3-D views (the stack axis).
    Concatenate(data, &out, 1, req[0]);
  })
}
/*!
 * \brief Backward pass of stack: the inverse of the forward — the 3-D
 * (leading, num_args, trailing) output gradient is split along the middle
 * axis into one (leading, 1, trailing) slice per input.
 */
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
    Tensor<xpu, 3, DType> grad;
    // leading/trailing: products of gradient dims before/after the stack axis.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= inputs[0].shape_[i];
    }
    for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
      trailing *= inputs[0].shape_[i];
    }
    size_t mid = inputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
    for (size_t i = 0; i < outputs.size(); ++i) {
      Shape<3> dshape = Shape3(leading, 1, trailing);
      grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    // Split along axis 1, honoring each output's write request.
    Split(grad, &grad_in, 1, req);
  })
}
/*! \brief Parameter of squeeze: optional subset of size-1 axes to remove. */
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
  dmlc::optional<mxnet::Tuple<int>> axis;
  DMLC_DECLARE_PARAMETER(SqueezeParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(dmlc::optional<mxnet::Tuple<int>>())
    .describe("Selects a subset of the single-dimensional entries in the shape."
              " If an axis is selected with shape entry greater than one, an error is raised.");
  }
};
// Given a shape that may contain the -1 sentinel (marking dims to squeeze),
// move all the -1 entries to the last of the shape array
// and keep the relative order of the non-sentinel values.
// Returns the new shape size after moving all -1s to the end.
/*!
 * \brief Compact a shape in place: every entry that is not the -1 sentinel is
 * moved forward (stable, via swap) so that all -1 entries end up at the tail.
 * \return the number of retained (non -1) entries, i.e. the squeezed ndim.
 */
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  CHECK(shape != nullptr);
  int keep = 0;  // write cursor: next slot for a retained entry
  for (int i = 0; i < shape->ndim(); ++i) {
    if ((*shape)[i] != -1) {
      std::swap((*shape)[i], (*shape)[keep]);
      ++keep;
    }
  }
  return keep;
}
/*!
 * \brief Shape inference for squeeze. Marks each axis to drop with the -1
 * sentinel (either the user-specified axes, which must have size 1, or all
 * size-1 axes), then compacts the shape with SqueezeShapeHelper.
 */
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  const int dndim = dshape.ndim();
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape = dshape;
  if (param.axis.has_value()) {
    // preprocess axis: normalize negatives and validate each entry
    mxnet::Tuple<int> axes = param.axis.value();
    for (int i = 0; i < axes.ndim(); ++i) {
      if (axes[i] < 0) {
        axes[i] += dndim;
        CHECK_GE(axes[i], 0)
          << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
      }
      CHECK_LT(axes[i], dndim)
        << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
      CHECK_EQ(dshape[axes[i]], 1)
        << "cannot select an axis to squeeze out which has size="
        << dshape[axes[i]] << " not equal to one";
      // An axis already marked -1 means the same axis was listed twice.
      CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
      oshape[axes[i]] = -1;
    }
  } else {
    // No axes given: squeeze every size-1 dimension.
    for (int i = 0; i < oshape.ndim(); ++i) {
      if (oshape[i] == 1) oshape[i] = -1;
    }
  }
  size_t oshape_size = SqueezeShapeHelper(&oshape);
  if (oshape_size == 0) {  // corner case when dshape is (1, 1, 1, 1)
    oshape[0] = 1;
    oshape_size = 1;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
  return true;
}
/*!
 * \brief Parameter shared by depth_to_space and space_to_depth: the square
 * block edge length moved between the depth and spatial dimensions.
 */
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  int block_size;
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size)
    // Fix: the describe string had a typo — "[block_size. block_size]".
    .describe("Blocks of [block_size, block_size] are moved");
  }
};
/*!
 * \brief Shape inference for depth_to_space on an NCHW 4-D tensor:
 * (N, C, H, W) -> (N, C / block^2, H * block, W * block).
 */
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // Fix: CHECK_NE(block, 0) let negative block sizes through even though the
  // message promises a positive integer; CHECK_GT enforces it.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
    << "Cannot perform Depth To Space operation on the specified tensor."
       " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  // Both spatial dimensions grow by a factor of `block`.
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] * block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
/*! \brief Type inference for depth_to_space: dtype flows both ways until fixed. */
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  // Propagate forward first, then backward, so whichever side is known
  // determines the other.
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return (*out_attrs)[0] != -1;
}
/*!
 * \brief Peel one dimension off a linear output index and accumulate the
 * matching offset into the input tensor.
 * \param index_position position in offset_arr holding this dimension's linear offset
 * \param dim_size extent of the dimension being peeled
 * \param idx in/out: remaining linear index; divided by dim_size on return
 * \param inp_index in/out: accumulated linear index into the input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
                                  index_t *inp_index, const index_t* offset_arr) {
  const index_t quotient = *idx / dim_size;
  // For integer division, *idx % dim_size == *idx - quotient * dim_size.
  const index_t coord = *idx % dim_size;
  *inp_index += coord * offset_arr[index_position];
  *idx = quotient;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct depth_to_space_forward {
  /*!
   * \brief For output element i, peel the output coordinates innermost-first
   * and accumulate the input offset via offset_arr; the fixed peel order
   * (positions 2,5,1,4,3,0) encodes the (0,1,2,3,4,5)->(0,3,4,1,5,2)
   * transpose described above.
   */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
                                  const int block, const index_t* size, const index_t* offset_arr) {
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[3];
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2];
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    // Remaining depth after extracting the block*block factor.
    dim_size = size[1] / (block * block);
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing depth_to_space operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_depth_to_space {
  /*!
   * \brief Single-thread kernel: records the four input dimension sizes and
   * the six linear offsets of the virtual 6-D view
   * (N, block, block, C/block^2, H, W) used by depth_to_space_forward.
   */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1, const index_t size2,
                                  const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Offsets are built innermost-out: each entry is the previous one times
    // the extent of the dimension it spans.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * size[3];
    offset_arr[3] = offset_arr[4] * size[2];
    offset_arr[2] = offset_arr[3] * size[1] / (block * block);
    offset_arr[1] = offset_arr[2] * block;
    offset_arr[0] = offset_arr[1] * block;
  }
};
/*!
 * \brief Forward pass of depth_to_space: allocates a 10-entry index_t
 * workspace (6 offsets + 4 sizes), fills it with a one-thread kernel, then
 * runs the element-wise index-remapping kernel.
 */
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace layout: offset_arr[0..5] followed by size[0..3].
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // One thread populates sizes and offsets before the main launch.
      Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
          s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
          in_data.shape_[2], in_data.shape_[3]);
      Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}
/*!
 * \brief Shape inference for space_to_depth on an NCHW 4-D tensor:
 * (N, C, H, W) -> (N, C * block^2, H / block, W / block).
 */
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // Fix: CHECK_NE(block, 0) let negative block sizes through even though the
  // message promises a positive integer; CHECK_GT enforces it.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  // Fix: the two messages below said "Depth To Space" although this is the
  // Space To Depth shape checker.
  CHECK_EQ(in_shape[2] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;
  // Both spatial dimensions shrink by a factor of `block`.
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] / block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
/*! \brief Type inference for space_to_depth: dtype flows both ways until fixed. */
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  // Forward propagation first, then backward, so a dtype known on either
  // side pins the other.
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return (*out_attrs)[0] != -1;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template<int req>
struct space_to_depth_forward {
  /*!
   * \brief For output element i, peel the output coordinates innermost-first
   * and accumulate the input offset via offset_arr; the fixed peel order
   * (positions 4,2,1,5,3,0) encodes the (0,1,2,3,4,5)->(0,3,5,1,2,4)
   * transpose described above.
   */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
                                  const index_t* size, const index_t* offset_arr) {
    index_t inp_index = 0, idx = i, dim_size;
    // Spatial extents shrink by `block` in the output.
    dim_size = size[3] / block;
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2] / block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1];
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
 * \brief Kernel (launched with a single thread) that records the input tensor
 * dimensions and the linear stride of each of the six logical dimensions used
 * by the space_to_depth transpose.
 * \param i global thread id (always 0; one launch instance)
 * \param offset_arr array to be populated with per-dimension strides
 * \param size array to be populated with the size of each input dimension
 * \param block size of chunks to be moved out of the depth dimension
 * \param size0 size of Dim 0 of the input tensor
 * \param size1 size of Dim 1 of the input tensor
 * \param size2 size of Dim 2 of the input tensor
 * \param size3 size of Dim 3 of the input tensor
 */
template<int req>
struct compute_offset_for_space_to_depth {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Build the strides from the innermost dimension outwards: each stride is
    // the previous stride multiplied by the extent of the dimension inside it.
    DType stride = 1;
    offset_arr[5] = stride;
    stride = stride * block;
    offset_arr[4] = stride;
    stride = stride * size[3] / block;
    offset_arr[3] = stride;
    stride = stride * block;
    offset_arr[2] = stride;
    stride = stride * size[2] / block;
    offset_arr[1] = stride;
    stride = stride * size[1];
    offset_arr[0] = stride;
  }
};
/*!
 * \brief Forward pass of the space_to_depth operator.
 * Allocates a small device workspace holding 6 strides + 4 dimension sizes
 * (10 index_t values total), fills it with compute_offset_for_space_to_depth,
 * then launches space_to_depth_forward over every output element.
 */
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  // space_to_depth reuses DepthToSpaceParam; only block_size is consumed here.
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace layout: [0,6) strides (offset_arr), [6,10) dimension sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Single-thread launch to populate the stride/size tables on-device.
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
        s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
        in_data.shape_[2], in_data.shape_[3]);
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
        s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
        block, size, offset_arr);
    });
  });
}
namespace split_enum {
// Index of the single data input of the split operator.
enum SplitOpInputs {kData};
}  // namespace split_enum
/*! \brief Parameters of the split / split_v2 operator. */
struct SplitParam : public dmlc::Parameter<SplitParam> {
  mxnet::TShape indices;   // explicit split boundaries (used when sections == 0)
  int axis;                // axis along which to split; may be negative
  bool squeeze_axis;       // drop the (length-1) split axis from each output
  int sections;            // number of equal sections; 0 means "use indices"
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices)
    .describe("Indices of splits. The elements should denote the boundaries of at which split"
              " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1)
    .describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
    .describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
              " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
              " only along the `axis` which it is split."
              " Also `squeeze_axis` can be set to ``true``"
              " only if ``input.shape[axis] == num_outputs``.");
    DMLC_DECLARE_FIELD(sections).set_default(0)
    .describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
  }
};  // struct SplitParam
/*!
 * \brief Computes the boundary indices for splitting axis `axis` of `ishape`
 * into `sections` near-equal parts. When the axis size is not divisible by
 * `sections`, the first (size % sections) parts get one extra element.
 * The result has sections+1 entries, starting at 0 and ending at the axis size.
 */
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  mxnet::TShape indices(sections + 1, -1);
  const int64_t small_size = (int64_t) (ishape[axis] / sections);
  const int num_large = ishape[axis] % sections;
  indices[0] = 0;
  int64_t boundary = 0;
  for (int s = 0; s < sections; ++s) {
    // Larger (small_size + 1) sections come first.
    boundary += (s < num_large) ? (small_size + 1) : small_size;
    indices[s + 1] = boundary;
  }
  return indices;
}
/*!
 * \brief Type inference for split: every output takes the input's dtype,
 * which must already be known.
 */
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = in_attrs->at(0);
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  // Every output shares the input dtype.
  out_attrs->assign(num_outputs, dtype);
  return true;
}
/*!
 * \brief Shape inference for split along a resolved (non-negative) axis.
 * Assigns one shape per output section (optionally with the split axis
 * squeezed away), then back-propagates the aggregate shape to the input.
 * FIX: the two CHECK failure messages previously described the wrong
 * condition ("is not less than" for a start<=end check, "is no less than"
 * for an end<=size check) and were missing a separating space.
 */
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs,
                             const int real_axis) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  mxnet::TShape ishape = in_attrs->at(split_enum::kData);
  const mxnet::TShape indices =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
  // Pre-compute squeezed output shape for future usage
  mxnet::TShape squeezed_dshape = dshape;
  for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
    squeezed_dshape[d] = squeezed_dshape[d+1];
  }
  squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
  // Assign shape to every output
  for (int i = 0; i < num_outputs; ++i) {
    int start = indices[i];
    int end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      // Zero-size axis: every section is empty.
      end = start;
    } else {
      CHECK(start <= end)
        << "start " << start << " is greater than end " << end << " for subarray " << i;
      CHECK(end <= ishape[real_axis])
        << "end " << end << " exceeds the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  // Reconstruct the input shape from the assigned output shapes so partial
  // shape information can flow backwards through the graph.
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    if (param.squeeze_axis) {
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}
/*!
 * \brief Shape inference entry point for split: validates the axis, resolves
 * a negative axis to its non-negative equivalent, and delegates to
 * SplitOpShapeImpl.
 * FIX: for a negative axis the original check was
 * CHECK_LT(param.axis + dshape.ndim(), dshape.ndim()), which is vacuously
 * true for ANY negative axis, so axis < -ndim slipped through and produced a
 * negative real_axis. The correct lower bound is axis + ndim >= 0.
 */
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
  /*!
   * \brief Map function for the forward split_v2 operator: copies element i of
   * the flattened input into the output section it falls into.
   * \param i global thread id (linear index into the input)
   * \param in_data ptr to input buffer
   * \param out_data ptr to ptr of outputs buffer
   * \param indices ptr to section-boundary buffer along the split axis
   * \param num_sections # of sections after split
   * \param axis_size size of the axis being split
   * \param trailing_size step size within the data buffer of the axis being split
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType *in_data, DType** out_data, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Position of element i along the split axis.
    const size_t axis_pos = i / trailing_size % axis_size;
    // Section owning this position: the last section whose start <= axis_pos.
    size_t sec = 0;
    for (size_t cand = 1; cand < num_sections && indices[cand] <= axis_pos; ++cand) {
      sec = cand;
    }
    DType* dst = out_data[sec];
    const size_t within_sec = axis_pos - indices[sec];
    const size_t outer = i / (trailing_size * axis_size);
    const size_t inner = i % trailing_size;
    const size_t sec_len = indices[sec + 1] - indices[sec];
    dst[outer * trailing_size * sec_len + within_sec * trailing_size + inner] = in_data[i];
  }
};
struct ConcatenateKernel {
  /*!
   * \brief Map function for the backward split_v2 operator: gathers element i
   * of the flattened input gradient from the output-gradient section it
   * belongs to (the inverse of SplitKernel).
   * \param i global thread id (linear index into the input gradient)
   * \param out_grad ptr to ptr of output-gradient buffers
   * \param in_grad ptr to input-gradient buffer
   * \param indices ptr to section-boundary buffer along the split axis
   * \param num_sections # of sections after split
   * \param axis_size size of the axis being split
   * \param trailing_size step size within the data buffer of the axis being split
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad, DType* in_grad, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Position of element i along the split axis.
    const size_t axis_pos = i / trailing_size % axis_size;
    // Section owning this position: the last section whose start <= axis_pos.
    size_t sec = 0;
    for (size_t cand = 1; cand < num_sections && indices[cand] <= axis_pos; ++cand) {
      sec = cand;
    }
    DType* grad_src = out_grad[sec];
    const size_t within_sec = axis_pos - indices[sec];
    const size_t outer = i / (trailing_size * axis_size);
    const size_t inner = i % trailing_size;
    const size_t sec_len = indices[sec + 1] - indices[sec];
    in_grad[i] = grad_src[outer * trailing_size * sec_len + within_sec * trailing_size + inner];
  }
};
/*!
 * \brief Forward pass of split along a resolved (non-negative) axis.
 * Stages the section-boundary indices and the per-output data pointers in a
 * single device workspace, then launches SplitKernel over every input element.
 * FIX: removed the `leading` product, which was computed but never used.
 */
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  CHECK_LT(real_axis, input_data.ndim());
  // Extent of the split axis and the flattened size of all trailing dims.
  size_t mid = input_data.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  // Boundary list; explicit indices lack the terminal boundary, so append it.
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    // One workspace: boundary indices first, then the output pointers.
    Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
      reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
      reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
      Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<SplitKernel, xpu>::Launch(
      s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
      indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
/*!
 * \brief Forward entry point of split: validates input/output counts,
 * resolves a negative axis against the input rank, and dispatches to
 * SplitOpForwardImpl.
 */
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  const int expected_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  CHECK_EQ(outputs.size(), expected_outputs);
  const TBlob& input_data = inputs[split_enum::kData];
  int resolved_axis = param.axis;
  if (resolved_axis < 0) {
    resolved_axis += input_data.ndim();
  }
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, resolved_axis);
}
/*!
 * \brief Backward pass of split along a resolved (non-negative) axis.
 * Stages the section-boundary indices and the per-section out-grad pointers
 * in a single device workspace, then launches ConcatenateKernel to gather
 * them into the input gradient.
 * FIX: removed the `leading` product, which was computed but never used.
 */
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  CHECK_LT(real_axis, input_grad.ndim());
  // Extent of the split axis and the flattened size of all trailing dims.
  size_t mid = input_grad.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  // Boundary list; explicit indices lack the terminal boundary, so append it.
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    // One workspace: boundary indices first, then the out-grad pointers.
    Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
      reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
      reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
      Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<ConcatenateKernel, xpu>::Launch(
      s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
      indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
/*!
 * \brief Backward entry point of split: validates gradient counts, resolves
 * a negative axis against the input-gradient rank, and dispatches to
 * SplitOpBackwardImpl.
 * FIX: corrected the typo "mush match" in the failure message.
 */
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
    << "out grad vector size must match the output size";
  CHECK_EQ(outputs.size(), 1U);
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += outputs[split_enum::kData].ndim();
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
/*!
 * \brief Number of outputs the split operator produces: the requested section
 * count, or — when splitting by explicit boundaries — the number of indices.
 */
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  if (param.sections > 0) {
    return param.sections;
  }
  return param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
/*!
 * \brief std::hash specializations so these parameter structs can key
 * unordered containers (e.g. op caches).
 * FIX: operator() is now const — the std::hash requirements expect a
 * const-callable function object, and unordered containers invoke the
 * hasher through a const context.
 */
template<>
struct hash<mxnet::op::TransposeParam> {
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};

template<>
struct hash<mxnet::op::ReshapeParam> {
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};

template<>
struct hash<mxnet::op::ExpandDimParam> {
  size_t operator()(const mxnet::op::ExpandDimParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
allocator.h | #pragma once
#include <queue>
#include "adabs/pgas_addr.h"
#include "adabs/tools/alignment.h"
namespace adabs {
namespace collective {
namespace pgas {
// GASNet active-message handler: performs the actual allocation on the
// target node and replies with the resulting address (see definition below).
inline void remote_allocate_real(gasnet_token_t token, gasnet_handlerarg_t arg0,
                                 gasnet_handlerarg_t arg1,
                                 gasnet_handlerarg_t arg2,
                                 gasnet_handlerarg_t arg3,
                                 gasnet_handlerarg_t arg4,
                                 gasnet_handlerarg_t arg5
                                );
// Reply handler: writes the remotely allocated address back into the
// requester's wait variable.
inline void remote_malloc_real_reply(gasnet_token_t token, gasnet_handlerarg_t arg0,
                                     gasnet_handlerarg_t arg1,
                                     gasnet_handlerarg_t arg2,
                                     gasnet_handlerarg_t arg3
                                    );
// Handler: frees memory previously allocated on this node.
inline void remote_free_real(gasnet_token_t token, gasnet_handlerarg_t arg0,
                             gasnet_handlerarg_t arg1
                            );
// Handler: pushes a broadcast allocation address onto this node's
// collective memory queue.
inline void add_to_stack(gasnet_token_t token, void *buf, size_t nbytes,
                         gasnet_handlerarg_t arg0,
                         gasnet_handlerarg_t arg1
                        );
}
namespace impl{
// Requests an allocation on `node` and blocks until the address arrives.
inline void* remote_allocate(const int node, const int num_objects, const int batch_size, const int sizeT, const int a);
// Requests that `ptr` be freed on `node` (fire-and-forget).
inline void remote_free(const int node, void* ptr);
// Performs the actual local allocation, laying out the header and batch flags.
inline void* real_allocate(const int num_objects, const int batch_size, const int batch_mem_size, const int a);
// Queue of broadcast allocation addresses consumed by non-leader nodes.
extern std::queue<void*> memory_queue;
}
/**
* This is our collective allocator, which will allocate memory on all
* nodes. The memory will be connected with each other, so that it
* looks like one uniform memory address. The memory allocation follows
* this pattern:
* RRRRRFTTTTTTTTTTTTFTTTTTTTTTTTTFTTTTTTTTTTTT
* |-X-| |---num----| |---num----| |---num----|
 * whereas: - R are the addresses of this memory on the other nodes
* - X is the number of nodes this memory exists, which is
* currently identical to adabs::all
* - F is our flag indicating if data is available
* - T is the data
* - num indicates the batch size
* NOTE: This allocator follows the STL concept, but is not expected
 * to be STL conformant. We may decide to bend (or even break)
* parts of what is expected from STL allocators. After all
* STL allocators are "weird" (see Effective STL).
* Currently missing:
* - non-const references
* - max_size()
*/
// Forward declarations: the PGAS address type returned by the allocator,
// and the allocator primary template defined below.
template <typename T>
class pgas_addr;
template <typename T>
struct allocator;
// Specialization for void, since we can't have void& (and thus no
// reference typedefs). Mirrors std::allocator<void>; possibly unneeded.
template <>
struct allocator<void> {
  typedef pgas_addr<void> pointer;
  typedef const pgas_addr<void> const_pointer;
  typedef void value_type;
  // Rebind to obtain the allocator for a different element type.
  template <class U> struct rebind { typedef allocator<U> other; };
};
/*!
 * \brief STL-style collective allocator handing out pgas_addr<T> pointers.
 * All members are static; instances carry no state.
 * FIX: the single-batch allocate() overload called the two-size overload but
 * dropped its result (missing `return`), so callers received an
 * indeterminate pointer (undefined behavior).
 */
template <typename T>
struct allocator {
  /***************** TYPEDEFS ***********************/
  typedef size_t size_type;
  //typedef ptrdiff_t difference_type;
  typedef pgas_addr<T> pointer;
  typedef const pgas_addr<T> const_pointer;
  typedef T value_type;
  typedef const T& const_reference;
  // allows to rebind this allocator to a different type
  template <typename U> struct rebind { typedef allocator<U> other; };
  /***************** CONSTRUCTORS ********************/
  allocator() throw() {}
  allocator(const allocator&) throw() {}
  template <class U> allocator(const allocator<U>&) throw() {}
  ~allocator() throw() {}
  /****************** ALLOCATE **********************/
  // Convenience overload: batch_size defaults to 1.
  static pointer allocate(size_type num_objects, const void* localityHint = 0) {
    return allocate(num_objects, 1, localityHint);
  }
  static pointer allocate(size_type num_objects, size_type batch_size, const void* localityHint = 0);
  /****************** DEALLOCATE **********************/
  // The element count n is not needed; forwards to the single-arg overload.
  static void deallocate(pointer &p, size_type n) {
    deallocate(p);
  }
  static void deallocate(pointer &p);
  /****************** CONSTRUCT **********************/
  static void construct(pointer p, const T& val);
  static void destroy(pointer p);
  /****************** ADDRESS **********************/
  static const_pointer address(const_reference x);
};
/*!
 * \brief Collective allocation. The leader node allocates locally and on
 * every remote node, then broadcasts the per-node address table; non-leader
 * nodes block until their address arrives on the collective memory queue
 * (pushed by the add_to_stack handler).
 * NOTE(review): `leader`, `all` and `me` are globals defined elsewhere in
 * the project — presumably rank metadata; confirm their semantics there.
 */
template <typename T>
allocator<T>::pointer allocator<T>::allocate(allocator<T>::size_type num_objects,
                                             allocator<T>::size_type batch_size,
                                             const void* localityHint) {
  using namespace adabs::tools;
  void* ptr;
  if (leader) {
    // Batch memory = payload + alignment slack; alignment is at least an int
    // so the availability flag stored per batch fits.
    int a = tools::alignment<T>::val();
    if (a<sizeof(int)) a = sizeof(int);
    const size_t batch_mem_size = sizeof(T) * batch_size + a;
    void **ptrs = new void*[all];
    for (int i=0; i<all; ++i) {
      if (i == me) {
        // local allocate
        ptrs[i] = impl::real_allocate (num_objects, batch_size, batch_mem_size, a);
      } else {
        // allocate on remote node
        ptrs[i] = adabs::collective::impl::remote_allocate(i, num_objects, batch_size, batch_mem_size, a);
      }
      //std::cout << "allocated on " << i << ": " << ptrs[i] << std::endl;
    }
    ptr = ptrs[me];
    // broadcast the addresses to all nodes and put the ptr on the stack
    for (int i=0; i<all; ++i) {
      if (i==me) {
        // Local node: write the address table into this allocation's header.
        void** ptrptr = (void**)ptr;
        // NOTE(review): this inner loop variable shadows the outer `i`;
        // behavior is correct but the shadowing is fragile.
        for (int i=0; i<all; ++i) {
          ptrptr[i] = ptrs[i];
        }
      } else {
        // Remote node: ship the whole address table into its header and
        // trigger add_to_stack via the broadcast handler.
        GASNET_CALL(gasnet_AMRequestLong2(i, adabs::impl::COLLECTIVE_ALLOC_BROADCAST,
                                          ptrs, all*sizeof(T*), ptrs[i],
                                          get_low(ptrs[i]),
                                          get_high(ptrs[i])
                                         )
                   )
      }
    }
    for (int i=0; i<all; ++i) {
      //std::cout << "allocated on " << i << ": " << ptrs[i] << std::endl;
    }
    delete[] ptrs;
  } else {
    // get memory from the collective memory stack
    // Busy-wait until the leader's broadcast lands in memory_queue.
    while (true) {
      bool end = false;
      // TODO OPTIMIZE ME!
      #pragma omp critical (queue)
      {
        if (!impl::memory_queue.empty()) {
          ptr = impl::memory_queue.front();
          impl::memory_queue.pop();
          end = true;
        }
      }
      if (end) break;
    }
    //std::cout << me << ": got " << ptr << std::endl;
  }
  return pointer(ptr, batch_size);
}
/*!
 * \brief Frees the local portion of a collective allocation.
 * Only the memory owned by this node is released; remote portions are not
 * contacted here.
 */
template <typename T>
void allocator<T>::deallocate(pointer &p) {
  // TODO: we could broadcast a 0 to all nodes so they will not sent
  //       us any information
  free(p._orig_ptr);
}
/*!
 * \brief Copy-construct a T at the location addressed by p.
 * Not implemented: always throws.
 * FIX: removed the statement after the throw ("p->T(val);") — it was
 * unreachable dead code and not valid C++ to begin with.
 */
template <typename T>
void allocator<T>::construct(pointer p, const T& val) {
  (void)p;
  (void)val;
  throw "not implemented";
}
/*!
 * \brief Destroy the T addressed by p.
 * Not implemented: always throws.
 * FIX: removed the unreachable "p->~T();" that followed the throw.
 */
template <typename T>
void allocator<T>::destroy(pointer p) {
  (void)p;
  throw "not implemented";
}
/*!
 * \brief Recover the pgas_addr of an element from a reference to it.
 * Not implemented: always throws before reaching the sketched pointer
 * arithmetic kept below for reference.
 */
template <typename T>
allocator<T>::const_pointer allocator<T>::address(const_reference x) {
  // TODO
  throw "not implemented";
  //char* ptr = &x;
  //ptr -= sizeof(int);
  //return const_pointer((void*)ptr);
}
// Allocators are stateless, so any two instances are interchangeable.
template <class T1, class T2>
bool operator==(const allocator<T1>&, const allocator<T2>&) throw() {
  return true;
}
// Complement of operator==: stateless allocators never compare unequal.
template <class T1, class T2>
bool operator!=(const allocator<T1>&, const allocator<T2>&) throw() {
  return false;
}
namespace impl {
/*!
 * \brief Asks `node` to allocate memory and blocks until the reply arrives.
 * The address of the local `returnee` variable is shipped with the request;
 * the reply handler (remote_malloc_real_reply) overwrites it with the
 * allocated address.
 */
inline void* remote_allocate(const int node, const int num_objects, const int batch_size, const int sizeT, const int alignmentT) {
  using namespace adabs::tools;
  // -1 is the "no reply yet" sentinel the spin loop below waits on.
  volatile long returnee;
  returnee = -1;
  // start remote thread and allocate memory
  GASNET_CALL(gasnet_AMRequestShort6(node, adabs::impl::REMOTE_COLLECTIVE_ALLOC,
                                     get_low(&returnee),
                                     get_high(&returnee),
                                     num_objects,
                                     batch_size,
                                     sizeT,
                                     alignmentT
                                    )
             )
  // wait until returnee != -1
  // NOTE(review): no gasnet_AMPoll() inside this spin loop — presumably a
  // background thread makes AM progress; confirm, otherwise this can hang.
  while (returnee == -1) {}
  return (void*)(returnee);
}
/*!
 * \brief Asks `node` to free `ptr` (fire-and-forget; no reply is awaited).
 * A null pointer is silently ignored.
 */
inline void remote_free(const int node, void* ptr) {
  using namespace adabs::tools;
  if (ptr == 0) return;
  GASNET_CALL(gasnet_AMRequestShort2(node, adabs::impl::REMOTE_COLLECTIVE_FREE,
                                     get_low(ptr),
                                     get_high(ptr)
                                    )
             )
}
/*!
 * \brief Local allocation backing the collective allocator. Lays out the
 * memory as documented on the class: a header of `all` node addresses
 * (plus alignment padding), followed by num_batch batches, each ending in
 * an int availability flag initialized to 0.
 * NOTE(review): the malloc result is not checked for null — an OOM here
 * crashes later on the header writes.
 */
inline void* real_allocate(const int num_objects, const int batch_size, const int batch_mem_size, const int alignmentT) {
  //std::cout << "real alloc parameter: " << num_objects << ", " << batch_size << ", " << batch_mem_size << ", " << alignmentT << std::endl;
  // Round the batch count up so every object fits.
  const size_t num_batch = (num_objects % batch_size == 0)
                             ? (num_objects / batch_size)
                             : (num_objects / batch_size) + 1;
  // Padding so the data area after the address table is aligned for T.
  // NOTE(review): when the table size is already a multiple of alignmentT
  // this adds a full (harmless) extra alignmentT bytes.
  const int pointer_alignment = alignmentT - ((adabs::all*sizeof(void*)) % alignmentT);
  const size_t mem_size = num_batch * batch_mem_size + adabs::all*sizeof(void*) + pointer_alignment;
  void *ptr = malloc (mem_size);
  // Header: only our own slot is filled here; the remaining node addresses
  // arrive later via the leader's broadcast.
  void **init_ptr_1 = (void**)ptr;
  for (int i=0; i<adabs::all; ++i) {
    if (i!=me)
      *init_ptr_1 = 0;
    else
      *init_ptr_1 = ptr;
    ++init_ptr_1;
  }
  char *init_ptr_2 = reinterpret_cast<char*>(init_ptr_1) + pointer_alignment;
  //std::cout << "malloc returned " << ptr << " to " << (void*)((char*)ptr + mem_size) << " - " << mem_size << std::endl;
  // Zero the per-batch availability flag that sits at the tail of each batch.
  for (int i=0; i<num_batch; ++i) {
    init_ptr_2 += batch_mem_size - alignmentT;
    int *flag_ptr = reinterpret_cast<int*>(init_ptr_2);
    //std::cout << "write 0 to " << flag_ptr << std::endl;
    *flag_ptr = 0;
    init_ptr_2 += alignmentT;
  }
  return ptr;
}
}
namespace pgas {
/*!
 * \brief AM handler for REMOTE_COLLECTIVE_ALLOC: allocates locally via
 * real_allocate and replies with (requester wait-variable address, allocated
 * address), each split into low/high 32-bit handler args.
 */
inline void remote_allocate_real(gasnet_token_t token, gasnet_handlerarg_t arg0,
                                 gasnet_handlerarg_t arg1,
                                 gasnet_handlerarg_t arg2,
                                 gasnet_handlerarg_t arg3,
                                 gasnet_handlerarg_t arg4,
                                 gasnet_handlerarg_t arg5
                                ) {
  using namespace adabs::tools;
  // arg2..arg5 = num_objects, batch_size, batch_mem_size, alignment.
  void* returnee = adabs::collective::impl::real_allocate(arg2, arg3, arg4, arg5);
  // arg0/arg1 echo the requester's wait-variable address back unchanged.
  GASNET_CALL(gasnet_AMReplyShort4(token, adabs::impl::REMOTE_COLLECTIVE_ALLOC_REPLY,
                                   arg0,
                                   arg1,
                                   get_low(returnee),
                                   get_high(returnee)
                                  )
             )
}
/*!
 * \brief AM reply handler: stores the remotely allocated address into the
 * requester's wait variable, releasing the spin loop in remote_allocate.
 * NOTE(review): the cast assumes a pointer fits in a long — true on LP64,
 * not on LLP64 (e.g. 64-bit Windows); confirm the supported platforms.
 */
inline void remote_malloc_real_reply(gasnet_token_t token, gasnet_handlerarg_t arg0,
                                     gasnet_handlerarg_t arg1,
                                     gasnet_handlerarg_t arg2,
                                     gasnet_handlerarg_t arg3
                                    ) {
  using namespace adabs::tools;
  // arg0/arg1: address of the waiting long; arg2/arg3: allocated address.
  long* local = get_ptr<long> (arg0, arg1);
  void* remote = get_ptr<void> (arg2, arg3);
  *local = reinterpret_cast<long>(remote);
}
/*!
 * \brief AM handler for REMOTE_COLLECTIVE_FREE: reassembles the pointer from
 * its low/high halves and frees it on this node.
 */
inline void remote_free_real(gasnet_token_t token, gasnet_handlerarg_t arg0,
                             gasnet_handlerarg_t arg1
                            ) {
  using namespace adabs::tools;
  void* ptr = get_ptr<void>(arg0, arg1);
  free (ptr);
}
/*!
 * \brief AM handler for the leader's allocation broadcast: queues the local
 * allocation address so a non-leader allocate() call can pick it up.
 * The payload (buf/nbytes) — the node address table — has already been
 * deposited into the allocation header by the long AM transfer.
 */
inline void add_to_stack(gasnet_token_t token, void *buf, size_t nbytes,
                         gasnet_handlerarg_t arg0,
                         gasnet_handlerarg_t arg1
                        ) {
  using namespace adabs::tools;
  void* ptr = get_ptr<void>(arg0, arg1);
  // Same critical section name as the consumer in allocator<T>::allocate.
  #pragma omp critical (queue)
  adabs::collective::impl::memory_queue.push(ptr);
}
}
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.