source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
array_init_2.c | // Test the handling of two loops under omp for
// watch the loop index replacement (private by default)
// and the array outlining
#include <stdlib.h>
/*
 * Allocate a 500x500 matrix as an array of row pointers, zero it in
 * parallel (loop indices i/j are private by default under omp for),
 * then release all memory before exit.
 *
 * Fixes over the original: malloc results are checked, and every
 * allocation is freed (the original leaked the whole matrix).
 */
int main(void)
{
    int i, j;
    float **u = (float **) malloc( 500 * sizeof( float *) );
    if (u == NULL)
        return EXIT_FAILURE;
    for( i=0; i<500; i++) {
        u[i] = (float *) malloc( 500 * sizeof(float) );
        if (u[i] == NULL) {
            /* roll back the rows allocated so far */
            while (i-- > 0)
                free(u[i]);
            free(u);
            return EXIT_FAILURE;
        }
    }
#pragma omp parallel for
    for (i=0; i<500; i++)
        for (j=0; j<500; j++)
        {
            u[i][j] = 0.0;
        }
    /* release all rows, then the row-pointer array itself */
    for (i=0; i<500; i++)
        free(u[i]);
    free(u);
    return 0;
}
|
idasFoodWeb_kry_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDAS: Food web problem, OpenMP, GMRES,
* user-supplied preconditioner
*
* This example program uses SUNLinSol_SPGMR as the linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDAS using the SUNLinSol_SPGMR linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idasFoodWeb_kry_omp
* To specify the number of threads at the command line, use
* % ./idasFoodWeb_kry_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <idas/idas.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_dense.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* helpful macros */
#ifndef MAX
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
/* Type: UserData. Contains problem constants, etc. */
typedef struct {
sunindextype Neq, ns, np, mx, my;
realtype dx, dy, **acoef;
realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
realtype **PP[MX][MY];
sunindextype *pivot[MX][MY];
N_Vector rates;
N_Vector ewt;
void *ida_mem;
int nthreads;
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
static int Precond(realtype tt, N_Vector cc, N_Vector cp,
N_Vector rr, realtype cj, void *user_data);
static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
N_Vector rr, N_Vector rvec, N_Vector zvec,
realtype cj, realtype delta, void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(int maxl, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
/*
 * main: driver for the OpenMP food-web DAE example.
 * Builds the user data block, the solution vectors, and the IDA/SPGMR
 * solver stack; corrects the initial condition with IDACalcIC; then
 * integrates to NOUT output times and prints statistics.
 */
int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, jx, jy, retval;
  int maxl;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;
  /* NULL everything up front so a failed allocation is detectable */
  ida_mem = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;
  /* Set the number of threads to use */
  num_threads = 1; /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS */
#endif
  /* command-line argument (if any) overrides the environment */
  if (argc > 1)
    num_threads = (int) strtol(argv[1], NULL, 0);
  /* Allocate and initialize user data block webdata. */
  webdata = (UserData) malloc(sizeof *webdata);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads);
  webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES);
  webdata->ewt = N_VNew_OpenMP(NEQ, num_threads);
  /* one small dense preconditioner block + pivot array per grid point */
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      (webdata->pivot)[jx][jy] = newIndexArray(NUM_SPECIES);
      (webdata->PP)[jx][jy] = newDenseMat(NUM_SPECIES, NUM_SPECIES);
    }
  }
  webdata->nthreads = num_threads;
  InitUserData(webdata);
  /* Allocate N-vectors and initialize cc, cp, and id. */
  cc = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
  cp = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
  id = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);
  SetInitialProfiles(cc, cp, id, webdata);
  /* Set remaining inputs to IDAMalloc. */
  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;
  /* Call IDACreate and IDAMalloc to initialize IDA. */
  ida_mem = IDACreate();
  if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);
  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);
  /* id marks which components are differential (1) vs algebraic (0) */
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);
  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);
  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);
  /* stash ida_mem so Precond can query step size / error weights */
  webdata->ida_mem = ida_mem;
  /* Create SUNLinSol_SPGMR linear solver, attach to IDA, and set
     preconditioning routines. */
  maxl = 16; /* max dimension of the Krylov subspace */
  LS = SUNLinSol_SPGMR(cc, PREC_LEFT, maxl); /* IDA only allows left preconditioning */
  if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1);
  retval = IDASetLinearSolver(ida_mem, LS, NULL);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);
  retval = IDASetPreconditioner(ida_mem, Precond, PSolve);
  if(check_retval(&retval, "IDASetPreconditioner", 1)) return(1);
  /* Call IDACalcIC (with default options) to correct the initial values. */
  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);
  /* Print heading, basic parameters, and initial values. */
  PrintHeader(maxl, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);
  /* Loop over iout, call IDASolve (normal mode), print selected output. */
  for (iout = 1; iout <= NOUT; iout++) {
    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);
    PrintOutput(ida_mem, cc, tret);
    /* output times: t *= 10 for the first outputs, then t += 0.3 */
    if (iout < 3) tout *= TMULT; else tout += TADD;
  }
  /* Print final statistics and free memory. */
  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);
  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);
  N_VDestroy(cc);
  N_VDestroy(cp);
  N_VDestroy(id);
  destroyMat(webdata->acoef);
  N_VDestroy(webdata->rates);
  N_VDestroy(webdata->ewt);
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy ++) {
      destroyArray((webdata->pivot)[jx][jy]);
      destroyMat((webdata->PP)[jx][jy]);
    }
  }
  free(webdata);
  return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the
* equations, then loads the residual vector accordingly,
* using cp in the case of prey species.
*/
/*
 * resweb: System residual function F(t, c, c') for the DAE.
 * Fweb loads res with the right-hand sides; this routine then forms
 * the residual: for prey components (is < np) the equation is
 * differential, so F = c' - RHS; for predator components it is
 * algebraic, so F = -RHS (no c' term).  The grid sweep is parallelized
 * over the outer y-loop with all loop-local indices made private.
 */
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res, void *user_data)
{
  sunindextype jx, jy, is, yloc, loc, np;
  realtype *resv, *cpv;
  UserData webdata;
  jx = jy = is = 0;
  webdata = (UserData)user_data;
  cpv = NV_DATA_OMP(cp);
  resv = NV_DATA_OMP(res);
  np = webdata->np;
  /* Call Fweb to set res to vector of right-hand sides. */
  Fweb(tt, cc, res, webdata);
  /* Loop over all grid points, setting residual values appropriately
     for differential or algebraic components. */
#pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;          /* offset of row jy in the flat vector */
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;  /* offset of point (jx,jy) */
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np)
          resv[loc+is] = cpv[loc+is] - resv[loc+is];  /* differential */
        else
          resv[loc+is] = -resv[loc+is];               /* algebraic */
      }
    }
  }
  return(0);
}
/*
 * Precond: Preconditioner setup routine.
 * For each grid point (jx,jy), approximate the NUM_SPECIES x NUM_SPECIES
 * block of the Jacobian of the reaction terms by finite differences of
 * WebRates, add cj on the diagonal for the differential (prey) components,
 * and LU-factor the block in place (denseGETRF) for later use by PSolve.
 *
 * Change vs. original: the diagonal cj contribution used the hard-coded
 * test `js < 1`; it is now `js < np` (np = webdata->np), which is the
 * condition the rest of the code (resweb, SetInitialProfiles) uses to
 * identify differential components.  Identical behavior for np == 1.
 */
static int Precond(realtype tt, N_Vector cc, N_Vector cp,
                   N_Vector rr, realtype cj, void *user_data)
{
  int retval;
  sunindextype ret;
  sunindextype np;
  realtype uround, xx, yy, del_x, del_y;
  realtype **Pxy, *ratesxy, *Pxycol, *cxy, *cpxy, *ewtxy, cctmp;
  realtype inc, fac, sqru, perturb_rates[NUM_SPECIES];
  int is, js, jx, jy;
  void *ida_mem;
  N_Vector ewt;
  realtype hh;
  UserData webdata;
  webdata = (UserData) user_data;
  del_x = webdata->dx;
  del_y = webdata->dy;
  np = webdata->np;
  uround = UNIT_ROUNDOFF;
  sqru = sqrt(uround);
  /* error weights and current step size drive the perturbation size */
  ida_mem = webdata->ida_mem;
  ewt = webdata->ewt;
  retval = IDAGetErrWeights(ida_mem, ewt);
  if(check_retval(&retval, "IDAGetErrWeights", 1)) return(1);
  retval = IDAGetCurrentStep(ida_mem, &hh);
  if(check_retval(&retval, "IDAGetCurrentStep", 1)) return(1);
  for (jy = 0; jy < MY; jy++) {
    yy = jy * del_y;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * del_x;
      Pxy = (webdata->PP)[jx][jy];
      cxy = IJ_Vptr(cc, jx, jy);
      cpxy = IJ_Vptr(cp, jx, jy);
      ewtxy = IJ_Vptr(ewt, jx, jy);
      ratesxy = IJ_Vptr((webdata->rates), jx, jy);
      for (js = 0; js < NUM_SPECIES; js++) {
        /* perturbation for column js of the difference quotient */
        inc = sqru*(MAX(fabs(cxy[js]), MAX(hh*fabs(cpxy[js]), ONE/ewtxy[js])));
        cctmp = cxy[js];
        cxy[js] += inc;
        fac = -ONE/inc;
        WebRates(xx, yy, cxy, perturb_rates, webdata);
        Pxycol = Pxy[js];
        for (is = 0; is < NUM_SPECIES; is++)
          Pxycol[is] = (perturb_rates[is] - ratesxy[is])*fac;
        /* cj term on the diagonal for differential (prey) components */
        if (js < np) Pxycol[js] += cj;
        cxy[js] = cctmp;  /* restore the perturbed entry */
      }
      /* LU-factor the block; a nonzero return means it is singular */
      ret = denseGETRF(Pxy, NUM_SPECIES, NUM_SPECIES, (webdata->pivot)[jx][jy]);
      if (ret != 0) return(1);
    }
  }
  return(0);
}
/*
 * PSolve: Preconditioner solve routine.
 * Copies rvec into zvec, then for each grid point back-solves the
 * LU-factored block computed in Precond (denseGETRS) against the
 * corresponding segment of zvec.  The two grid loops are collapsed
 * and parallelized; blocks are independent, so no synchronization
 * is needed.
 *
 * Fix vs. original: the misspelled parameter name `dalta` is corrected
 * to `delta` (unused here, but part of the IDA solve-callback signature).
 */
static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector rr, N_Vector rvec, N_Vector zvec,
                  realtype cj, realtype delta, void *user_data)
{
  realtype **Pxy, *zxy;
  sunindextype *pivot;
  sunindextype jx, jy;
  UserData webdata;
  jx = jy = 0;
  webdata = (UserData) user_data;
  /* start from z = r, then solve each block in place */
  N_VScale(ONE, rvec, zvec);
#pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, pivot) schedule(static) num_threads(webdata->nthreads)
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      zxy = IJ_Vptr(zvec, jx, jy);
      Pxy = (webdata->PP)[jx][jy];
      pivot = (webdata->pivot)[jx][jy];
      denseGETRS(Pxy, NUM_SPECIES, pivot, zxy);
    }
  }
  return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData: Load problem constants in webdata (of type UserData).
*/
/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 * Fills mesh sizes and spacings, the interaction matrix acoef (four
 * np x np quadrants plus a -AA diagonal), the b coefficients, and the
 * x/y diffusion coefficients cox/coy.
 */
static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype dx2, dy2;
  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);
  webdata->dy = AY/(MY-1);
  webdata->Neq = NEQ;
  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx);
  dy2 = (webdata->dy)*(webdata->dy);
  for (i = 0; i < np; i++) {
    /* Fill the four quadrants of acoef, row by row:
       prey-prey = 0, prey-predator = -GG,
       predator-prey = EE, predator-predator = 0. */
    for (j = 0; j < np; j++) {
      acoef[i][np+j]    = -GG;
      acoef[i+np][j]    = EE;
      acoef[i][j]       = ZERO;
      acoef[i+np][np+j] = ZERO;
    }
    /* Reset the diagonal elements of acoef to -AA. */
    acoef[i][i] = -AA;
    acoef[i+np][i+np] = -AA;
    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB;
    bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2;
    cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2;
    coy[i+np] = DPRED/dy2;
  }
}
/*
* SetInitialProfiles: Set initial conditions in cc, cp, and id.
* A polynomial profile is used for the prey cc values, and a constant
* (1.0e5) is loaded as the initial guess for the predator cc values.
* The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and
* the predator cp values are set to zero.
*/
/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey and 0 for the predators.
 * The prey cp values are set according to the given system, and
 * the predator cp values are set to zero.
 */
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype loc, yloc, is, jx, jy, np;
  realtype xx, yy, xyfactor;
  realtype *ccv, *cpv, *idv;
  ccv = NV_DATA_OMP(cc);
  cpv = NV_DATA_OMP(cp);
  idv = NV_DATA_OMP(id);
  np = webdata->np;
  /* Loop over grid, load cc values and id values. */
  for (jy = 0; jy < MY; jy++) {
    yy = jy * webdata->dy;
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * webdata->dx;
      /* polynomial profile, squared: vanishes on the domain boundary */
      xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
      xyfactor *= xyfactor;
      loc = yloc + NUM_SPECIES*jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np) {
          /* prey: polynomial profile, marked differential (id = 1) */
          ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
          idv[loc+is] = ONE;
        }
        else {
          /* predator: flat guess, marked algebraic (id = 0) */
          ccv[loc+is] = RCONST(1.0e5);
          idv[loc+is] = ZERO;
        }
      }
    }
  }
  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);
  /* Set c' for predators to 0. */
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = np; is < NUM_SPECIES; is++) {
        cpv[loc+is] = ZERO;
      }
    }
  }
}
/*
* Print first lines of output (problem description)
*/
/*
 * PrintHeader: print the problem description, mesh/system sizes,
 * tolerances (format depends on the configured realtype precision),
 * linear-solver choice, and the column headings for PrintOutput.
 */
static void PrintHeader(int maxl, realtype rtol, realtype atol)
{
  printf("\nidasFoodWeb_kry_omp: Predator-prey DAE OpenMP example problem using Krylov solver for IDAS \n\n");
  printf("Number of species ns: %d", NUM_SPECIES);
  printf("     Mesh dimensions: %d x %d", MX, MY);
  printf("     System size: %d\n", NEQ);
  /* pick the printf length modifier matching realtype */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters:  rtol = %Lg   atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#endif
  printf("Linear solver: SUNLinSol_SPGMR, maxl = %d\n", maxl);
  printf("CalcIC called to correct initial predator concentrations.\n\n");
  printf("-----------------------------------------------------------\n");
  printf("  t        bottom-left  top-right");
  printf("    | nst  k      h\n");
  printf("-----------------------------------------------------------\n\n");
}
/*
* PrintOutput: Print output values at output time t = tt.
* Selected run statistics are printed. Then values of the concentrations
* are printed for the bottom left and top right grid points only.
*/
/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed. Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;
  /* query last order, step count, and last step size from IDA */
  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);
  /* concentrations at the two corner grid points */
  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);
  /* format string depends on the configured realtype precision */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le   | %3ld  %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4Le %12.4Le   |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e   | %3ld  %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4e %12.4e   |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e   | %3ld  %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4e %12.4e   |\n",c_bl[i],c_tr[i]);
#endif
  printf("\n");
}
/*
* PrintFinalStats: Print final run data contained in iopt.
*/
/*
 * PrintFinalStats: query IDA for cumulative run counters (steps,
 * residual evaluations, linear iterations, error-test failures,
 * preconditioner setups/solves) and print them.
 */
static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, sli, netf, nps, npevals, nrevalsLS;
  int retval;
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumLinIters(ida_mem, &sli);
  check_retval(&retval, "IDAGetNumLinIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumPrecSolves(ida_mem, &nps);
  check_retval(&retval, "IDAGetNumPrecSolves", 1);
  retval = IDAGetNumPrecEvals(ida_mem, &npevals);
  check_retval(&retval, "IDAGetNumPrecEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nrevalsLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);
  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps                    = %ld\n", nst);
  printf("Number of residual evaluations     = %ld\n", nre);
  printf("Number of Preconditioner evaluations = %ld\n", npevals);
  printf("Number of linear iterations        = %ld\n", sli);
  printf("Number of error test failures      = %ld\n", netf);
  printf("Number of precond solve fun called = %ld\n", nps);
}
/*
* Fweb: Rate function for the food-web problem.
* This routine computes the right-hand sides of the system equations,
* consisting of the diffusion term and interaction term.
* The interaction term is computed by the function WebRates.
*/
/*
 * Fweb: Rate function for the food-web problem.
 * This routine computes the right-hand sides of the system equations,
 * consisting of the diffusion term and interaction term.
 * The interaction term is computed by the function WebRates.
 */
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;
  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */
  jx = jy = is = 0;
  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* neighbor offsets; at a boundary the sign flips so the mirrored
       interior point is used (homogeneous Neumann condition) */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0 ) ? NSMX : -NSMX;
    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);
      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);
      /* Loop over species, do differencing, load crate segment. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {
        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);
        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);
        /* Compute the crate values at (xx,yy): diffusion + reaction. */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
          cox[is] * (dcxui - dcxli) + ratesxy[is];
      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */
}
/*
* WebRates: Evaluate reaction rates at a given spatial point.
* At a given (x,y), evaluate the array of ns reaction terms R.
*/
/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * At a given (x,y), computes ratesxy[i] = c_i * (b_i*fac + (A c)_i),
 * where (A c)_i is the dot product of row i of acoef with cxy and
 * fac is the spatially varying factor in the b coefficients.
 */
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata)
{
  int idx;
  realtype fac;
  /* spatial modulation factor for the b coefficients */
  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);
  /* first pass: ratesxy <- A * cxy, row by row */
  for (idx = 0; idx < NUM_SPECIES; idx++)
    ratesxy[idx] = dotprod(NUM_SPECIES, cxy, acoef[idx]);
  /* second pass: fold in the species concentration and b term */
  for (idx = 0; idx < NUM_SPECIES; idx++)
    ratesxy[idx] = cxy[idx]*( bcoef[idx]*fac + ratesxy[idx] );
}
/*
* dotprod: dot product routine for realtype arrays, for use by WebRates.
*/
/*
 * dotprod: dot product routine for realtype arrays, for use by WebRates.
 * Returns sum over k of x1[k] * x2[k].
 */
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype k;
  realtype sum = ZERO;
  for (k = 0; k < size; k++)
    sum += x1[k] * x2[k];
  return(sum);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
/*
 * Check function return value...
 *   opt == 0 means SUNDIALS function allocates memory so check if
 *            returned NULL pointer
 *   opt == 1 means SUNDIALS function returns an integer value so check if
 *            retval < 0
 *   opt == 2 means function allocates memory so check if returned
 *            NULL pointer
 * Returns 1 on failure (after printing a diagnostic), 0 otherwise.
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  /* opt 0: SUNDIALS allocator — NULL means allocation failed */
  if (opt == 0) {
    if (returnvalue != NULL) return(0);
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }
  /* opt 1: integer return code — negative means failure */
  if (opt == 1) {
    int *rv = (int *) returnvalue;
    if (*rv >= 0) return(0);
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
            funcname, *rv);
    return(1);
  }
  /* opt 2: plain allocator — NULL means allocation failed */
  if (opt == 2 && returnvalue == NULL) {
    fprintf(stderr,
            "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }
  return(0);
}
|
GB_unop__identity_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint64_int16
// op(A') function: GB_unop_tran__identity_uint64_int16
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
 * GB_unop_apply__identity_uint64_int16:
 * Cx [p] = (uint64_t) Ax [p] for all entries, in parallel.
 * Two cases: full/sparse (Ab == NULL, every p is an entry) and bitmap
 * (Ab [p] selects which positions hold entries).  Returns GrB_NO_VALUE
 * when this kernel is compiled out via GB_DISABLE.
 */
GrB_Info GB_unop_apply__identity_uint64_int16
(
    uint64_t *Cx,         // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        /* full/sparse case: every position 0..anz-1 is an entry */
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        /* identity op with no typecast: a plain parallel memcpy suffices
           (not taken here; the macro is 0 for uint64<-int16) */
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   /* skip positions with no entry */
            int16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
 * GB_unop_tran__identity_uint64_int16:
 * C = (uint64_t) A' — transpose with typecast.  The actual transpose
 * loop is textually included from the shared template
 * GB_unop_transpose.c, which uses the GB_CAST_OP/GB_GETA macros
 * defined above.  Returns GrB_NO_VALUE when compiled out.
 */
GrB_Info GB_unop_tran__identity_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
krb5pa-md5_fmt_plug.c | /*
* Kerberos 5 etype 23 "PA ENC TIMESTAMP" by magnum
*
* Previously called mskrb5 because I had the idea it was Micro$oft specific.
*
* Pcap file -> input file:
* 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml
* 2. krbng2john.py ~/capture.pdml > krb5.in
* 3. Run john on krb5.in
*
* PA_DATA_ENC_TIMESTAMP = Checksum[16 bytes] . Enc_Timestamp[36 bytes]
* -> encode as:
* HexChecksum[32 chars], HexTimestamp[72 chars]
*
* Legacy input format:
* user:$mskrb5$user$realm$HexChecksum$HexTimestamp
*
* New input format from krbpa2john.py (the above is still supported),
* note the lack of a separator between HexTimestamp and HexChecksum:
* user:$krb5pa$etype$user$realm$salt$HexTimestampHexChecksum
*
* user, realm and salt are unused in this format.
*
* This attacks a known-plaintext vulnerability in AS_REQ pre-auth packets. The
* known plaintext is a UTC timestamp in the format 20081120171510Z. Only if
 * this indicates a match do we decrypt the whole timestamp and calculate our own
* checksum to be really sure.
*
* The plaintext attack combined with re-using key setup was said to result in
* more than 60% speedup. This was confirmed using John the Ripper and variants
* of this code.
*
* http://www.ietf.org/rfc/rfc4757.txt
* http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html
*
* OMP is supported and scales very well now.
*
* This software is Copyright (c) 2011-2012 magnum, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mskrb5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mskrb5);
#else
#if AC_BUILT
#include "autoconfig.h"
#endif
#include <sys/types.h>
#include <sys/stat.h>
#if !AC_BUILT || HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "common.h"
#include "unicode.h"
#include "md5.h"
#include "hmacmd5.h"
#include "md4.h"
#include "rc4.h"
#include "memdbg.h"
#define FORMAT_LABEL "krb5pa-md5"
#define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 23" /* md4 rc4-hmac-md5 */
#define FORMAT_TAG "$krb5pa$"
#define FORMAT_TAG2 "$mskrb5$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 125
#define MAX_REALMLEN 64
#define MAX_USERLEN 64
#define MAX_SALTLEN 128
#define TIMESTAMP_SIZE 36
#define CHECKSUM_SIZE 16
#define KEY_SIZE 16
#define BINARY_SIZE CHECKSUM_SIZE
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct salt_t)
#define SALT_ALIGN 4
#define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN)
// these may be altered in init() if running OMP
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
// Second and third plaintext will be replaced in init() under come encodings
// Self-test vectors.  Both accepted tag forms appear:
//   $krb5pa$23$user$realm$salt$<timestamp||checksum hex>
//   $mskrb5$user$realm$<checksum hex>$<timestamp hex>   (legacy)
static struct fmt_tests tests[] = {
	{"$krb5pa$23$user$realm$salt$afcbe07c32c3450b37d0f2516354570fe7d3e78f829e77cdc1718adf612156507181f7daeb03b6fbcfe91f8346f3c0ae7e8abfe5", "John"},
	{"$mskrb5$john$JOHN.DOE.MS.COM$02E837D06B2AC76891F388D9CC36C67A$2A9785BF5036C45D3843490BF9C228E8C18653E10CE58D7F8EF119D2EF4F92B1803B1451", "fr2beesgr"},
	{"$mskrb5$user1$EXAMPLE.COM$08b5adda3ab0add14291014f1d69d145$a28da154fa777a53e23059647682eee2eb6c1ada7fb5cad54e8255114270676a459bfe4a", "openwall"},
	{"$mskrb5$hackme$EXAMPLE.NET$e3cdf70485f81a85f7b59a4c1d6910a3$6e2f6705551a76f84ec2c92a9dd0fef7b2c1d4ca35bf1b02423359a3ecaa19bdf07ed0da", "openwall@123"},
	{"$mskrb5$$$98cd00b6f222d1d34e08fe0823196e0b$5937503ec29e3ce4e94a051632d0fff7b6781f93e3decf7dca707340239300d602932154", ""},
	{"$mskrb5$$$F4085BA458B733D8092E6B348E3E3990$034ACFC70AFBA542690B8BC912FCD7FED6A848493A3FF0D7AF641A263B71DCC72902995D", "frank"},
	{"$mskrb5$user$realm$eb03b6fbcfe91f8346f3c0ae7e8abfe5$afcbe07c32c3450b37d0f2516354570fe7d3e78f829e77cdc1718adf612156507181f7da", "John"},
	{"$mskrb5$$$881c257ce5df7b11715a6a60436e075a$c80f4a5ec18e7c5f765fb9f00eda744a57483db500271369cf4752a67ca0e67f37c68402", "the"},
	{"$mskrb5$$$ef012e13c8b32448241091f4e1fdc805$354931c919580d4939421075bcd50f2527d092d2abdbc0e739ea72929be087de644cef8a", "Ripper"},
	{"$mskrb5$$$334ef74dad191b71c43efaa16aa79d88$34ebbad639b2b5a230b7ec1d821594ed6739303ae6798994e72bd13d5e0e32fdafb65413", "VeryveryveryloooooooongPassword"},
	// repeat first hash in exactly the same form that is used in john.pot
	{"$krb5pa$23$$$$afcbe07c32c3450b37d0f2516354570fe7d3e78f829e77cdc1718adf612156507181f7daeb03b6fbcfe91f8346f3c0ae7e8abfe5", "John"},
	// http://www.exumbraops.com/layerone2016/party (sample.krb.pcap, hash extracted by krbpa2john.py)
	{"$krb5pa$23$$$$4b8396107e9e4ec963c7c2c5827a4f978ad6ef943f87637614c0f31b2030ad1115d636e1081340c5d6612a3e093bd40ce8232431", "P@$$w0rd123"},
	{NULL}
};
/* Per-hash salt: the HMAC-MD5 checksum and the RC4-encrypted timestamp,
   both decoded from hex by get_salt() */
static struct salt_t {
	uint32_t checksum[CHECKSUM_SIZE / sizeof(uint32_t)];
	unsigned char timestamp[TIMESTAMP_SIZE];
} *cur_salt;
/* Per-candidate arrays, sized max_keys_per_crypt in init() */
static char (*saved_plain)[(PLAINTEXT_LENGTH+4)];   /* candidate passwords */
static int (*saved_len);                            /* their lengths */
static uint32_t (*output)[BINARY_SIZE / sizeof(uint32_t)]; /* computed checksums */
static HMACMD5Context (*saved_ctx);                 /* HMAC-MD5 keyed with K1 */
/* Set once crypt_all() has derived K1 contexts for the current key batch */
static int keys_prepared;
/* One-time setup: size the per-candidate buffers and, when the target
 * encoding can represent them, swap in non-ASCII test plaintexts. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* Scale the key range so each thread gets OMP_SCALE candidates;
	   note min is scaled by thread count only, max by count*OMP_SCALE */
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_plain));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	output = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*output));
	saved_ctx = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_ctx));
	if (options.target_enc == UTF_8) {
		tests[1].plaintext = "\xC3\xBC"; // German u-umlaut in UTF-8
		tests[1].ciphertext = "$mskrb5$$$958db4ddb514a6cc8be1b1ccf82b0191$090408357a6f41852d17f3b4bb4634adfd388db1be64d3fe1a1d75ee4338d2a4aea387e5";
		tests[2].plaintext = "\xC3\x9C\xC3\x9C"; // 2x uppercase of them
		tests[2].ciphertext = "$mskrb5$$$057cd5cb706b3de18e059912b1f057e3$fe2e561bd4e42767e972835ea99f08582ba526e62a6a2b6f61364e30aca7c6631929d427";
	} else {
		/* Legacy codepage: only swap if this codepage maps the bytes
		   to the expected Unicode codepoints */
		if (CP_to_Unicode[0xfc] == 0x00fc) {
			tests[1].plaintext = "\xFC"; // German u-umlaut in many ISO-8859-x
			tests[1].ciphertext = "$mskrb5$$$958db4ddb514a6cc8be1b1ccf82b0191$090408357a6f41852d17f3b4bb4634adfd388db1be64d3fe1a1d75ee4338d2a4aea387e5";
		}
		if (CP_to_Unicode[0xdc] == 0x00dc) {
			tests[2].plaintext = "\xDC\xDC"; // 2x uppercase of them
			tests[2].ciphertext = "$mskrb5$$$057cd5cb706b3de18e059912b1f057e3$fe2e561bd4e42767e972835ea99f08582ba526e62a6a2b6f61364e30aca7c6631929d427";
		}
	}
}
/* Release the buffers allocated in init(), in reverse allocation order */
static void done(void)
{
	MEM_FREE(saved_ctx);
	MEM_FREE(output);
	MEM_FREE(saved_len);
	MEM_FREE(saved_plain);
}
/* Decode the canonical ciphertext's trailing hex blob into a salt_t:
 * TIMESTAMP_SIZE bytes of encrypted timestamp followed by CHECKSUM_SIZE
 * bytes of checksum.  Returns a pointer to a static buffer. */
static void *get_salt(char *ciphertext)
{
	static struct salt_t salt;
	unsigned char *out = salt.timestamp;
	char *p = strrchr(ciphertext, '$') + 1;
	int i;

	for (i = 0; i < TIMESTAMP_SIZE; i++, p += 2)
		*out++ = (atoi16[ARCH_INDEX(p[0])] << 4) |
			atoi16[ARCH_INDEX(p[1])];
	out = (unsigned char*)salt.checksum;
	for (i = 0; i < CHECKSUM_SIZE; i++, p += 2)
		*out++ = (atoi16[ARCH_INDEX(p[0])] << 4) |
			atoi16[ARCH_INDEX(p[1])];
	return (void*)&salt;
}
/* Install the salt selected by the cracker loop for subsequent crypt_all() */
static void set_salt(void *salt)
{
	cur_salt = salt;
}
/* Canonicalize either accepted tag form into john.pot form:
 * "$krb5pa$23$$$$<timestamp hex><checksum hex>", hex lowercased.
 * The user/realm/salt fields are dropped (they do not enter the hash). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];
	char *data;
	if (!strncmp(ciphertext, FORMAT_TAG2, FORMAT_TAG_LEN)) {
		/* $mskrb5$user$realm$checksum$timestamp: swap the last two
		   fields so canonical order is timestamp then checksum */
		char in[TOTAL_LENGTH + 1];
		char *c, *t;
		strnzcpy(in, ciphertext, sizeof(in));
		t = strrchr(in, '$'); *t++ = 0; /* last field: timestamp */
		c = strrchr(in, '$'); *c++ = 0; /* next-to-last: checksum */
		snprintf(out, sizeof(out), "%s23$$$$%s%s", FORMAT_TAG, t, c);
	} else {
		char *tc;
		/* $krb5pa$23$: blob is already timestamp||checksum */
		tc = strrchr(ciphertext, '$');
		snprintf(out, sizeof(out), "%s23$$$$%s", FORMAT_TAG, ++tc);
	}
	/* Lowercase the hex; data points at the '$' just before it, which
	   strlwr() leaves unchanged */
	data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
	strlwr(data);
	return out;
}
/* Decode the 16-byte HMAC-MD5 checksum (the value compared against
 * crypt_all() output) from the canonical ciphertext. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *binary;
	char *p;
	int i;

	if (!binary)
		binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
	/* the checksum hex follows the encrypted-timestamp hex */
	p = strrchr(ciphertext, '$') + 1 + 2 * TIMESTAMP_SIZE;
	for (i = 0; i < CHECKSUM_SIZE; i++, p += 2)
		binary[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
			atoi16[ARCH_INDEX(p[1])];
	return (void*)binary;
}
/* Validate a candidate hash line.  Accepts the legacy
 * "$mskrb5$user$realm$checksum$timestamp" form and the canonical
 * "$krb5pa$23$user$realm$salt$timestamp+checksum" form.
 * Returns 1 if the line parses, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *data = ciphertext, *p;
	if (!strncmp(ciphertext, FORMAT_TAG2, FORMAT_TAG_LEN)) {
		data += FORMAT_TAG_LEN;
		// user field
		p = strchr(data, '$');
		if (!p || p - data > MAX_USERLEN)
			return 0;
		data = p + 1;
		// realm field
		p = strchr(data, '$');
		if (!p || p - data > MAX_REALMLEN)
			return 0;
		data = p + 1;
		// checksum: exactly 2*CHECKSUM_SIZE hex digits
		p = strchr(data, '$');
		if (!p || p - data != 2 * CHECKSUM_SIZE ||
		    strspn(data, HEXCHARS_all) != p - data)
			return 0;
		data = p + 1;
		// encrypted timestamp: 2*TIMESTAMP_SIZE hex digits ending the line
		p += strlen(data) + 1;
		if (*p || p - data != TIMESTAMP_SIZE * 2 ||
		    strspn(data, HEXCHARS_all) != p - data)
			return 0;
		return 1;
	} else if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) {
		data += FORMAT_TAG_LEN;
		// only etype 23 (RC4-HMAC) is handled by this format
		if (strncmp(data, "23$", 3)) return 0;
		data += 3;
		// user field
		p = strchr(data, '$');
		if (!p || p - data > MAX_USERLEN)
			return 0;
		data = p + 1;
		// realm field
		p = strchr(data, '$');
		if (!p || p - data > MAX_REALMLEN)
			return 0;
		data = p + 1;
		// salt field
		p = strchr(data, '$');
		if (!p || p - data > MAX_SALTLEN)
			return 0;
		data = p + 1;
		// timestamp+checksum: one hex blob ending the line
		p += strlen(data) + 1;
		if (*p || p - data != (TIMESTAMP_SIZE + CHECKSUM_SIZE) * 2 ||
		    strspn(data, HEXCHARS_all) != p - data)
			return 0;
		return 1;
	}
	return 0;
}
/* Store one candidate password and invalidate the cached K1 contexts */
static void set_key(char *key, int index)
{
	const int len = strlen(key);

	saved_len[index] = len;
	memcpy(saved_plain[index], key, len + 1); /* include the NUL */
	keys_prepared = 0;
}
/* Return the candidate password exactly as stored by set_key() */
static char *get_key(int index)
{
	return (char *) saved_plain[index];
}
/* Compute the HMAC-MD5 checksum for each candidate against cur_salt.
 * K  = MD4(UTF-16LE(password))          (NTLM hash)
 * K1 = HMAC-MD5(K, 0x01000000)          (cached per key batch)
 * K3 = HMAC-MD5(K1, salt checksum), used as RC4 key on the timestamp;
 * when the decrypted timestamp shows known plaintext, output[i] gets
 * HMAC-MD5(K1, timestamp).
 *
 * Fixes vs. previous revision: the `for` loops were compiled only under
 * #ifdef _OPENMP, so non-OpenMP builds processed a single candidate and
 * silently relied on MAX_KEYS_PER_CRYPT == 1; the loops now always run.
 * Also, output[i][0] is cleared when the full known-plaintext check
 * fails, so a stale result from a previous salt cannot linger. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	const unsigned char one[] = { 1, 0, 0, 0 }; /* 1 as 4-byte little endian */
	int i;

	if (!keys_prepared) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++) {
			int len;
			unsigned char K[KEY_SIZE];
			unsigned char K1[KEY_SIZE];

			// K = MD4(UTF-16LE(password)), ordinary 16-byte NTLM hash
			len = E_md4hash((unsigned char *) saved_plain[i], saved_len[i], K);
			if (len <= 0)
				((char*)(saved_plain[i]))[-len] = 0; // match truncation
			// K1 = HMAC-MD5(K, 1)
			hmac_md5(K, (unsigned char *) &one, 4, K1);
			// Do key setup of the next HMAC-MD5 here; rest per salt below
			hmac_md5_init_K16(K1, &saved_ctx[i]);
		}
		keys_prepared = 1;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (i = 0; i < count; i++) {
		unsigned char K3[KEY_SIZE], cleartext[TIMESTAMP_SIZE];
		HMACMD5Context ctx;

		// K3 = HMAC-MD5(K1, CHECKSUM); K1 setup is cached in saved_ctx[i]
		memcpy(&ctx, &saved_ctx[i], sizeof(ctx));
		hmac_md5_update((unsigned char*)cur_salt->checksum,
		                CHECKSUM_SIZE, &ctx);
		hmac_md5_final(K3, &ctx);
		// Decrypt the first 16 bytes of the timestamp with K3
		RC4_single(K3, KEY_SIZE, cur_salt->timestamp, 16, cleartext);
		// Bail out unless we see known plaintext ("20" of the year digits)
		if (cleartext[14] == '2' && cleartext[15] == '0') {
			// Decrypt the rest of the timestamp
			RC4_single(K3, KEY_SIZE, cur_salt->timestamp,
			           TIMESTAMP_SIZE, cleartext);
			if (cleartext[28] == 'Z') {
				// create checksum K2 = HMAC-MD5(K1, plaintext)
				memcpy(&ctx, &saved_ctx[i], sizeof(ctx));
				hmac_md5_update(cleartext, TIMESTAMP_SIZE, &ctx);
				hmac_md5_final((unsigned char*)output[i], &ctx);
			} else
				output[i][0] = 0; /* mark as non-match */
		} else {
			output[i][0] = 0;
		}
	}
	return count;
}
/* Quick scan: does any candidate's first output word match the binary?
 * Fix: the loop was compiled only under #ifdef _OPENMP, so non-OpenMP
 * builds compared only index 0 and silently relied on the invariant
 * MAX_KEYS_PER_CRYPT == 1; the loop now runs unconditionally. */
static int cmp_all(void *binary, int count)
{
	const uint32_t b = *(uint32_t*)binary;
	int index;

	for (index = 0; index < count; index++)
		if (b == output[index][0])
			return 1;
	return 0;
}
/* Full 16-byte comparison against one candidate's computed checksum */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, output[index], BINARY_SIZE);
}
/* cmp_one() already compares the full binary; nothing further to verify */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Hash-table bucketing helpers: low bits of the first output word */
static int get_hash_0(int index) { return output[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return output[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return output[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return output[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return output[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return output[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return output[index][0] & PH_MASK_6; }
static int salt_hash(void *salt)
{
return (((struct salt_t*)salt)->checksum[0]) & (SALT_HASH_SIZE - 1);
}
/* Format descriptor registered with John's core (layout per formats.h) */
struct fmt_main fmt_mskrb5 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
		{ NULL }, /* no tunable costs */
		{ FORMAT_TAG },
		tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL, /* presumably salt_compare slot -- see formats.h */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
7117.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
/* 3-D heat-equation Jacobi stencil (polybench heat-3d), tiled 16x64 over the
 * first two spatial dimensions, alternating A->B and B->A sweeps.
 * NOTE(review): the generated code hardcodes 500 time steps; the tsteps
 * parameter is ignored -- confirm against the generator's intent.
 * Fix: the OpenMP private clause named a nonexistent t14 and omitted t6,
 * which every thread writes (a data race corrupting results under OpenMP);
 * the clause now privatizes exactly the inner loop variables. */
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 1; t2 <= 500; t2 += 1) {
    /* t4 is the parallel loop variable and predetermined private */
#pragma omp parallel for private(t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 16)
      for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
#pragma omp parallel for private(t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 16)
      for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
  }
}
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Map each EXIF-style orientation to the transform that makes the image
    top-left oriented; every case assigns orient_image.
  */
  switch (orientation)
  {
    case TopRightOrientation:
    {
      orient_image=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      orient_image=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      orient_image=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      orient_image=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      orient_image=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      orient_image=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      orient_image=RotateImage(image,270.0,exception);
      break;
    }
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      /*
        Already upright (or unknown): return an untouched clone.
      */
      orient_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
  }
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clip the chop rectangle to the image bounds.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    Result is the source image minus the chop rectangle in each dimension.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: rows above the chop region, skipping chopped columns.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,chop_image,1,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict chop_indexes,
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* copy only columns outside the chopped horizontal span */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress total is image->rows, yet only
           extent.y + (rows-extent.y-extent.height) rows are processed
           across both loops, so the monitor never reaches 100% --
           confirm intended. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: rows below the chop region.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): the first loop passes magick_threads(image,chop_image,..)
     here; presumably these should match -- confirm. */
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict chop_indexes,
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  cmyk_images=NewImageList();
  /* Consume the input list four planes (C, M, Y, K) at a time. */
  for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
  {
    /*
      Cyan plane -> red channel.  Plane intensity is inverted
      (QuantumRange-intensity) for each channel below.
    */
    cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace);
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    /*
      Magenta plane -> green channel.
    */
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    /*
      Yellow plane -> blue channel.
    */
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    /*
      Black plane -> index (K) channel.
    */
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register IndexPacket
        *restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
          GetPixelIntensity(images,p)));
        p++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    AppendImageToList(&cmyk_images,cmyk_image);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the crop rectangle from virtual-canvas coordinates into pixel
    coordinates, clipping against the actual image extent.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((size_t) (page.x+page.width) > image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((size_t) (page.y+page.height) > image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: each row is a straight memory copy of the cropped span.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,crop_image,1,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict crop_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns*
        sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  /*
    Crop an image into a single sub-region or a list of tiles, as directed
    by the geometry string.  Three modes, tried in order:
      1. '@' flag: divide the image into an NxM grid of tiles;
      2. explicit +X/+Y offset (or zero size): one region at that offset;
      3. plain WxH smaller than the image: tile at fixed WxH steps.
    If none applies, a clone of the whole image is returned.
  */
  Image
    *next,
    *crop_image;
  MagickStatusType
    flags;
  RectangleInfo
    geometry;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;
      RectangleInfo
        crop;
      size_t
        height,
        width;
      /*
        Crop into NxM tiles (@ flag).
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      /*
        Without '!' (AspectValue) the X/Y offsets shrink the tiled area;
        with '!' they enlarge it.  The sign trick takes |x| and |y|.
      */
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /*
        Real-valued tile size; clamp so each tile is at least one pixel.
      */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;  /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        /* crop.height currently holds the bottom edge; convert to a size. */
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          /* crop.width currently holds the right edge; convert to a size. */
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      /*
        Tile failures are deliberately swallowed: return whatever tiles
        were successfully cut.
      */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          /* '!' flag: reset the virtual canvas to the requested geometry. */
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;
      size_t
        height,
        width;
      ssize_t
        x,
        y;
      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* Geometry is no smaller than the image: return a full clone. */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
  CacheView
    *excerpt_view,
    *image_view;
  Image
    *excerpt_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Allocate excerpt image.  Unlike CropImage(), no page/virtual-canvas
    adjustment is performed: the geometry is taken literally.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict excerpt_indexes;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    /*
      image_view is a *virtual* cache view, so its colormap indexes must be
      fetched with the virtual index queue (the previous call to
      GetCacheViewAuthenticIndexQueue() was only valid for authentic views;
      compare CropImage() and FlipImage(), which use the virtual queue).
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;
  /*
    Allocate extent image: a canvas of the requested geometry, filled with
    the background color, onto which the original image is composited at
    the negated geometry offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  /* The canvas must be DirectClass before the background fill. */
  if (SetImageStorageClass(extent_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&extent_image->exception);
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  /* A non-opaque background requires an alpha (matte) channel. */
  if (extent_image->background_color.opacity != OpaqueOpacity)
    extent_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(extent_image);
  /* Negated offsets position the original within the larger canvas. */
  (void) CompositeImage(extent_image,image->compose,image,-geometry->x,
    -geometry->y);
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
  CacheView
    *flip_view,
    *image_view;
  Image
    *flip_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: copy each source row y to destination row rows-y-1.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flip_image,1,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict flip_indexes;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /* Destination row is the vertical mirror of the source row. */
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Rows keep left-to-right order, so a straight memory copy suffices. */
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* Mirror the vertical page offset within the virtual canvas too. */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
  CacheView
    *flop_view,
    *image_view;
  Image
    *flop_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: within every row, pixel order is reversed.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flop_image,1,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict flop_indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start q one past the row end; it is pre-decremented in the loop. */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      /* Write the destination row right-to-left from the source row. */
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* Mirror the horizontal page offset within the virtual canvas too. */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns x rows rectangle of pixels (and, for
  pseudo-color images, their colormap indexes) from source at (sx,sy) to
  destination at (dx,dy), one scanline at a time.  Returns MagickFalse if
  any scanline could not be read or written.
*/
static inline MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,
  const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy,
  ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict destination_indexes;
    register PixelPacket
      *restrict q;
    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
    /* Non-NULL index queue means a colormapped (or CMYK) image. */
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
  Image
    *roll_image;
  MagickStatusType
    status;
  RectangleInfo
    offset;
  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0, columns) and [0, rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: move the four quadrants to their wrapped positions.
    Accumulate status with &= so that a failure in any quadrant copy is
    reported (the previous |= masked failures once one copy succeeded).
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;
  RectangleInfo
    geometry;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Refuse to shave more than the image holds in either dimension. */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Build the interior crop rectangle; the page offsets keep the crop
    aligned with the virtual canvas.
  */
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /* Shrink the virtual canvas to match the removed borders. */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
  CacheView
    *image_view,
    *splice_view;
  Image
    *splice_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    splice_geometry;
  ssize_t
    y;
  /*
    Allocate splice image: the canvas grows by the spliced width/height,
    and the band at (x,y) is filled with the background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&splice_image->exception);
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  (void) SetImageBackgroundColor(splice_image);
  /*
    Respect image gravity: shift the splice column (x += width term) and
    splice row (y += height term) so the band lands where gravity dictates.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Center the splice row vertically; the previous code added width/2
        to y (a copy-and-paste defect) -- compare EastGravity below.
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case StaticGravity:
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image: rows above the splice row copy the source row, skipping
    the spliced column band (left untouched as background).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,splice_image,1,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict splice_indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      image_view is a virtual cache view: use the virtual index queue for
      the source (the authentic queue is for authentic views only).
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    for (x=0; x < splice_geometry.x; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    /* Skip the spliced column band; it keeps the background fill. */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Rows below the spliced row band: source rows are shifted down by the
    spliced height (the band itself keeps the background fill).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,splice_image,1,1)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict splice_indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      image->columns,1,exception);
    if ((y < 0) || (y >= (ssize_t) splice_image->rows))
      continue;
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    for (x=0; x < splice_geometry.x; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
will be simply 'lost', not destroyed.
Also if the crop generates a list of images only the first image is resized.
And finally if the crop succeeds and the resize failed, you will get a
cropped image, as well as a 'false' or 'failed' report.
  This function should probably be deprecated in favor of direct calls
to CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *resize_image,
    *transform_image;
  MagickStatusType
    flags;
  RectangleInfo
    geometry;
  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;
      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
      else
        {
          /*
            The original is destroyed; if the crop produced a list of
            tiles, only the first tile survives (see DANGER note above).
          */
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
    &(*image)->exception);
  (void) flags;
  /* Already at the requested size: nothing to do. */
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,transform_image->blur,&(*image)->exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImages(Image **image,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *image,
    **image_list,
    *transform_images;
  MagickStatusType
    status;
  register ssize_t
    i;
  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  image_list=ImageListToArray(*images,&(*images)->exception);
  if (image_list == (Image **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  transform_images=NewImageList();
  for (i=0; image_list[i] != (Image *) NULL; i++)
  {
    image=image_list[i];
    /*
      Accumulate with &= so any per-image failure is reported.  The
      previous |= (with status seeded to MagickTrue) made this function
      unconditionally return MagickTrue.
    */
    status&=TransformImage(&image,crop_geometry,image_geometry);
    AppendImageToList(&transform_images,image);
  }
  *images=transform_images;
  image_list=(Image **) RelinquishMagickMemory(image_list);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TransposeImage(): mirror around the central y-axis combined with a 90-degree
  rotation.  Returns a new image or NULL on failure (error in *exception).
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The transposed canvas swaps rows and columns.
  */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict transpose_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    /*
      Source row (read bottom-up) becomes destination column image->rows-y-1.
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    /*
      Fix: the source view is a *virtual* cache view, so its colormap indexes
      must be fetched with GetCacheViewVirtualIndexQueue(); the authentic
      queue is only defined for writable (authentic) views.
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
        if (transpose_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
            image->columns*sizeof(*transpose_indexes));
      }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransposeImage)
#endif
        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the page geometry to match the rotated canvas.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TransverseImage(): mirror around the central x-axis combined with a
  270-degree rotation.  Returns a new image or NULL on failure (error in
  *exception).
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict transverse_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
      1),0,1,transverse_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Write the source row reversed into the destination column.
    */
    q+=image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
      *--q=(*p++);
    /*
      Fix: the source view is a *virtual* cache view, so its colormap indexes
      must be fetched with GetCacheViewVirtualIndexQueue(); the authentic
      queue is only defined for writable (authentic) views.
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
        if (transverse_indexes != (IndexPacket *) NULL)
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(transverse_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
      }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap the page geometry and re-anchor the offsets for the rotated canvas.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TrimImage(): crops the image to its non-background bounding box and returns
  the cropped clone.  When no bounding box exists (fully background image) a
  transparent 1x1 placeholder with page offsets (-1,-1) is returned instead.
  Returns NULL on failure (error details in *exception).
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Translate the bounding box into absolute page coordinates and crop.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  {
    /*
      Nothing to trim: hand back a transparent 1x1 stand-in.
    */
    Image
      *placeholder;

    placeholder=CloneImage(image,1,1,MagickTrue,exception);
    if (placeholder == (Image *) NULL)
      return((Image *) NULL);
    placeholder->background_color.opacity=(Quantum) TransparentOpacity;
    (void) SetImageBackgroundColor(placeholder);
    placeholder->page=image->page;
    placeholder->page.x=(-1);
    placeholder->page.y=(-1);
    return(placeholder);
  }
}
|
wtsne_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef WSNE_INL
#define WSNE_INL
#include "hdi/dimensionality_reduction/wtsne.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include "weighted_sptree.h"
#include <random>
#ifdef __APPLE__
#include <dispatch/dispatch.h>
#else
#define __block
#endif
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
//#define FLANN_USE_CUDA
#include "flann/flann.h"
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
namespace hdi{
namespace dr{
/////////////////////////////////////////////////////////////////////////
// Default hyper-parameters for the weighted t-SNE gradient descent.
// A negative seed means "seed from the current time" (see
// initializeEmbeddingPosition).
template <typename scalar, typename sparse_scalar_matrix>
WeightedTSNE<scalar, sparse_scalar_matrix>::Parameters::Parameters():
_seed(-1),
_embedding_dimensionality(2),
_minimum_gain(0.1),              // floor for the adaptive per-component gain
_eta(200),                       // learning rate
_momentum(0.2),                  // momentum before _mom_switching_iter
_final_momentum(0.5),            // momentum from _mom_switching_iter onwards
_mom_switching_iter(250),
_exaggeration_factor(4),         // early-exaggeration multiplier on P
_remove_exaggeration_iter(250),  // iteration at which decay of the factor starts
_exponential_decay_iter(150)     // length of the decay window
{}
/////////////////////////////////////////////////////////////////////////
// Constructs an uninitialized solver; initialize() must be called before any
// iteration.  _theta == 0 selects the exact gradient (see doAnIteration).
template <typename scalar, typename sparse_scalar_matrix>
WeightedTSNE<scalar, sparse_scalar_matrix>::WeightedTSNE():
_initialized(false),
_logger(nullptr),
_theta(0)
{
}
// Marks the solver as uninitialized so initialize() can be called again;
// buffers and the embedding are left untouched.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::reset(){
_initialized = false;
}
// Clears the user-provided embedding and marks the solver uninitialized.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::clear(){
_embedding->clear();
_initialized = false;
}
// Copies the embedding coordinates of the data point identified by `handle`
// into `embedding_position` (resized to _embedding_dimensionality).
// Throws std::logic_error when the solver has not been initialized.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::getEmbeddingPosition(scalar_vector_type& embedding_position, data_handle_type handle)const{
  if(!_initialized){
    throw std::logic_error("Algorithm must be initialized before ");
  }
  embedding_position.resize(_params._embedding_dimensionality);
  for(int i = 0; i < _params._embedding_dimensionality; ++i){
    // BUG FIX: the original wrote the values back into *_embedding_container
    // (corrupting the embedding's first point) and never filled the output.
    embedding_position[i] = (*_embedding_container)[handle*_params._embedding_dimensionality + i];
  }
}
/////////////////////////////////////////////////////////////////////////
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::initialize(const sparse_scalar_matrix& probabilities, data::Embedding<scalar_type>* embedding, Parameters params){
utils::secureLog(_logger,"Initializing W-tSNE...");
{//Aux data
_params = params;
unsigned int size = probabilities.size();
unsigned int size_sq = probabilities.size()*probabilities.size();
_embedding = embedding;
_embedding_container = &(embedding->getContainer());
_embedding->resize(_params._embedding_dimensionality,size);
_P.resize(size);
_Q.resize(size_sq);
_gradient.resize(size*params._embedding_dimensionality,0);
_previous_gradient.resize(size*params._embedding_dimensionality,0);
_gain.resize(size*params._embedding_dimensionality,1);
}
utils::secureLogValue(_logger,"Number of data points",_P.size());
computeHighDimensionalDistribution(probabilities);
initializeEmbeddingPosition(params._seed);
computeWeights();
_iteration = 0;
_initialized = true;
utils::secureLog(_logger,"Initialization complete!");
}
// Same as initialize(), but the caller supplies an already-symmetrized joint
// probability distribution which is copied into _P verbatim (no
// symmetrization step).
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::initializeWithJointProbabilityDistribution(const sparse_scalar_matrix& distribution, data::Embedding<scalar_type>* embedding, Parameters params){
utils::secureLog(_logger,"Initializing W-tSNE with a user-defined joint-probability distribution...");
{//Aux data
_params = params;
unsigned int size = distribution.size();
// Quadratic buffer for the exact solver (see initialize()).
unsigned int size_sq = distribution.size()*distribution.size();
_embedding = embedding;
_embedding_container = &(embedding->getContainer());
_embedding->resize(_params._embedding_dimensionality,size);
_P.resize(size);
_Q.resize(size_sq);
_gradient.resize(size*params._embedding_dimensionality,0);
_previous_gradient.resize(size*params._embedding_dimensionality,0);
_gain.resize(size*params._embedding_dimensionality,1);
}
utils::secureLogValue(_logger,"Number of data points",_P.size());
_P = distribution;
initializeEmbeddingPosition(params._seed);
computeWeights();
_iteration = 0;
_initialized = true;
utils::secureLog(_logger,"Initialization complete!");
}
// Symmetrizes the conditional probabilities into the joint distribution:
// P[i][j] = P[j][i] = (p(j|i) + p(i|j)) / 2.  Missing entries are treated
// as zero.  Note: no global normalization is applied here; the 1/n factor
// is folded in later (see computeExactGradient).
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeHighDimensionalDistribution(const sparse_scalar_matrix& probabilities){
utils::secureLog(_logger,"Computing high-dimensional joint probability distribution...");
const int n = getNumberOfDataPoints();
//Can be improved by using the symmetry of the matrix (half the memory) //TODO
for(int j = 0; j < n; ++j){
for(auto& elem: probabilities[j]){
scalar_type v0 = elem.second;
// Look up the mirrored entry p(j|elem.first); absent means 0.
auto iter = probabilities[elem.first].find(j);
scalar_type v1 = 0.;
if(iter != probabilities[elem.first].end())
v1 = iter->second;
_P[j][elem.first] = static_cast<scalar_type>((v0+v1)*0.5);
_P[elem.first][j] = static_cast<scalar_type>((v0+v1)*0.5);
}
}
}
// Computes the default per-point weight: the total probability mass that the
// joint distribution _P assigns to each point (sum of its sparse row).
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeWeights(){
  _weights.clear();
  _weights.resize(_P.size(),0);
  std::size_t point = 0;
  for(const auto& row : _P){
    scalar_type mass = 0;
    for(const auto& entry : row){
      mass += entry.second;
    }
    _weights[point++] = mass;
  }
  utils::secureLogVectorStats(_logger,"Weights",_weights);
}
//! Set weights (overwrites the default weights)
// The vector must have one entry per data point; throws (via
// checkAndThrowLogic) on a size mismatch.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::setWeights(const scalar_vector_type& weights){
checkAndThrowLogic(weights.size() == _P.size(), "setWeights: wrong size");
_weights = weights;
utils::secureLogVectorStats(_logger,"Weights",_weights);
}
// Initializes every embedding coordinate with a Gaussian random value scaled
// by `multiplier`, using the Box-Muller (polar) transform.  A negative seed
// seeds rand() from the current time.
// NOTE(review): the transform produces two Gaussian deviates (x, y) but only
// x is used per coordinate; y is discarded — wasteful but harmless.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::initializeEmbeddingPosition(int seed, double multiplier){
utils::secureLog(_logger,"Initializing the embedding...");
if(seed < 0){
std::srand(static_cast<unsigned int>(time(NULL)));
}
else{
std::srand(seed);
}
for(auto& v : (*_embedding_container)){
double x(0.);
double y(0.);
double radius(0.);
// Rejection-sample a point in the unit disc (excluding the origin).
do {
x = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
y = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
radius = (x * x) + (y * y);
} while((radius >= 1.0) || (radius == 0.0));
// Polar Box-Muller: scale the point to a standard normal deviate.
radius = sqrt(-2 * log(radius) / radius);
x *= radius;
y *= radius;
v = static_cast<scalar_type>(x * multiplier);
}
}
// Performs one gradient-descent iteration, scaling the position update by
// `mult`.  Dispatches to the exact solver when _theta == 0 and to the
// Barnes-Hut approximation otherwise.  Throws std::logic_error if the
// solver was never initialized.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIteration(double mult){
  if(!_initialized){
    throw std::logic_error("Cannot compute a gradient descent iteration on unitialized data");
  }
  // Log the two scheduled events exactly once, when their iteration arrives.
  if(_iteration == _params._mom_switching_iter){
    utils::secureLog(_logger,"Switch to final momentum...");
  }
  if(_iteration == _params._remove_exaggeration_iter){
    utils::secureLog(_logger,"Remove exaggeration...");
  }
  if(_theta == 0){
    doAnIterationExact(mult);
    return;
  }
  doAnIterationBarnesHut(mult);
}
// Early-exaggeration schedule: the full factor until
// _remove_exaggeration_iter, then an exponential decay (time constant 30
// iterations) back towards 1 over the next _exponential_decay_iter
// iterations, and exactly 1 afterwards.
template <typename scalar, typename sparse_scalar_matrix>
scalar WeightedTSNE<scalar, sparse_scalar_matrix>::exaggerationFactor(){
  const auto plateau_end = _params._remove_exaggeration_iter;
  if(_iteration <= plateau_end){
    return _params._exaggeration_factor;
  }
  if(_iteration <= (plateau_end + _params._exponential_decay_iter)){
    const double decay = std::exp(-scalar_type(_iteration - plateau_end)/30.);
    return static_cast<scalar_type>(1 + (_params._exaggeration_factor-1)*decay);
  }
  return 1;
}
// One iteration of the exact (O(n^2)) solver: dense Q, exact KL gradient,
// then a position update scaled by `mult`.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIterationExact(double mult){
//Compute Low-dimensional distribution
computeLowDimensionalDistribution();
//Compute gradient of the KL function
computeExactGradient(exaggerationFactor());
//Update the embedding based on the gradient
updateTheEmbedding(mult);
}
// One iteration of the Barnes-Hut approximate solver, updating the
// embedding with a position step scaled by `mult`.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIterationBarnesHut(double mult){
  //Compute gradient of the KL function using the Barnes Hut approximation
  computeBarnesHutGradient(exaggerationFactor());
  //Update the embedding based on the gradient
  // BUG FIX: `mult` was silently dropped (updateTheEmbedding() was called
  // without it), making the step-size multiplier a no-op on the Barnes-Hut
  // path while doAnIterationExact honored it.
  updateTheEmbedding(mult);
}
// Fills the dense matrix _Q with the unnormalized Student-t affinities
// q_ij = 1/(1+||y_i-y_j||^2) (symmetric; the diagonal is left at its
// previous value) and accumulates the weighted normalization constant
// _normalization_Q = sum_ij q_ij * w_i * w_j.
// The j-loop is parallelized with GCD on Apple and OpenMP elsewhere; each j
// writes a disjoint pair of strips of _Q, so no synchronization is needed.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeLowDimensionalDistribution(){
const int n = getNumberOfDataPoints();
double sum_Q = 0;
// Loop over all edges in the graph
#ifdef __APPLE__
std::cout << "GCD dispatch, wtsne_inl 303.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__APPLE__
//_Q[j*n + j] = 0;
for(int i = j+1; i < n; ++i){
// Squared Euclidean distance between embedding points j and i
// (iterator ranges delimit one point's coordinate block each).
const double euclidean_dist_sq(
utils::euclideanDistanceSquared<scalar_type>(
(*_embedding_container).begin()+j*_params._embedding_dimensionality,
(*_embedding_container).begin()+(j+1)*_params._embedding_dimensionality,
(*_embedding_container).begin()+i*_params._embedding_dimensionality,
(*_embedding_container).begin()+(i+1)*_params._embedding_dimensionality
)
);
const double v = 1./(1.+euclidean_dist_sq);
_Q[j*n + i] = static_cast<scalar_type>(v);
_Q[i*n + j] = static_cast<scalar_type>(v);
}
}
#ifdef __APPLE__
);
#endif
// Serial reduction of the weighted normalization term.
for(int j = 0; j < n; ++j){
for(int i = 0; i < n; ++i){
sum_Q += _Q[j*n + i]*_weights[j]*_weights[i];
}
}
_normalization_Q = static_cast<scalar_type>(sum_Q);
}
// Exact KL-divergence gradient: for each point i and dimension d,
//   grad = 4 * sum_j (exaggeration*p_ij*q_ij - w_i*w_j*q_ij^2/Z) * (y_i - y_j)
// where Z is _normalization_Q.  The repulsive (negative) term is dense; the
// attractive (positive) term only visits the sparse entries of _P[i].
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeExactGradient(double exaggeration){
const int n = getNumberOfDataPoints();
const int dim = _params._embedding_dimensionality;
// Zero the accumulator before the two passes below.
for(int i = 0; i < n; ++i){
for(int d = 0; d < dim; ++d){
_gradient[i * dim + d] = 0;
}
}
for(int i = 0; i < n; ++i){
// Dense repulsive contribution over all pairs (i,j).
for(int j = 0; j < n; ++j){
for(int d = 0; d < dim; ++d){
const int idx = i*n + j;
const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
const double negative(_weights[i] * _weights[j] * _Q[idx] * _Q[idx] * distance / _normalization_Q);
_gradient[i * dim + d] += static_cast<scalar_type>(-4*negative);
}
}
// Sparse attractive contribution over the non-zero entries of _P[i];
// p_ij carries the deferred 1/n normalization of the joint distribution.
for(auto& elem: _P[i]){
for(int d = 0; d < dim; ++d){
const int j = elem.first;
const int idx = i*n + j;
const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
double p_ij = elem.second/n;
const double positive(p_ij * _Q[idx] * distance);
_gradient[i * dim + d] += static_cast<scalar_type>(4*exaggeration*positive);
}
}
}
}
// Barnes-Hut approximate gradient: edge (attractive) forces come from the
// sparse _P via the weighted SP-tree; non-edge (repulsive) forces are
// approximated per point with accuracy parameter _theta.  Per-point partial
// sums of the normalization Z are reduced serially afterwards.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeBarnesHutGradient(double exaggeration){
typedef double hp_scalar_type;
WeightedSPTree<scalar_type> sptree(_params._embedding_dimensionality,_embedding->getContainer().data(),_weights.data(),getNumberOfDataPoints());
scalar_type sum_Q = .0;
std::vector<hp_scalar_type> positive_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
// __block: captured by reference inside the GCD block on Apple
// (defined away as empty on other platforms).
__block std::vector<hp_scalar_type> negative_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
sptree.computeEdgeForces(_P, exaggeration, positive_forces.data());
__block std::vector<hp_scalar_type> sum_Q_subvalues(getNumberOfDataPoints(),0);
#ifdef __APPLE__
std::cout << "GCD dispatch, wtsne_inl 303.\n";
dispatch_apply(getNumberOfDataPoints(), dispatch_get_global_queue(0, 0), ^(size_t n) {
#else
#pragma omp parallel for
for(int n = 0; n < getNumberOfDataPoints(); n++){
#endif //__APPLE__
sptree.computeNonEdgeForces(n, _theta, negative_forces.data() + n * _params._embedding_dimensionality, sum_Q_subvalues[n]);
}
#ifdef __APPLE__
);
#endif
// Serial reduction of the per-point partial normalization sums.
sum_Q = 0;
for(int n = 0; n < getNumberOfDataPoints(); n++){
sum_Q += sum_Q_subvalues[n];
}
// Combine attractive and (normalized) repulsive forces into the gradient.
for(int i = 0; i < _gradient.size(); i++){
_gradient[i] = positive_forces[i] - (negative_forces[i] / sum_Q);
}
}
//temp
// Sign function: returns 0 for zero, -1 for negative and +1 for positive
// values, expressed in the argument's type.
template <typename T>
T sign(T value) {
  if (value == T(.0)) {
    return T(.0);
  }
  return (value < T(.0)) ? T(-1.0) : T(1.0);
}
// Gradient-descent position update with per-component adaptive gains and
// momentum (standard t-SNE scheme): gains grow additively when the gradient
// flips sign, shrink multiplicatively otherwise, and are clamped at
// _minimum_gain.  The final position delta is scaled by `mult`.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::updateTheEmbedding(double mult){
for(int i = 0; i < _gradient.size(); ++i){
_gain[i] = static_cast<scalar_type>((sign(_gradient[i]) != sign(_previous_gradient[i])) ? (_gain[i] + .2) : (_gain[i] * .8));
if(_gain[i] < _params._minimum_gain){
_gain[i] = static_cast<scalar_type>(_params._minimum_gain);
}
// NOTE(review): this line reduces to sign(g)*|g| == g (the eta*gain
// factors cancel); it looks like a leftover of a gradient-clipping
// experiment — confirm before simplifying.
_gradient[i] = static_cast<scalar_type>((_gradient[i]>0?1:-1)*std::abs(_gradient[i]*_params._eta* _gain[i])/(_params._eta*_gain[i]));
// Momentum switches from _momentum to _final_momentum at
// _mom_switching_iter; _previous_gradient stores the velocity term.
_previous_gradient[i] = static_cast<scalar_type>(((_iteration<_params._mom_switching_iter)?_params._momentum:_params._final_momentum) * _previous_gradient[i] - _params._eta * _gain[i] * _gradient[i]);
(*_embedding_container)[i] += static_cast<scalar_type>(_previous_gradient[i] * mult);
}
++_iteration;
}
// Not implemented for the weighted variant: asserts in debug builds and
// returns 0 in release builds.  Do not rely on the return value.
template <typename scalar, typename sparse_scalar_matrix>
double WeightedTSNE<scalar, sparse_scalar_matrix>::computeKullbackLeiblerDivergence(){
assert(false);
return 0;
}
}
}
#endif
|
gate.h | /**
* @file gate.h
* @author Nader KHAMMASSI - nader.khammassi@gmail.com
* @date 02-10-15
* @brief
*/
#pragma once
#ifndef QX_GATE_H
#define QX_GATE_H
#include <map>
#include <xpu.h>
#include <immintrin.h> // avx
#include <emmintrin.h> // sse
#include <core/hash_set.h>
#include <core/linalg.h>
#include <core/register.h>
#include <core/binary_counter.h>
#include <core/kronecker.h>
// #ifndef __BUILTIN_LINALG__
// #include <boost/numeric/ublas/matrix.hpp>
// #endif
#define SQRT_2 (1.4142135623730950488016887242096980785696718753769480731766797379f)
#define R_SQRT_2 (0.7071067811865475244008443621048490392848359376884740365883398690f)
#define __bit_test(x,pos) ((x) & (1<<(pos)))
#define __bit_set(x,pos) ((x) | (1<<(pos)))
#define __bit_flip(x,pos) ((x) ^ (1<<(pos)))
#define __bit_reset(x,pos) ((x) & ~(1<<(pos)))
#define __AVX__NO
#define __OP_PREFETCH__
//#define SQRT_2 (1.41421356237309504880f)
//#define R_SQRT_2 (0.70710678118654752440f)
namespace qx
{
/**
* types definition
*/
// A basis state is indexed by a 64-bit integer; a quantum state is a sparse
// map from basis state to complex amplitude.
typedef uint64_t basis_state_t;
typedef std::map<basis_state_t,complex_t> quantum_state_t;
// Tag for every operation the simulator can execute: unitary gates,
// state preparations, measurements, and classical/debug operations.
typedef enum __gate_type_t
{
__identity_gate__,
__hadamard_gate__,
__pauli_x_gate__ ,
__pauli_y_gate__ ,
__pauli_z_gate__ ,
__cnot_gate__ ,
__toffoli_gate__ ,
__swap_gate__ ,
__phase_gate__ ,
__rx_gate__ ,
__ry_gate__ ,
__rz_gate__ ,
__cphase_gate__ ,
__t_gate__ ,
__tdag_gate__ ,
__sdag_gate__ ,
__custom_gate__ ,
__prepx_gate__ ,
__prepy_gate__ ,
__prepz_gate__ ,
__measure_gate__ ,
__measure_reg_gate__,
__measure_x_gate__ ,
__measure_x_reg_gate__,
__measure_y_gate__ ,
__measure_y_reg_gate__,
__ctrl_phase_shift_gate__,
__parallel_gate__,
__display__,
__display_binary__,
__print_str__,
__bin_ctrl_gate__,
__lookup_table__,
__classical_not_gate__,
__qft_gate__,
__prepare_gate__,
__unitary_gate__
} gate_type_t;
/**
* gates coefficients
*/
// Row-major gate matrices: 4x4 for the two-qubit CNOT/SWAP, 2x2 for the
// single-qubit gates; 64-byte aligned for vector loads.
// NOTE(review): cache-align assumption — confirm complex_t layout matches.
const complex_t cnot_c [] __attribute__((aligned(64))) = { complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0) }; /* CNOT */
const complex_t swap_c [] __attribute__((aligned(64))) = { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }; /* SWAP */
const complex_t identity_c [] __attribute__((aligned(64))) = { complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(1.0) }; /* I */
const complex_t pauli_x_c [] __attribute__((aligned(64))) = { complex_t(0.0, 0.0) , complex_t(1.0, 0.0), complex_t(1.0, 0.0) , complex_t(0.0, 0.0) }; /* X */
const complex_t pauli_y_c [] __attribute__((aligned(64))) = { complex_t(0.0, 0.0) , complex_t(0.0,-1.0), complex_t(0.0, 1.0) , complex_t(0.0, 0.0) }; /* Y */
const complex_t pauli_z_c [] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(-1.0,0.0) }; /* Z */
const complex_t phase_c [] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(0.0, 1.0) }; /* S */
const complex_t sdag_gate_c[] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(0.0, -1.0) }; /* S_dag */
const complex_t t_gate_c [] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(cos(M_PI/4),sin(M_PI/4)) }; /* T */
const complex_t tdag_gate_c[] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(cos(M_PI/4),-sin(M_PI/4)) }; /* T_dag */
const complex_t hadamard_c [] __attribute__((aligned(64))) = { R_SQRT_2, R_SQRT_2, R_SQRT_2, -R_SQRT_2 }; /* H */
#define __rc(r,c,s) (r*s+c)
/**
* \brief common abstract gate interface for
* all gates implementation.
*/
class gate
{
public:
// Executes the gate on the register; the int64_t result is gate-specific
// (e.g. a measurement outcome for measurement gates).
virtual int64_t apply(qu_register& qureg) = 0;
// All qubits touched by the gate, and its control/target subsets.
virtual std::vector<uint64_t> qubits() = 0;
virtual std::vector<uint64_t> control_qubits() = 0;
virtual std::vector<uint64_t> target_qubits() = 0;
virtual gate_type_t type() = 0;
// Optional micro-code lowering; the default string signals "unsupported".
virtual std::string micro_code() { return "# unsupported operation : qubit out of range"; }
virtual void dump() = 0;
virtual ~gate() { };
// Scheduling duration in implementation-defined time units.
virtual void set_duration(uint64_t d) { duration = d; }
virtual uint64_t get_duration() { return duration; }
protected:
// NOTE(review): never initialized here — get_duration() on a gate whose
// set_duration() was not called returns an indeterminate value; confirm
// all schedulers call set_duration() first.
uint64_t duration;
};
/**
* \brief rotation in the x-z plane with a given
* angle theta (see "Large scale simulation of
* error-prone quantum systems" p.39" [Niwa 2002])
*/
inline cmatrix_t rotation(double theta)
{
// 2x2 real rotation matrix [[cos,-sin],[sin,cos]] stored as complex_t
// with zero imaginary parts.
cmatrix_t r; // (2,2);
r(0,0) = complex_t(cos(theta),0); r(0,1) = complex_t(-sin(theta),0);
r(1,0) = complex_t(sin(theta),0); r(1,1) = complex_t(cos(theta),0);
return r;
}
/**
* \brief phase shift for a given angle phi
*/
inline cmatrix_t phase(double phi)
{
// 2x2 phase-shift matrix diag(1, e^{i*phi}).
cmatrix_t p; // (2,2);
p(0,0) = complex_t(1,0); p(0,1) = complex_t(0,0);
p(1,0) = complex_t(0,0); p(1,1) = complex_t(cos(phi),sin(phi));
return p;
}
/**
* \brief generate noisy hadamard gate
*/
// Builds a noisy Hadamard as rotation(pi/4+eps1) * phase(pi+eps2);
// with eps1 == eps2 == 0 this is the exact H (up to global phase).
// FIX: declared `inline` — a non-inline definition in a header violates the
// one-definition rule as soon as the header is included in two translation
// units, producing duplicate-symbol link errors.
inline cmatrix_t noisy_hadamard(double epsilon1=0, double epsilon2=0)
{
#ifdef __BUILTIN_LINALG__
  return mxm(rotation(M_PI/4 + epsilon1), phase(M_PI + epsilon2));
#else
  cmatrix_t rz = rotation(M_PI/4 + epsilon1);
  cmatrix_t p  = phase(M_PI + epsilon2);
  return mxm(rz,p);
#endif
}
/**
* \brief build n x n matrix from an array
*/
// Copies a row-major array of n*n coefficients into an n x n matrix.
// FIX: declared `inline` (header definition — ODR) and loop indices changed
// to uint64_t to match `n` and avoid signed/unsigned comparison.
inline cmatrix_t build_matrix(const complex_t * c, uint64_t n)
{
  // assert(n==2);
  // TO DO : remove the n parameter
  cmatrix_t m; // (n,n);
  for (uint64_t i=0; i<n; i++)
    for (uint64_t j=0; j<n; j++)
      m(i,j) = c[i*n+j];
  return m;
}
/**
* sqg_apply
*/
#ifdef QX_COMPACT_GATE_OP
// Applies a single-qubit gate `cm` to `qubit` of an n-qubit register by
// building the Kronecker product I x U x I with identity blocks sized to
// the qubit's position, then multiplying it into the state vector.
inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg)
{
uint64_t n = qureg.size();
// Copy the 2x2 gate into the plain matrix type kronecker expects.
matrix_t m(2,row_t(2,0));
m[0][0] = cm(0,0); m[0][1] = cm(0,1);
m[1][0] = cm(1,0); m[1][1] = cm(1,1);
if (qubit == 0)
{
// Least-significant qubit: (I_{2^{n-1}} x U) * state.
identity id(1 << (n-1));
unitary_matrix um(cm.size1(),m);
kronecker k(&id, &um);
cvector_t r(qureg.get_data());
mulmv(k,qureg.get_data(),r);
qureg = r;
}
else if (qubit == n-1)
{
// Most-significant qubit: (U x I_{2^{n-1}}) * state.
identity id(1 << (n-1));
unitary_matrix um(cm.size1(),m);
kronecker k(&um, &id);
cvector_t r(qureg.get_data());
mulmv(k,qureg.get_data(),r);
qureg = r;
}
else
{
// Interior qubit: (I_{2^{n-qubit-1}} x U x I_{2^qubit}) * state.
identity id1(1 << (qubit));
identity id2(1 << (n-qubit-1));
unitary_matrix um(cm.size1(),m);
kronecker k(&id2, &um, &id1);
cvector_t r(qureg.get_data());
mulmv(k,qureg.get_data(),r);
qureg = r;
}
}
/**
* u on the kth qubit :
* non-null value in each row of the kronocker matrix:
* for each row r :
* c1 = r || 000100 // 1 at the n-k bit
* c2 = r || 000000
*/
// #elif QX_SPARSE_MV_MUL
#else // QX_SPARSE_MV_MUL
// Worker for sparse_mulmv: computes rows [is, ie) of res = m * v, exploiting
// that each row of the Kronecker matrix has exactly two non-zero columns —
// the row index with the target bit cleared (c1) and with it set (c2).
// NOTE(review): defined non-inline in a header — ODR hazard if this header
// is included from more than one translation unit; confirm.
uint64_t rw_process(int is, int ie, int s, uint64_t n, uint64_t qubit, const kronecker * m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
// nk == n-k == qubit: the bit position selected by the gate.
size_t nk = n-k;
for (uint64_t r=is; r<ie; ++r)
{
size_t bc = r;
size_t c1 = __bit_reset(bc,nk);
size_t c2 = __bit_set(bc,nk);
// complex_t s; // = 0;
pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
}
return 0;
}
// Sparse matrix-vector product res = m * v over all 2^n rows, parallelized
// with the xpu task framework (the 0,0,0 placeholders are replaced by each
// worker's row range).
// NOTE(review): `k` is unused here; non-inline header definition — ODR
// hazard, confirm single inclusion.
void sparse_mulmv(uint64_t n, uint64_t qubit, const kronecker& m, cvector_t& v, cvector_t& res)
{
uint64_t k = n-qubit;
uint64_t rows = (1 << n);
uint64_t z = 0;
xpu::task rw_t(rw_process,0,0,0,n,qubit,&m,&v,&res);
xpu::parallel_for process(z,rows,1,&rw_t);
process.run();
}
// Applies a general 2x2 gate `matrix` to the state slice [start, end): for
// every amplitude pair (i+stride0, i+stride1) differing only in `qubit`,
// computes the 2x2 matrix-vector product in place.
void __apply_m(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
{
#if 0
__m128d m00 = matrix[0].xmm;
__m128d m01 = matrix[1].xmm;
__m128d m10 = matrix[2].xmm;
__m128d m11 = matrix[3].xmm;
#endif
// Hoist the four gate coefficients out of the loops.
complex_t m00 = matrix[0];
complex_t m01 = matrix[1];
complex_t m10 = matrix[2];
complex_t m11 = matrix[3];
#ifdef USE_OPENMP
#pragma omp parallel for // shared(m00,m01,m10,m11)
#endif
// Outer loop strides over blocks of 2^(qubit+1); inner loop walks the
// lower half of each block, pairing each index with its partner.
for(size_t offset = start; offset < end; offset += (1L << (qubit + 1L)))
for(size_t i = offset; i < offset + (1L << qubit); i++)
{
size_t i0 = i + stride0;
size_t i1 = i + stride1;
complex_t in0 = state[i0];
complex_t in1 = state[i1];
state[i0] = m00*in0+m01*in1;
state[i1] = m10*in0+m11*in1;
#if 0
// NOTE(review): this disabled SSE path is wrong — the second line uses
// m10/in1 twice; it should be m10*in0 + m11*in1.  Fix before enabling.
__m128d in0 = state[i0].xmm;
__m128d in1 = state[i1].xmm;
state[i0].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m00, in0), xpu::_mm_mulc_pd(m10, in1));
state[i1].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m10, in1), xpu::_mm_mulc_pd(m11, in1));
#endif
}
}
#ifdef __SSE__
// #ifdef __FMA__
/**
 * Specialized Pauli-X kernel: swaps the |0>/|1> amplitude pairs of 'qubit'
 * (pair = (i + stride0, i + stride1)) via raw xmm moves — no arithmetic
 * needed since X is a pure permutation. The 'matrix' parameter is unused;
 * it is kept so all __apply_* kernels share the same signature.
 */
void __apply_x(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
{
#ifdef USE_OPENMP
#pragma omp parallel for // private(m00,r00,neg)
#endif
for(size_t offset = start; offset < end; offset += (1L << (qubit + 1L)))
for(size_t i = offset; i < offset + (1L << qubit); i++)
{
size_t i0 = i + stride0;
size_t i1 = i + stride1;
__m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm));
// __m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm));
state[i0].xmm = state[i1].xmm;
state[i1].xmm = xin0;
}
}
// #else
// #error "FMA not available !"
// #endif // FMA
#else
#error "SSE not available !"
#endif // SSE
#ifdef __SSE__
// #ifdef __FMA__
/**
 * Specialized Hadamard kernel using SSE (optionally FMA): exploits that the
 * Hadamard matrix has a single distinct coefficient h = 1/sqrt(2), with
 * m11 = -h, so each amplitude pair needs two mul(-add)s instead of four.
 * NOTE(review): r00 broadcasts one lane of matrix[0].xmm via shuffle mask 3;
 * which lane (re or im) depends on complex_t's xmm layout — TODO confirm
 * the layout places the real coefficient in the duplicated lane.
 */
void __apply_h(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
{
__m128d m00 = matrix[0].xmm;
__m128d r00 = _mm_shuffle_pd(m00,m00,3); // 1 cyc
__m128d neg = _mm_set1_pd(-0.0f); // sign-bit mask: xor with it negates both lanes
#ifdef USE_OPENMP
#pragma omp parallel for // private(m00,r00,neg)
#endif
for(size_t offset = start; offset < end; offset += (1L << (qubit + 1L)))
for(size_t i = offset; i < offset + (1L << qubit); i++)
{
size_t i0 = i + stride0;
size_t i1 = i + stride1;
__m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm));
__m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm));
__m128d t2; // = _mm_shuffle_pd(m01,m01,3); // 1 cyc
__m128d t1 = _mm_mul_pd(xin0,r00); // 5 cyc
#ifdef __FMA__
__m128d xi0 = _mm_fmadd_pd (xin1,r00, t1); // x2*t2+t1 // 5 cyc
#else
__m128d xi0 = _mm_mul_pd(xin1,r00);
xi0 = _mm_add_pd(xi0,t1); // x2*t2+t1 // 5 cyc
#endif // __FMA__
// t2 = _mm_shuffle_pd(m11,m11,3); // 1 cyc
t2 = _mm_xor_pd(r00,neg); // 1 cyc (m11=-m00)
#ifdef __FMA__
__m128d xi1 = _mm_fmadd_pd (xin1, t2, t1); // x2*t2+t1 // 5 cyc
#else
__m128d xi1 = _mm_mul_pd(xin1,t2);
xi1 = _mm_add_pd(xi1,t1); // x2*t2+t1 // 5 cyc
#endif
state[i0].xmm = xi0; // _mm_store_pd((double*)(&state[i0].xmm),xi0);
state[i1].xmm = xi1; // _mm_store_pd((double*)(&state[i1].xmm),xi1);
}
}
// #else
// #error "FMA not available !"
// #endif // FMA
#else
#error "SSE not available !"
#endif // SSE
/**
 * Sparse worker over rows [is, ie) for a kronecker_ui operator (naming
 * suggests a unitary (x) identity factorization — see the kronecker
 * construction earlier in this file). Each row r has two non-zero columns:
 * r with bit 'qubit' cleared (c1) and set (c2). The pair of complex
 * multiply-adds is done with AVX, SSE, or scalar code depending on build.
 * Returns 0 (xpu::task worker convention).
 */
uint64_t rw_process_ui(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
size_t nk = n-k; // nk == qubit
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,nk);
c2 = __bit_set(bc,nk);
bc++; // bc now indexes the next row: used below to prefetch its operands
#ifdef __OP_PREFETCH__
_mm_prefetch((void*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0);
_mm_prefetch((void*)&pv[__bit_set(bc,nk)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__
// cxc
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
// cxr
// pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#elif __SSE__
// complex_t s; // = 0;
//pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
// --- cc mul add ---
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
// --- cr mul add --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm));
// --- f. mul add ---
// pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#else
pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2)));
#endif
}
return 0;
}
/**
 * Parallel (or sequential when SEQUENTIAL is defined) sparse product
 * res = m * v for a kronecker_ui operator; rows handled by rw_process_ui.
 */
void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res)
{
    // was '1 << n': 32-bit int shift, undefined behavior for n >= 31
    uint64_t rows = ((uint64_t)1 << n);
    uint64_t z = 0;
#ifdef SEQUENTIAL
    rw_process_ui(z,rows,1,n,qubit,m,&v,&res);
#else
    xpu::task rw_t(rw_process_ui,0,0,0,n,qubit,m,&v,&res);
    xpu::parallel_for process(z,rows,1,&rw_t);
    process.run();
#endif
}
/**
 * Sparse worker over rows [is, ie) for a kronecker_iu operator (naming
 * suggests an identity (x) unitary factorization — TODO confirm). Same
 * two-non-zeros-per-row scheme as rw_process_ui: columns c1/c2 are row r
 * with bit 'qubit' cleared/set. Returns 0 (xpu::task worker convention).
 */
uint64_t rw_process_iu(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
size_t nk = n-k; // nk == qubit
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,nk);
c2 = __bit_set(bc,nk);
bc++; // next-row index, for the optional prefetch below
#ifdef __OP_PREFETCH__
_mm_prefetch((void*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0);
_mm_prefetch((void*)&pv[__bit_set(bc,nk)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__
// cxc
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
// cxr
// pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#elif __SSE__
// complex_t s; // = 0;
// pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
// --- cc mul add ---
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
// --- cr mul add --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm));
// --- f. mul add ---
// pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#else
pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2)));
#endif
}
return 0;
}
/**
 * Parallel (or sequential when SEQUENTIAL is defined) sparse product
 * res = m * v for a kronecker_iu operator; rows handled by rw_process_iu.
 */
void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t& v, cvector_t& res)
{
    // was '1 << n': 32-bit int shift, undefined behavior for n >= 31
    uint64_t rows = ((uint64_t)1 << n);
    uint64_t z = 0;
#ifdef SEQUENTIAL
    rw_process_iu(z,rows,1,n,qubit,m,&v,&res);
#else
    xpu::task rw_t(rw_process_iu,0,0,0,n,qubit,m,&v,&res);
    xpu::parallel_for process(z,rows,1,&rw_t);
    process.run();
#endif
}
// static xpu::core::os::mutex mtx;
/**
 * Sparse worker over rows [is, ie) for a kronecker_iui operator (naming
 * suggests an identity (x) unitary (x) identity factorization — TODO
 * confirm). Two non-zero columns per row, at row index with bit 'qubit'
 * cleared (c1) / set (c2); combined with AVX, SSE or scalar code.
 * The large commented regions are kept debugging/experimental variants.
 * Returns 0 (xpu::task worker convention).
 */
uint64_t rw_process_iui(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
size_t nk = n-k; // nk == qubit
for (uint64_t r=is; r<ie; r++) //+=2)
{
// 1st
bc = r;
c1 = __bit_reset(bc,nk);
c2 = __bit_set(bc,nk);
bc++; // next-row index, for the optional prefetch below
#ifdef __OP_PREFETCH__
_mm_prefetch((void*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0);
_mm_prefetch((void*)&pv[__bit_set(bc,nk)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__
// mtx.lock();
// cxc :
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
// cxr
// pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
/*
__m256d a; //_mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]);
a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c1].xmm,1), 0);
a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c2].xmm,1), 1);
print("(r="<<r<<") : pr12: "); xpu::dump_m256d(a);
// __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm);
__m256d b;
b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c1).xmm, 1), 1);
print("(r="<<r<<") : c1 : "); xpu::dump_m256d(b);
b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c2).xmm, 1), 0);
print("(r="<<r<<") : c2 : "); xpu::dump_m256d(b);
__m256d ab = xpu::_mm256_cmul_pd(a,b);
print("(r="<<r<<") : mul: "); xpu::dump_m256d(ab);
__m256d abr = _mm256_permute2f128_pd(ab, ab, 1);
print("(r="<<r<<") : prm: "); xpu::dump_m256d(abr);
ab = _mm256_add_pd(ab,abr);
print("(r="<<r<<") : add: "); xpu::dump_m256d(ab);
pr[r].xmm = _mm256_extractf128_pd(ab,0);
print("(r="<<r<<") : res:"); xpu::dump_m128d(pr[r].xmm);
mtx.unlock();
*/
#elif __SSE__
/*
mtx.lock();
print("(r="<<r<<") : pr1: "); xpu::dump_m128d(pv[c1].xmm);
print("(r="<<r<<") : pr2: "); xpu::dump_m128d(pv[c2].xmm);
print("(r="<<r<<") : c1 : "); xpu::dump_m128d((m.get(r,c1)).xmm);
print("(r="<<r<<") : c2 : "); xpu::dump_m128d((m.get(r,c2)).xmm);
*/
// --- cxc mul ---
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
// --- cxr mul --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm));
// --- fus ma ---
// pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
// pr[r].xmm = xpu::_mm128_mul_add_pc(pv[c1].xmm, pv[c2].xmm, m.get(r,c1).xmm, m.get(r,c2).xmm);
/*
print("(r="<<r<<") : res: "); xpu::dump_m128d(pr[r].xmm);
mtx.unlock();
*/
#else
pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2)));
#endif
/*
// 2nd
c1 = __bit_reset(bc,n-k);
c2 = __bit_set(bc,n-k);
#ifdef __AVX__NO
a = _mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]);
// __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm);
b = _mm256_insertf128_pd(b,(m.get(bc,c1)).xmm, 1);
b = _mm256_insertf128_pd(b,(m.get(bc,c2)).xmm, 0);
ab = xpu::_mm256_cmul_pd(a,b);
abr = _mm256_permute2f128_pd(ab, ab, 1);
ab = _mm256_add_pd(ab,abr);
pr[bc].xmm = _mm256_extractf128_pd(ab,0);
#elif __SSE__
pr[bc].xmm = _mm_add_pd((pv[c1]*(m.get(bc,c1))).xmm, (pv[c2]*(m.get(bc,c2))).xmm);
#else
pr[bc] = (pv[c1]*(m.get(bc,c1))) + (pv[c2]*(m.get(bc,c2)));
#endif
*/
}
return 0;
}
/**
 * Parallel (or sequential when SEQUENTIAL is defined) sparse product
 * res = m * v for a kronecker_iui operator; rows handled by rw_process_iui.
 */
void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res)
{
    // was '1 << n': 32-bit int shift, undefined behavior for n >= 31
    uint64_t rows = ((uint64_t)1 << n);
    uint64_t z = 0;
#ifdef SEQUENTIAL
    rw_process_iui(z,rows,1,n,qubit,m,&v,&res);
#else
    xpu::task rw_t(rw_process_iui,0,0,0,n,qubit,m,&v,&res);
    xpu::parallel_for process(z,rows,1,&rw_t);
    process.run();
#endif
}
/**
 * Apply a single-qubit gate matrix cm to 'qubit' of the register, in place,
 * via the strided dense kernel __apply_m (pairs at distance 2^qubit).
 */
inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg)
{
    uint64_t n = qureg.size();
    complex_t * s = qureg.get_data().data();
    // 64-bit shifts: '1 << n' / '1 << qubit' were 32-bit int shifts,
    // undefined behavior for n (or qubit) >= 31.
    __apply_m(0, ((uint64_t)1 << n), qubit, s, 0, ((uint64_t)1 << qubit), cm.m);
}
#endif // remove naive tensor computation
/**
 * Elementary pulse operations used when translating gates to micro-code
 * (indices into the pulse_lt lookup table below).
 */
typedef enum
{
__x180__,
__x90__ ,
__y180__,
__y90__ ,
__ym90__
} elementary_operation_t;
// Micro-code pulse strings: pulse_lt[qubit][operation], for qubits 0..2 only
// (each row drives a different pulse channel).
static const char * pulse_lt[][5] =
{
{ " pulse 9,0,0", " pulse 10,0,0", " pulse 11,0,0", " pulse 12,0,0", " pulse 14,0,0" },
{ " pulse 0,9,0", " pulse 0,10,0", " pulse 0,11,0", " pulse 0,12,0", " pulse 0,14,0" },
{ " pulse 0,0,9", " pulse 0,0,10", " pulse 0,0,11", " pulse 0,0,12", " pulse 0,0,14" },
};
/**
* \brief hadamard gate:
*
* | 1 1|
* 1/sqrt(2) | |
* | 1 -1|
*/
/**
 * Hadamard gate on a single qubit. apply() uses the specialized SSE kernel
 * __apply_h directly on the raw hadamard_c coefficients; the cmatrix_t
 * member m built in the constructor is retained only for reference.
 */
class hadamard : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
hadamard(uint64_t qubit) : qubit(qubit) //,m((complex_t*)hadamard_c)
{
m = build_matrix(hadamard_c,2);
}
// Apply H to the state; afterwards the classical measurement prediction
// for this qubit is unknown (equal superposition).
int64_t apply(qu_register& qureg)
{
size_t qs = qureg.states();
complex_t * data = qureg.get_data().data();
// sqg_apply(m,qubit,qureg);
// NOTE(review): '1 << qubit' is a 32-bit int shift — UB for qubit >= 31
__apply_h(0, qs, qubit, data, 0, (1 << qubit), hadamard_c);
// __apply_m(0, qs, qubit, data, 0, (1 << qubit), hadamard_c);
//__apply_h_old(0, qs, qubit, data, 0, (1 << qubit), hadamard_c);
// qureg.set_binary(qubit,__state_unknown__);
qureg.set_measurement_prediction(qubit,__state_unknown__);
return 0;
}
// Micro-code translation H = y90 then x180 (qubits 0..2 only).
std::string micro_code()
{
/**
| wait 5
| y90 q0 --> { pulse 12,0,0 }
| wait 5
| x180 q0 --> { pulse 9,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y90__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
// No control qubits: H is a single-qubit gate.
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __hadamard_gate__;
}
void dump()
{
println(" [-] hadamard(q=" << qubit << ")");
}
};
/**
 * Swap each amplitude with its bit-'trg'-cleared partner, over the strided
 * sub-blocks selected by 'bit' inside a 2^size state vector, starting at
 * 'offset'. ('ctrl' is unused here: the control-qubit filtering is encoded
 * in the iteration pattern chosen by the caller — kept for signature
 * compatibility with cx_worker.)
 */
void __swap(cvector_t& amp, size_t size, size_t bit, size_t trg, size_t ctrl, size_t offset=0)
{
    // size_t-wide shifts: '1 << size' etc. were 32-bit int shifts,
    // undefined behavior for size >= 31
    for (size_t i = __bit_set(0,bit); i < ((size_t)1 << size); i += ((size_t)1 << (bit+1)))
        for (size_t j = 0; j < ((size_t)1 << bit); j++)
        {
            size_t v = i + j + offset;
            std::swap(amp[v], amp[__bit_reset(v,trg)]);
        }
}
/**
 * Parallel worker for cnot: each index in [cs, ce) selects one outer
 * strided block and delegates to __swap to exchange the amplitude pairs.
 * Returns 0 (xpu::task worker convention).
 */
int cx_worker(int cs, int ce, int s, cvector_t * p_amp, size_t bit1, size_t bit2, size_t trg, size_t ctrl)
{
    // fixed: the reference declaration was missing its identifier
    // ("cvector_t & = * p_amp" did not compile)
    cvector_t & amp = * p_amp;
    size_t step   = ((size_t)1 << (bit1+1));   // 64-bit shift (was int shift)
    size_t offset = __bit_set(0, bit1);
    for (size_t i = (size_t)cs; i < (size_t)ce; i++)
        __swap(amp, bit1, bit2, trg, ctrl, offset + (i*step));
    return 0;
}
/**
* \brief controlled-not gate:
*
* | 1 0 0 0 |
* | 0 1 0 0 |
* | 0 0 0 1 |
 * | 0 0 1 0 |
*/
/**
 * Controlled-NOT gate: swaps the target-qubit amplitude pairs of every
 * basis state in which the control qubit is 1.
 *
 * Three implementations are selectable at compile time:
 *  - CG_MATRIX   : build the full permutation matrix and multiply (debug)
 *  - CG_BC       : bit-coded amplitude swaps — sequential for small
 *                  registers, parallel (OpenMP or xpu tasks) otherwise
 *                  (the default)
 *  - CG_HASH_SET : hash-set based pair discovery (legacy)
 */
class cnot : public gate
{
private:
    uint64_t control_qubit;
    uint64_t target_qubit;
    cmatrix_t m;
public:
    cnot(uint64_t ctrl_q, uint64_t target_q) : control_qubit(ctrl_q),
                                               target_qubit(target_q)
    {
        // m = build_matrix(cnot_c,4); // stack smashing
    }
    // #define CG_HASH_SET
    //#define CG_MATRIX
#ifndef CG_BC
#ifndef CG_MATRIX
#define CG_BC
#endif
#endif // CG_BC
    int64_t apply(qu_register& qreg)
    {
#ifdef CG_MATRIX
        uint64_t sn = qreg.states();
        uint64_t qn = qreg.size();
        uint64_t cq = control_qubit;
        uint64_t tq = target_qubit;
        // build the permutation matrix: identity with the swapped pairs
        cmatrix_t i = cidentity_t(sn);
        perm_t p = perms(qn,cq,tq);
        for (perm_t::iterator it = p.begin(); it != p.end(); it++)
        {
            i(it->first,it->second) = 1;
            i(it->second,it->first) = 1;
            i(it->first, it->first) = 0;
            i(it->second,it->second) = 0;
        }
        qreg = mxv(i, qreg.get_data());
#elif defined(CG_BC)
        uint64_t qn = qreg.size();
        uint64_t cq = control_qubit;
        uint64_t tq = target_qubit;
        cvector_t& amp = qreg.get_data();
        size_t b1 = std::max(cq,tq);
        size_t b2 = std::min(cq,tq);
        // number of outer strided blocks; 64-bit shifts (the previous
        // '1 << qn' was a 32-bit int shift — UB for qn >= 31)
        size_t steps = (((size_t)1 << qn) - (__bit_set(0,b1))) / ((size_t)1 << (b1+1)) + 1;
        if (qn < 17)
            fast_cx(amp, qn, b1, b2, tq, cq);
        else
        {
#ifdef USE_OPENMP
#pragma omp parallel for
            // fixed: the '&amp' arguments below were garbled to a bare '&'
            for (size_t i=0; i<steps; ++i)
                cx_worker(i,i+1,1,&amp,b1,b2,(size_t)tq,(size_t)cq);
#else
            xpu::task t(cx_worker,0,0,0,&amp,b1,b2,(size_t)tq,(size_t)cq);
            xpu::parallel_for fswp(0, steps, 1, &t);
            fswp.run();
#endif
        }
#elif defined(CG_HASH_SET)
        uint64_t j = control_qubit+1;
        uint64_t k = target_qubit+1;
        uint64_t k2 = (1 << (k-1));
        uint64_t j2 = (1 << (j-1));
        uint64_t r_size = qreg.states();
        xpu::container::hash_set<uint64_t> swap_set;
        // collect one representative per swap pair (control bit set,
        // partner not already registered)
        for (uint64_t t = 0; t < r_size; t++)
        {
            if ((t & j2) <= 0)
                continue;
            if (swap_set.find(t-k2) == swap_set.end())
                swap_set.insert(t);
        }
        int64_t t2;
        cvector_t& amp = qreg.get_data();
        complex_t c1, c2;
        for (xpu::container::hash_set<uint64_t>::iterator t = swap_set.begin(); t != swap_set.end(); ++t)
        {
            int64_t _t = *t;
            t2 = (_t + k2 < r_size) ? _t + k2 : _t - k2;
            c1 = amp(_t);
            c2 = amp(t2);
            std::swap(c1, c2);
            amp(_t) = c1;
            amp(t2) = c2;
        }
#endif // CG_HASH_SET
        // propagate the classical measurement prediction
        if (qreg.get_measurement_prediction(control_qubit) == __state_1__)
            qreg.flip_binary(target_qubit);
        else if (qreg.get_measurement_prediction(control_qubit) == __state_unknown__)
            qreg.set_measurement_prediction(target_qubit,__state_unknown__);
        return 0;
    }
    std::vector<uint64_t> qubits()
    {
        std::vector<uint64_t> r;
        r.push_back(control_qubit);
        r.push_back(target_qubit);
        return r;
    }
    std::vector<uint64_t> control_qubits()
    {
        std::vector<uint64_t> r;
        r.push_back(control_qubit);
        return r;
    }
    std::vector<uint64_t> target_qubits()
    {
        std::vector<uint64_t> r;
        r.push_back(target_qubit);
        return r;
    }
    gate_type_t type()
    {
        return __cnot_gate__;
    }
    void dump()
    {
        println(" [-] cnot(ctrl_qubit=" << control_qubit << ", target_qubit=" << target_qubit << ")");
    }
private:
    /**
     * Sequential variant for small registers: iterate the outer strided
     * blocks directly and swap via __swap.
     */
    void fast_cx(cvector_t& amp, size_t size, size_t bit1, size_t bit2, size_t trg, size_t ctrl)
    {
        for (size_t i=__bit_set(0,bit1); i<((size_t)1<<size); i += ((size_t)1 << (bit1+1)))
            __swap(amp,bit1,bit2,trg,ctrl,i);
    }
};
/**
 * Order the pair (a, b) in place so that a <= b, exchanging the two values
 * only when they are out of order.
 */
template<typename T>
void swap_if_greater(T& a, T& b)
{
    if (!(a > b))
        return;          // already ordered, nothing to do
    T previous_a(a);
    a = b;
    b = previous_a;
}
/**
 * Sort three values in place (ascending) with a three-comparison network.
 */
template<typename T>
void sort(T& a, T& b, T& c)
{
    if (a > b) { T t(a); a = b; b = t; }   // a <= b
    if (a > c) { T t(a); a = c; c = t; }   // a is the minimum
    if (b > c) { T t(b); b = c; c = t; }   // b <= c
}
/**
* \brief toffoli gate:
*
 * 8x8 matrix on 3 qubits: identity on every basis state except
 * |110> and |111>, whose amplitudes are swapped (the target qubit
 * is flipped when both control qubits are 1).
*/
class toffoli : public gate
{
private:
uint64_t control_qubit_1;
uint64_t control_qubit_2;
uint64_t target_qubit;
public:
toffoli(uint64_t ctrl_q1, uint64_t ctrl_q2, uint64_t target_q) : control_qubit_1(ctrl_q1),
control_qubit_2(ctrl_q2),
target_qubit(target_q)
{
}
// Apply CCX: swap the target amplitude pairs of all basis states whose two
// control bits are set, then update the classical measurement prediction.
int64_t apply(qu_register& qreg)
{
uint64_t sn = qreg.states();
uint64_t qn = qreg.size();
uint64_t cq1 = control_qubit_1;
uint64_t cq2 = control_qubit_2;
uint64_t tq = control_qubit_2 == control_qubit_2 ? target_qubit : target_qubit; // (no-op; see below)
cvector_t& amp = qreg.get_data();
//println("\ntoffoli " << cq1 << "," << cq2 << "," << tq);
#if 1
// c1 <= c2 <= c3 after sort(): the three involved bit positions
// (both controls and the target); t stays the target bit.
size_t c1=cq1;
size_t c2=cq2;
size_t c3=tq;
size_t t=tq;
size_t size=qn;
sort(c1,c2,c3);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
// NOTE(review): '1<<size' etc. are 32-bit int shifts — UB for size >= 31.
// The nested loops enumerate exactly the states with bits c1, c2, c3 set.
for (size_t i=__bit_set(__bit_set(__bit_set(0,c1),c2),c3); i<(1<<size); i += (1 << (c3+1)))
for (size_t j=i; j<(i+(1<<c3)); j += (1 << (c2+1)))
for (size_t k=j; k<(j+(1<<c2)); k+=(1 << (c1+1)))
for (size_t l=k; l<(k+(1<<(c1))); l++)
{
std::swap(amp[__bit_set(l,t)],amp[__bit_reset(l,t)]);
// println("swap : " << __bit_set(l,t) << "," << __bit_reset(l,t));
}
#else
// reference implementation: enumerate permutation pairs, guarding
// against double swaps with a 'done' marker per index
std::vector<uint64_t> done(sn, 0);
perm_t p = perms(qn,cq1,cq2,tq);
uint64_t p1,p2;
for (perm_t::iterator it = p.begin(); it != p.end(); it++)
{
p1 = it->first;
p2 = it->second;
if (!(done[p1] || done[p2]))
//if (!(done[p1]))
{
// std::swap(amp(p1),amp(p2)); // ublas
std::swap(amp[p1],amp[p2]);
//println("swap : " << p1 << "," << p2);
done[p1] = 1;
done[p2] = 1;
}
}
#endif
// classical tracking: flip the target only when both controls are
// definitely 1; mark it unknown when either control is unknown
if ((qreg.get_measurement_prediction(control_qubit_1) == __state_1__) &&
(qreg.get_measurement_prediction(control_qubit_2) == __state_1__) )
{
qreg.flip_binary(target_qubit);
}
else if ((qreg.get_measurement_prediction(control_qubit_1) == __state_unknown__) ||
(qreg.get_measurement_prediction(control_qubit_2) == __state_unknown__) )
{
qreg.set_measurement_prediction(target_qubit,__state_unknown__);
// qreg.set_binary(target_qubit,__state_unknown__);
}
return 0;
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(control_qubit_1);
r.push_back(control_qubit_2);
r.push_back(target_qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
r.push_back(control_qubit_1);
r.push_back(control_qubit_2);
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(target_qubit);
return r;
}
gate_type_t type()
{
return __toffoli_gate__;
}
void dump()
{
println(" [-] toffoli(ctrl_qubit_1=" << control_qubit_1 << ", ctrl_qubit_2=" << control_qubit_2 << ", target_qubit=" << target_qubit << ")");
}
};
/**
 * Parallel worker for a pauli-x flip of qubit q: for each index in [cs, ce)
 * whose bit q is set, swap the amplitude with its bit-q-cleared partner.
 * Returns 0 (xpu::task worker convention).
 */
int fliper(int cs, int ce, int s, uint64_t q, cvector_t * p_amp)
{
    // fixed: the reference declaration was missing its identifier
    // ("cvector_t & = * p_amp" did not compile)
    cvector_t & amp = * p_amp;
    for (int i = cs; i < ce; ++i)
    {
        if (__bit_test(i,q))
            std::swap(amp[i], amp[__bit_flip(i,q)]);
    }
    return 0;
}
// Branch-free xmm swap via triple xor (kept for reference; std::swap is
// used below instead).
#define __swap_xmm(x,y) { x = _mm_xor_pd(x,y); y = _mm_xor_pd(y,x); x = _mm_xor_pd(x,y); }
/**
 * Fast pauli-x on qubit q of an n-qubit state: swap every amplitude pair
 * differing only in bit q, iterating blocks of 2^(q+1).
 */
void fast_flip(uint64_t q, uint64_t n, cvector_t& amp)
{
    complex_t * x = amp.data();
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    // 64-bit shifts: '1 << n' etc. were 32-bit int shifts (UB for n >= 31)
    for (size_t i = 0; i < ((size_t)1 << n); i += ((size_t)1 << (q+1)))
        for (size_t j = i; j < (i + ((size_t)1 << q)); j++)
            std::swap(x[j].xmm, x[__bit_flip(j,q)].xmm);
}
/**
 * Reference pauli-x on qubit q via bitset enumeration: walk all basis
 * states with bit q set and swap each with its bit-q-cleared partner
 * (the p2 < p1 guard holds because clearing a set bit always decreases
 * the index, so each pair is swapped exactly once).
 * Slower than fast_flip; kept as a readable reference implementation.
 */
void flip(uint64_t q, uint64_t n, cvector_t& amp)
{
// NOTE(review): '1 << n' is a 32-bit int shift — UB for n >= 31
uint64_t nn = (1 << n);
uint64_t p1, p2;
std::bitset<MAX_QB_N> b;
// perm_t res;
b.reset();
b.set(q);
uint64_t bc = b.to_ulong();
while (bc < nn)
{
b.set(q); p1 = b.to_ulong(); // index with bit q set
b.flip(q); p2 = b.to_ulong(); // its partner with bit q cleared
if (p2<p1)
std::swap(amp[p1],amp[p2]);
b.flip(q);
b = inc(b); // advance to the next candidate state
b.set(q);
bc = b.to_ulong();
}
//return res;
}
/**
* \brief identity :
*
* | 1 0 |
* | 0 1 |
*
*/
/**
 * Identity gate: apply() is a no-op on the state vector; the matrix member
 * is only built for completeness. micro_code() still emits a wait so the
 * timing of a compiled pulse sequence is preserved.
 */
class identity : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
identity(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(identity_c,2);
}
// No state change: the identity leaves amplitudes and predictions intact.
int64_t apply(qu_register& qreg)
{
return 0;
}
std::string micro_code()
{
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
// uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] identity(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __identity_gate__;
}
};
/**
* \brief pauli-x :
*
* | 0 1 |
* | 1 0 |
*
*/
/**
 * Pauli-X (NOT) gate: [[0,1],[1,0]]. Two code paths: the FAST_FLIP
 * amplitude-swap path (currently disabled) and the dense __apply_m kernel.
 */
class pauli_x : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
pauli_x(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(pauli_x_c,2);
}
// Apply X; X deterministically flips the computational bit, so the
// classical measurement prediction is flipped too.
int64_t apply(qu_register& qreg)
{
// #define FAST_FLIP
#ifdef FAST_FLIP
uint64_t qn = qreg.size();
cvector_t& amp = qreg.get_data();
// flip(qubit,qn,amp);
fast_flip(qubit,qn,amp);
/*
xpu::task flip_t(fliper,0,0,0,qubit,&);
xpu::parallel_for parallel_flip(0,(1 << qn),1,&flip_t);
parallel_flip.run();
*/
#else
uint64_t n = qreg.size();
complex_t * s = qreg.get_data().data();
// cm.dump();
// NOTE(review): '1 << n' / '1 << qubit' are 32-bit shifts — UB for n >= 31
__apply_m(0, (1 << n), qubit, s, 0, (1 << qubit), m.m);
// sqg_apply(m,qubit,qreg);
#endif // FAST_FLIP
qreg.flip_binary(qubit);
return 0;
}
// Micro-code translation X = x180 (qubits 0..2 only).
std::string micro_code()
{
/**
| wait 5
| x180 q0 --> { pulse 9,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] pauli-x(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __pauli_x_gate__;
}
};
/**
* \brief pauli-y :
*
* | 0 -i |
* | i 0 |
*/
/**
 * Pauli-Y gate: [[0,-i],[i,0]], applied via the dense sqg_apply kernel.
 */
class pauli_y : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
pauli_y(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(pauli_y_c,2);
}
// Apply Y; like X it flips the computational bit (up to phase), so the
// classical measurement prediction is flipped as well.
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.flip_binary(qubit);
return 0;
}
// Micro-code translation Y = y180 (qubits 0..2 only).
std::string micro_code()
{
/**
| wait 5
| x180 q0 --> { pulse 9,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] pauli-y(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __pauli_y_gate__;
}
};
/**
* \brief pauli-z :
*
* | 1 0 |
* | 0 -1 |
*/
/**
 * Pauli-Z gate: [[1,0],[0,-1]], applied via the dense sqg_apply kernel.
 * Z is diagonal, so the classical measurement prediction is unchanged.
 */
class pauli_z : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
pauli_z(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(pauli_z_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
// Micro-code translation Z = y180 then x180 (qubits 0..2 only).
std::string micro_code()
{
/**
| wait 5
| x180 q0 --> { pulse 9,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y180__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] pauli-z(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __phase_gate__ == __phase_gate__ ? __pauli_z_gate__ : __pauli_z_gate__; // (no-op; see note)
}
};
/**
* \brief phase :
*
* | 1 0 |
* | 0 i |
*/
/**
 * Phase (S) gate: [[1,0],[0,i]], applied via the dense sqg_apply kernel.
 * Diagonal, so the classical measurement prediction is unchanged.
 */
class phase_shift : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
phase_shift(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(phase_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
// Micro-code translation S = y90, x90, ym90 (qubits 0..2 only).
std::string micro_code()
{
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y90__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__x90__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__ym90__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] phase(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __phase_gate__;
}
};
/**
* \brief S dag gate
*/
/**
 * S-dagger gate (inverse of the phase gate): [[1,0],[0,-i]], applied via
 * the dense sqg_apply kernel. Diagonal: prediction unchanged.
 */
class s_dag_gate : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
s_dag_gate(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(sdag_gate_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
void dump()
{
println(" [-] s_dag_gate(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __sdag_gate__;
}
};
/**
* \brief T gate
*/
/**
 * T gate (pi/8 gate): [[1,0],[0,e^(i*pi/4)]], applied via the dense
 * sqg_apply kernel. Diagonal: prediction unchanged.
 */
class t_gate : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
t_gate(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(t_gate_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
void dump()
{
println(" [-] t_gate(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __t_gate__;
}
};
/**
* \brief T dag gate
*/
/**
 * T-dagger gate (inverse of T): [[1,0],[0,e^(-i*pi/4)]], applied via the
 * dense sqg_apply kernel. Diagonal: prediction unchanged.
 */
class t_dag_gate : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
t_dag_gate(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(tdag_gate_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
void dump()
{
println(" [-] t_dag_gate(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __tdag_gate__;
}
};
/**
* phase factoring
*/
/**
 * Factor out the global phase of a 2x2 gate matrix and re-normalize its
 * columns: divide all entries by the unit-phase of m(0,0) (or of m(0,1)
 * when m(0,0) is near zero), then rescale each column.
 * NOTE(review): the threshold '10e-9' equals 1e-8 — possibly 1e-9 was
 * intended. If both m(0,0) and m(0,1) are zero the else-branch divides by
 * n == 0 (NaN). The normalization formulas assume a specific semantics of
 * complex_t::norm() (|z| vs |z|^2) — TODO confirm against its definition.
 */
void reset_gphase(cmatrix_t& m)
{
double n = m(0,0).norm();
if (n > 10e-9)
{
complex_t p(m(0,0).re/n,m(0,0).im/n);
m(0,0) /= p;
m(0,1) /= p;
m(1,0) /= p;
m(1,1) /= p;
}
else
{
n = m(0,1).norm();
complex_t p(m(0,1).re/n,m(0,1).im/n);
m(0,0) /= p;
m(0,1) /= p;
m(1,0) /= p;
m(1,1) /= p;
}
// rescale each column by its norm
double n1 = std::sqrt(m(0,0).norm()+m(1,0).norm());
double n2 = std::sqrt(m(0,1).norm()+m(1,1).norm());
m(0,0) /= n1;
m(0,1) /= n2;
m(1,0) /= n1;
m(1,1) /= n2;
}
/**
 *                  | cos(theta/2)             -e^(i*lambda) sin(theta/2)      |
 * general gate u = |                                                          |
 *                  | e^(i*phi) sin(theta/2)   e^(i*(phi+lambda)) cos(theta/2) |
*/
/**
 * General single-qubit unitary u(theta, lambda, phi) with
 * angle[0] = theta, angle[1] = lambda, angle[2] = phi:
 *
 *   | cos(theta/2)             -e^(i*lambda) sin(theta/2)      |
 *   | e^(i*phi) sin(theta/2)   e^(i*(phi+lambda)) cos(theta/2) |
 */
class unitary : public gate
{
private:
    uint64_t qubit;
    double angle[3];
    cmatrix_t m;
public:
    unitary(uint64_t qubit, double angle[3]) : qubit(qubit)
    {
        // keep a copy of the angles: the member array was previously left
        // uninitialized, so get_angle()/dump() read indeterminate values
        for (int i = 0; i < 3; ++i)
            this->angle[i] = angle[i];
        // indices fixed: the original read angle[1..3], overrunning the
        // 3-element array (undefined behavior); valid indices are 0..2
        m(0,0) = cos(angle[0]/2);
        m(0,1) = complex_t(-cos(angle[1]/2),-sin(angle[1]/2))*sin(angle[0]/2);
        m(1,0) = complex_t(cos(angle[2]/2),sin(angle[2]/2))*sin(angle[0]/2);
        m(1,1) = complex_t(cos((angle[2]/2)+(angle[1]/2)),sin((angle[2]/2)+(angle[1]/2)))*cos(angle[0]/2);
    }
    // Apply the unitary; the resulting state is generally a superposition,
    // so the classical measurement prediction becomes unknown.
    int64_t apply(qu_register& qreg)
    {
        sqg_apply(m,qubit,qreg);
        qreg.set_measurement_prediction(qubit,__state_unknown__);
        return 0;
    }
    // Returns the first angle (theta).
    double get_angle()
    {
        return *angle;
    }
    void dump()
    {
        println(" [-] unitary(qubit=" << qubit << ", angle=" << angle << ")");
    }
    std::vector<uint64_t> qubits()
    {
        std::vector<uint64_t> r;
        r.push_back(qubit);
        return r;
    }
    std::vector<uint64_t> control_qubits()
    {
        std::vector<uint64_t> r;
        return r;
    }
    std::vector<uint64_t> target_qubits()
    {
        std::vector<uint64_t> r;
        r.push_back(qubit);
        return r;
    }
    gate_type_t type()
    {
        return __unitary_gate__;
    }
};
/**
* \brief rotation-x :
*/
/**
 * Rotation around the X axis by 'angle':
 *   [[cos(a/2), -i sin(a/2)], [-i sin(a/2), cos(a/2)]]
 * with the global phase stripped by reset_gphase.
 */
class rx : public gate
{
private:
uint64_t qubit;
double angle;
cmatrix_t m;
public:
rx(uint64_t qubit, double angle) : qubit(qubit), angle(angle)
{
// m.resize(2,2);
m(0,0) = cos(angle/2); m(0,1) = complex_t(0,-sin(angle/2));
m(1,0) = complex_t(0,-sin(angle/2)); m(1,1) = cos(angle/2);
reset_gphase(m);
}
// Apply Rx; result is generally a superposition, so the classical
// measurement prediction becomes unknown.
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
// qreg.set_binary(qubit,__state_unknown__);
return 0;
}
void dump()
{
println(" [-] rx(qubit=" << qubit << ", angle=" << angle << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __rx_gate__;
}
};
/**
* \brief rotation-y :
*/
/**
 * Rotation around the Y axis by 'angle':
 *   [[cos(a/2), -sin(a/2)], [sin(a/2), cos(a/2)]]
 * (real matrix; global-phase stripping is unnecessary and left disabled).
 */
class ry : public gate
{
private:
uint64_t qubit;
double angle;
cmatrix_t m;
public:
ry(uint64_t qubit, double angle) : qubit(qubit), angle(angle)
{
// m.resize(2,2);
m(0,0) = cos(angle/2); m(0,1) = -sin(angle/2);
m(1,0) = sin(angle/2); m(1,1) = cos(angle/2);
// reset_gphase(m);
}
// Apply Ry; result is generally a superposition, so the classical
// measurement prediction becomes unknown.
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
//qreg.set_binary(qubit,__state_unknown__);
return 0;
}
void dump()
{
println(" [-] ry(qubit=" << qubit << ", angle=" << angle << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __ry_gate__;
}
};
/**
* \brief rotation-z :
*/
/**
 * Rotation around the Z axis by 'angle':
 *   [[e^(-i a/2), 0], [0, e^(i a/2)]]
 * with the global phase stripped by reset_gphase.
 */
class rz : public gate
{
private:
uint64_t qubit;
double angle;
cmatrix_t m;
public:
rz(uint64_t qubit, double angle) : qubit(qubit), angle(angle)
{
// m.resize(2,2);
m(0,0) = complex_t(cos(-angle/2), sin(-angle/2)); m(0,1) = 0;
m(1,0) = 0; m(1,1) = complex_t(cos(angle/2), sin(angle/2));
reset_gphase(m);
}
// Apply Rz. NOTE(review): Rz is diagonal and does not change measurement
// probabilities, yet the prediction is reset to unknown here — possibly
// conservative on purpose; confirm intent.
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
//qreg.set_binary(qubit,__state_unknown__);
return 0;
}
void dump()
{
println(" [-] rz(qubit=" << qubit << ", angle=" << angle << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __rz_gate__;
}
};
/**
 * multiply by phase p every amplitude (starting at `offset`) whose index
 * has `bit` set, within a register of 2^size basis states.
 */
void __shift(cvector_t& amp, size_t size, size_t bit, complex_t p, size_t offset=0)
{
complex_t * x = amp.data();
// bug fix: the original shifted a 32-bit int ("1 << size"), which is
// undefined behavior for size >= 31; use size_t-wide shifts instead
for (size_t i=__bit_set(0,bit); i<((size_t)1 << size); i += ((size_t)1 << (bit+1)))
for (size_t j=0; j<((size_t)1 << bit); j++)
{
size_t v = i+j+offset;
x[v] *= p;
}
}
/**
 * raw-pointer overload: multiply by phase p every amplitude (starting at
 * `offset`) whose index has `bit` set, within 2^size basis states.
 */
void __shift(complex_t * x, size_t size, size_t bit, complex_t p, size_t offset=0)
{
// bug fix: the original shifted a 32-bit int ("1 << size"), which is
// undefined behavior for size >= 31; use size_t-wide shifts instead
for (size_t i=__bit_set(0,bit); i<((size_t)1 << size); i += ((size_t)1 << (bit+1)))
for (size_t j=0; j<((size_t)1 << bit); j++)
{
size_t v = i+j+offset;
x[v] *= p;
}
}
/**
 * chunked driver for __shift: applies phase p to the chunks [cs, ce)
 * of the stride pattern selected by bit1/bit2.
 */
int shift_worker(int cs, int ce, int s, cvector_t * p_amp, size_t bit1, size_t bit2, complex_t p)
{
// bug fix: the reference declaration was missing its identifier
// ("cvector_t & = * p_amp;"), which does not compile
cvector_t & amp = * p_amp;
// stride between consecutive chunks; 64-bit shift avoids int overflow
size_t step = ((size_t)1 << (bit1+1));
size_t offset = __bit_set(0,bit1);
// note: __shift receives bit1 as its `size` argument and bit2 as its
// `bit` argument, consistent with the other call sites in this file
for (size_t i=(size_t)cs; i<(size_t)ce; i++)
__shift(amp,bit1,bit2,p,offset+(i*step));
return 0;
}
// first QFT fold: applies the kronecker operator m (Hadamard on `qubit`)
// to rows [is, ie) of v into res, then applies the cascade of controlled
// phase rotations for the higher qubits. `s` is the chunk stride (unused).
uint64_t qft_1st_fold_worker(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res)
{
// fold index counted from the most-significant side
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
// each output amplitude r mixes the two input amplitudes whose
// indices differ only in bit (n-k)
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,n-k);
c2 = __bit_set(bc,n-k);
#ifdef __OP_PREFETCH__
_mm_prefetch((void*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0);
_mm_prefetch((void*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__ //NO
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
#else
// complex_t s; // = 0;
//pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
#endif
}
// phase cascade: each higher qubit j contributes phase
// exp(i*pi/2^(j-qubit)) via __shift over its stride pattern
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j- qubit))));
size_t bit1 = j;
size_t step=(1 << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=is; i<ie; i++)
{
// println("i=" << i*step);
__shift(pr,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
/**
 * serial driver for the first QFT fold over all 2^n rows.
 */
void qft_1st_fold(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res)
{
// 64-bit shift: the original "1 << n" shifted a 32-bit int (UB for n >= 31);
// also dropped the unused locals k and z
uint64_t rows = ((uint64_t)1 << n);
// parallel driver kept for reference:
//xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res);
//xpu::parallel_for process(z,rows,1,&qf_t);
//process.run();
qft_1st_fold_worker(0,rows,1,n,qubit,m,&v,&res);
}
// nth QFT fold: same row-mixing + phase cascade as qft_1st_fold_worker,
// but with the identity-padded kronecker operator kronecker_iui.
uint64_t qft_nth_fold_worker(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res)
{
// fold index counted from the most-significant side
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
// each output amplitude r mixes the two input amplitudes whose
// indices differ only in bit (n-k)
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,n-k);
c2 = __bit_set(bc,n-k);
#ifdef __OP_PREFETCH__
_mm_prefetch((void*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0);
_mm_prefetch((void*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__ //NO
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
#else
// complex_t s; // = 0;
//pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
#endif
}
// phase cascade for the higher qubits (see qft_1st_fold_worker)
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j-qubit))));
size_t bit1 = j;
size_t step=(1 << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=is; i<ie; i++)
{
__shift(pr,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
/**
 * serial driver for the nth QFT fold over all 2^n rows.
 */
void qft_nth_fold(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res)
{
// 64-bit shift: the original "1 << n" shifted a 32-bit int (UB for n >= 31);
// also dropped the unused locals k and z
uint64_t rows = ((uint64_t)1 << n);
// parallel driver kept for reference:
//xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res);
//xpu::parallel_for process(z,rows,1,&qf_t);
//process.run();
qft_nth_fold_worker(0,rows,1,n,qubit,m,&v,&res);
}
// legacy QFT chunk worker (kronecker_ui variant): applies the Hadamard
// row-sweep then the phase cascade for chunks [cs, ce).
// NOTE(review): only reachable through the "#if 0" path in qft::apply.
int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, cvector_t& p_out, kronecker_ui kr, size_t qubit)
{
complex_t * in = p_in.data();
complex_t * out = p_out.data();
cvector_t & amp = p_out;
// xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
size_t b = cs;
size_t e = ce;
rw_process_ui(cs, ce, s, n, qubit, kr, &p_in, &p_out); // H
// phase cascade (see qft_1st_fold_worker)
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j- qubit))));
size_t bit1 = j;
size_t step=(1 << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=b; i<e; i++)
{
// debug trace -- left enabled in the original
println("i=" << i*step);
__shift(amp,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
// legacy QFT chunk worker (kronecker_iui variant): applies the padded
// Hadamard row-sweep for chunks [cs, ce).
// NOTE(review): only reachable through the "#if 0" path in qft::apply.
int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, cvector_t& p_out, kronecker_iui kr, size_t qubit)
{
complex_t * in = p_in.data();
complex_t * out = p_out.data();
cvector_t & amp = p_out;
// xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
size_t b = cs;
size_t e = ce;
rw_process_iui(cs, ce, s, n, qubit, kr, &p_in, &p_out); // H
// NOTE(review): this early return makes the phase cascade below
// unreachable -- possibly a deliberate temporary disable; confirm
// intent before removing either the return or the dead code
return 0;
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j- qubit))));
size_t bit1 = j;
size_t step=(1 << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=b; i<e; i++)
{
__shift(p_out,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
/**
* \brief qft
*/
class qft : public gate
{
private:
std::vector<uint64_t> qubit;   // qubits the transform spans
cmatrix_t hm;                  // cached 2x2 Hadamard matrix
public:
qft(std::vector<uint64_t> qubit) : qubit(qubit)
{
hm = build_matrix(hadamard_c,2);
}
// applies the quantum Fourier transform fold-by-fold, double-buffering
// between the register's data and aux vectors
int64_t apply(qu_register& qreg)
{
size_t n = qreg.size();
size_t s = qreg.states();
cvector_t& in = qreg.get_data();
cvector_t& out = qreg.get_aux();
// kronecker_ui kui(hm,2,(1 << (n-1)));
kronecker_ui kui(hadamard_c,2,(1 << (n-1)));
qft_1st_fold(n, 0, kui, in, out);
// NOTE(review): q is used to size the kronecker operator, yet
// qft_nth_fold is always called with qubit index 0, and the loop
// stops at n-2 so the last qubit has no fold (the disabled #if 0
// path below did have a "last fold") -- verify against a reference QFT
for (size_t i=1; i<n-1; ++i)
{
size_t q = qubit[i];
// kronecker_iui kiui(hm, 2, (1 << (n-q-1)), (1 << (q)));
kronecker_iui kiui(hadamard_c, 2, (1 << (n-q-1)), (1 << (q)));
qft_nth_fold(n, 0, kiui, in, out);
}
in.swap(out);
return 0;
#if 0
// 1st fold
qft_worker(0, s, 1, n, in, out, kronecker_ui(m,2,s-2), 0);
return 0;
// ith fold
for (size_t i=1; i<qubit.size(); ++i)
{
size_t q = qubit[i];
kronecker_iui k(m, 2, (1 << (n-q-1)), (1 << (q)));
qft_worker(0, qreg.states(), 1, qreg.size(), (qreg.get_data()), (qreg.get_aux()), k, q);
}
// last fold
kronecker_iu k(m,2,(1 << (n-1)));
sparse_mulmv(n,qubit[n-1],k,qreg.get_data(),qreg.get_aux());
in.swap(out);
return 0;
#endif
}
void dump()
{
print(" [-] qft(");
for (size_t i=0; i<(qubit.size()-1); ++i)
print("q" << qubit[i] << ",");
println("q" << qubit[qubit.size()-1] << ")");
}
std::vector<uint64_t> qubits()
{
return qubit;
}
// every qubit acts as both control and target in a QFT
std::vector<uint64_t> control_qubits()
{
return qubit;
}
std::vector<uint64_t> target_qubits()
{
return qubit;
}
gate_type_t type()
{
return __qft_gate__;
}
};
/**
* phase shifter
*/
// applies the 2x2 matrix m to pairs of amplitudes (ind0, ind0+ish)
// selected by a three-level strided traversal: outer stride i13 over
// [i11,i12), middle stride i23 over [r1+i21, r1+i22), inner unit stride
// over [r2+i31, r2+i32). The bounds encode which qubit is the control
// and which the target (see ctrl_phase_shift::apply for the two layouts).
void __apply_cm(complex_t * state,
complex_t m[2][2],
std::size_t i11, std::size_t i12, std::size_t i13,
std::size_t i21, std::size_t i22, std::size_t i23,
std::size_t i31, std::size_t i32, std::size_t ish )
{
// hoist the matrix elements out of the loops
complex_t m00 = m[0][0],
m01 = m[0][1],
m10 = m[1][0],
m11 = m[1][1];
for(std::size_t r1 = i11; r1 < i12; r1 += i13)
{
#ifdef USE_OPENMP
// #pragma omp parallel for
#endif
for(std::size_t r2 = r1 + i21; r2 < r1 + i22; r2 += i23)
{
for(std::size_t ind0 = r2 + i31; ind0 < r2 + i32; ind0++)
{
std::size_t ind1 = ind0 + ish;
// 2x2 matrix-vector product on the amplitude pair
complex_t in0 = state[ind0], in1 = state[ind1];
state[ind0] = m00 * in0 + m01 * in1;
state[ind1] = m10 * in0 + m11 * in1;
}
}
}
}
/**
* \brief controlled phase shift by arbitrary phase angle or (2*pi/(2^(k=ctrl-target)))
*/
class ctrl_phase_shift : public gate
{
private:
uint64_t ctrl_qubit;     // control qubit index
uint64_t target_qubit;   // target qubit index
complex_t z;             // NOTE(review): never assigned, but printed by dump()
complex_t m[2][2];       // 2x2 phase operator built by build_operator()
double phase;            // phase angle in radians
protected:
// builds diag(e^{-i*phase/2}, e^{+i*phase/2}) and divides out the
// phase of m[0][0] so the operator has the conventional global phase
void build_operator()
{
m[0][0] = complex_t(cos(-phase/2), sin(-phase/2)); m[0][1] = 0;
m[1][0] = 0; m[1][1] = complex_t(cos(phase/2), sin(phase/2));
double n = m[0][0].norm();
if (n > 10e-9)
{
complex_t p(m[0][0].re/n,m[0][0].im/n);
m[0][0] /= p;
m[0][1] /= p;
m[1][0] /= p;
m[1][1] /= p;
}
else
{
// NOTE(review): fallback normalizes by m[0][1].norm() yet still
// builds p from m[0][0]; for this diagonal matrix m[0][1] is
// always 0, so this branch would divide by zero if ever taken --
// looks like a copy-paste slip; confirm before relying on it
n = m[0][1].norm();
complex_t p(m[0][0].re/n,m[0][0].im/n);
m[0][0] /= p;
m[0][1] /= p;
m[1][0] /= p;
m[1][1] /= p;
}
}
public:
/**
 * ctor (q) : phase = 2*pi / 2^(ctrl-target)
 */
ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit) : ctrl_qubit(ctrl_qubit),
target_qubit(target_qubit)
{
phase = 2*M_PI/(1 << (ctrl_qubit - target_qubit));
build_operator();
}
/**
 * ctor (k) : phase = 2*pi / 2^k
 */
ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, size_t k) : ctrl_qubit(ctrl_qubit),
target_qubit(target_qubit)
{
phase = 2*M_PI/(1 << k);
build_operator();
}
/**
 * ctor (p) : explicit phase angle in radians
 */
ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, double angle) : ctrl_qubit(ctrl_qubit),
target_qubit(target_qubit)
{
phase = angle;
build_operator();
}
// applies m to the amplitude pairs where the control bit is set; the
// two __apply_cm layouts cover ctrl-above-target and target-above-ctrl
int64_t apply(qu_register& qreg)
{
uint64_t n = qreg.size();
complex_t * s = qreg.get_data().data();
size_t c = ctrl_qubit;
size_t t = target_qubit;
if (c > t)
__apply_cm(qreg.get_data().data(),
m,
0, (1 << n), 1l << (c+1l),
1l << c, 1l << (c+1l), 1l << (t+1l),
0l, 1l << t, 1l << t);
else
__apply_cm(qreg.get_data().data(),
m,
0, (1 << n), 1l << (t+1l),
0l, 1l << t, 1l << (c+1l),
1l << c, 1l<< (c+1l), 1l << t);
return 0;
}
void dump()
{
println(" [-] ctrl_phase_shift(ctrl_qubit=" << ctrl_qubit << ", target_qubit: " << target_qubit << ", phase = (" << z.re << ", i." << z.im << ") )");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
r.push_back(target_qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(target_qubit);
return r;
}
gate_type_t type()
{
return __ctrl_phase_shift_gate__;
}
};
/**
* \brief swap :
*
* | 1 0 0 0 |
* | 0 0 1 0 |
* | 0 1 0 0 |
* | 0 0 0 1 |
*/
class swap : public gate
{
private:
uint64_t qubit1;   // first qubit to exchange
uint64_t qubit2;   // second qubit to exchange
public:
swap(uint64_t qubit1, uint64_t qubit2) : qubit1(qubit1), qubit2(qubit2)
{
}
// SWAP(a,b) decomposed as CNOT(a,b) . CNOT(b,a) . CNOT(a,b)
int64_t apply(qu_register& qreg)
{
cnot(qubit1,qubit2).apply(qreg);
cnot(qubit2,qubit1).apply(qreg);
cnot(qubit1,qubit2).apply(qreg);
return 0;
}
void dump()
{
println(" [-] swap(q1=" << qubit1 << ", q2=" << qubit2 << ")");
}
std::vector<uint64_t> qubits()
{
return std::vector<uint64_t>{ qubit1, qubit2 };
}
// swap has no control qubits
std::vector<uint64_t> control_qubits()
{
return std::vector<uint64_t>();
}
std::vector<uint64_t> target_qubits()
{
return std::vector<uint64_t>{ qubit1, qubit2 };
}
gate_type_t type()
{
return __swap_gate__;
}
};
/**
* \brief cphase
*/
class cphase : public gate
{
private:
uint64_t ctrl_qubit;
uint64_t target_qubit;
public:
cphase(uint64_t ctrl_qubit, uint64_t target_qubit) : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit)
{
}
int64_t apply(qu_register& qreg)
{
hadamard(target_qubit).apply(qreg);
cnot(ctrl_qubit,target_qubit).apply(qreg);
hadamard(target_qubit).apply(qreg);
return 0;
}
void dump()
{
println(" [-] cphase(ctrl_qubit=" << ctrl_qubit << ", target_qubit=" << target_qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
r.push_back(target_qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(target_qubit);
return r;
}
gate_type_t type()
{
return __cphase_gate__;
}
};
/**
* \brief custom matrix gate
*
*/
class custom : public gate
{
private:
// std::vector<uint64_t> qubits;
uint64_t qubit;   // target qubit
cmatrix_t m;      // user-supplied 2x2 matrix, applied as-is
public:
// #ifdef __BUILTIN_LINALG__
// custom(std::vector<uint64_t> qubits, qx::linalg::matrix<complex_t> m) : qubits(qubits), m(m)
// #else
// NOTE(review): the unitarity check below is entirely commented out, so
// any matrix -- unitary or not -- is accepted; confirm this is intended
custom(uint64_t qubit, cmatrix_t m) : qubit(qubit), m(m)
// #endif
{
// uint64_t size = 1 << qubits.size();
// if (size != m.size1() || size != m.size2())
// println("[x] error: cutom gate : the matrix size do not match the number of qubits !");
// verify also that the matrix is unitary
// #ifdef __BUILTIN_LINALG__
// cmatrix_t ctr(m.size2(),m.size1());
// qx::linalg::matrix<complex_t> ctr(m.size2(),m.size1());
// for (uint64_t i=0; i<m.size2(); ++i)
// for (uint64_t j=0; j<m.size1(); ++j)
// ctr(i,j) = m(j,i).conj();
// // cmatrix_t mxctr = mxm(m,ctr);
// qx::linalg::matrix<complex_t> mxctr = mxm(m,ctr);
// qx::linalg::identity_matrix<complex_t> id(m.size1());
// #else
// cmatrix_t mxctr = mxm(m,ublas::trans(conj(m)));
// ublas::identity_matrix<complex_t> id(m.size1());
// #endif
// #ifdef __BUILTIN_LINALG__
// if (qx::linalg::equals(mxctr,id))
// #else
// if (equals(mxctr,id))
// #endif
// println("[x] error: custom gate : the specified matrix is not unitary !");
}
/**
 * apply the user matrix as a single-qubit gate and invalidate
 * the cached measurement prediction
 */
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
return 0;
}
/**
 * dump
 */
void dump()
{
println(" [-] custom matrix on qubit " << qubit);
// println(" [-] custom(qubits=" << qubits << ", matrix=" << m << ")");
}
/**
 * type
 */
gate_type_t type()
{
return __custom_gate__;
}
};
// accumulates into *p1 the probability that `qubit` reads 1, over the
// index chunk [cs, ce); the lock guards the shared accumulator.
int p1_worker(uint64_t cs, uint64_t ce, uint64_t s, double * p1, uint64_t qubit, xpu::lockable * l, cvector_t * p_data)
{
cvector_t &data = * p_data;
double local_p1 = 0;
for (uint64_t i=cs; i<ce; ++i)
{
// force the qubit bit on so only |...1...> amplitudes are summed
// NOTE(review): reassigning the loop index relies on __bit_set(i,qubit)
// never moving i backwards; each pair is visited twice but counted
// once via the i<ce guard -- verify this enumeration is intended
i = __bit_set(i,qubit);
if (i<ce)
local_p1 += data[i].norm(); //std::norm(data[i]);
// if (__bit_test(i,qubit))
// local_p1 += std::norm(data[i]);
}
// merge the chunk-local sum into the shared probability
l->lock();
// println("l_p1 [" << cs << ".." << ce << "]: " << local_p1);
*p1 += local_p1;
l->unlock();
return 0;
}
// post-measurement collapse over the chunk [cs, ce): zeroes every
// amplitude inconsistent with the measured value m (m!=0 means the
// qubit read 1), and accumulates the surviving squared norm into *length.
int zero_worker(uint64_t cs, uint64_t ce, uint64_t s, int64_t m, double * length, uint64_t qubit, xpu::lockable * l, cvector_t * p_data)
{
cvector_t &data = * p_data;
double local_length = 0;
uint64_t size = data.size();
if (m)
{
// measured 1: kill the states where the qubit bit is 0
for (uint64_t i=cs; i<ce; ++i)
{
if (!__bit_test(i,qubit))
data[i] = 0;
local_length += data[i].norm(); //std::norm(data[i]);
}
}
else
{
// measured 0: kill the states where the qubit bit is 1
for (uint64_t i=cs; i<ce; ++i)
{
if (__bit_test(i,qubit))
data[i] = 0;
local_length += data[i].norm(); //std::norm(data[i]);
}
}
// merge the chunk-local norm into the shared accumulator
l->lock();
*length += local_length;
l->unlock();
return 0;
}
// divides every amplitude in [cs, ce) by *length to renormalize the
// state after a measurement collapse; SIMD paths divide the real and
// imaginary parts in pairs.
int renorm_worker(uint64_t cs, uint64_t ce, uint64_t s, double * length, cvector_t * p_data)
{
cvector_t &data = * p_data;
double l = *length;
#ifdef __AVX__
// println("avx");
// processes two complex_t (4 doubles) per iteration
// NOTE(review): assumes (ce-cs) is even and the amplitude array is
// 32-byte aligned (aligned load/store) -- confirm allocator guarantees
complex_t * vd = p_data->data();
__m256d vl = _mm256_set1_pd(l);
for (uint64_t i=cs; i<ce; i+=2)
{
double * pvd = (double*)&vd[i];
__m256d va = _mm256_load_pd(pvd);
__m256d vr = _mm256_div_pd(va, vl);
_mm256_store_pd(pvd,vr);
}
#elif defined(__SSE__)
// println("sse");
// one complex_t (2 doubles) per iteration
complex_t * vd = p_data->data();
__m128d vl = _mm_set1_pd(l);
for (uint64_t i=cs; i<ce; ++i)
{
double * pvd = (double*)&vd[i];
__m128d va = _mm_load_pd(pvd);
__m128d vr = _mm_div_pd(va, vl);
_mm_store_pd(pvd,vr);
}
#else
// scalar fallback
for (uint64_t i=cs; i<ce; ++i)
data[i] /= l;
#endif // __SSE__
return 0;
}
/**
* measure
*/
class measure : public gate
{
private:
uint64_t qubit;            // qubit to measure (ignored when measure_all)
bool measure_all;          // measure every qubit of the register
bool disable_averaging;    // skip the measurement-averaging statistics
public:
measure(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging)
{
}
// bug fix: disable_averaging was left uninitialized here, yet it is
// read unconditionally at the end of apply() (undefined behavior)
measure() : qubit(0), measure_all(true), disable_averaging(false)
{
}
// samples the qubit, collapses the state accordingly, renormalizes,
// and returns the measured value (0 or 1)
int64_t apply(qu_register& qreg)
{
if (measure_all)
{
// qreg.measure();
for (size_t q=0; q<qreg.size(); q++)
qx::measure(q).apply(qreg);
return 0;
}
double f = qreg.rand();
double p = 0;
int64_t value;
uint64_t size = qreg.size();
uint64_t n = (1 << size);
cvector_t& data = qreg.get_data();
double length = 0;
// NOTE(review): this threshold looks odd -- with size > 64 the
// "(1 << size)" above has already overflowed; presumably the intent
// was a much smaller cutoff for switching to the parallel path
if (size > 64)
{
// #define PARALLEL_MEASUREMENT
// #ifdef PARALLEL_MEASUREMENT
xpu::lockable * l = new xpu::core::os::mutex();
// step 1: probability of reading 1
xpu::task p1_worker_t(p1_worker, (uint64_t)0, n, (uint64_t)1, &p, qubit, l, &data);
xpu::parallel_for parallel_p1( (uint64_t)0, n, (uint64_t)1, &p1_worker_t);
parallel_p1.run();
if (f<p) value = 1;
else value = 0;
// step 2: collapse inconsistent amplitudes, accumulate norm
xpu::task zero_worker_t(zero_worker,(uint64_t)0, n, (uint64_t)1, value, &length, qubit, l, &data);
xpu::parallel_for parallel_zero( (uint64_t)0, n, (uint64_t)1, &zero_worker_t);
parallel_zero.run();
length = std::sqrt(length);
// step 3: renormalize
xpu::task renorm_worker_t(renorm_worker, (uint64_t)0, n, (uint64_t)1, &length, &data);
xpu::parallel_for parallel_renorm( (uint64_t)0, n, (uint64_t)1, &renorm_worker_t);
parallel_renorm.run();
}
else
{
//#else
int64_t k, l, m;
int64_t j = qubit;
double fvalue;
// enumerate only the basis states where the qubit bit is set
std::bitset<MAX_QB_N> b;
b.reset();
b.set(qubit);
uint64_t bc = b.to_ulong();
while (bc < n)
{
bc = b.to_ulong();
// p += std::norm(data[bc]);
p += data[bc].norm();
b = inc(b);
b.set(qubit);
bc = b.to_ulong();
}
if (f<p) value = 1;
else value = 0;
if (value) // 1
{ // reset all states where the qubit is 0
for (uint64_t i=0; i<(1 << size); ++i)
{
if (!__bit_test(i,qubit))
data[i] = 0;
}
}
else
{
for (uint64_t i=0; i<(1 << size); ++i)
{
if (__bit_test(i,qubit))
data[i] = 0;
}
}
// renormalize the collapsed state
for (uint64_t k = 0; k < (1 << size); k++)
length += data[k].norm(); //std::norm(data[k]);
length = std::sqrt(length);
for (uint64_t k = 0; k < (1 << size); k++)
data[k] /= length;
// #endif // PARALLEL_MEASUREMENT
}
// println(" [>] measured value : " << value);
qreg.set_measurement_prediction(qubit,(value == 1 ? __state_1__ : __state_0__));
qreg.set_measurement(qubit,(value == 1 ? true : false));
//qreg.set_binary(qubit,(value == 1 ? __state_1__ : __state_0__));
// update the per-qubit ground/excited counters unless suppressed
if (!disable_averaging)
{
if (qreg.measurement_averaging_enabled)
{
if (value == 1)
{
// println("> exited_states++");
qreg.measurement_averaging[qubit].exited_states++;
}
else
{
// println("> ground_states++");
qreg.measurement_averaging[qubit].ground_states++;
}
}
}
return value;
}
void dump()
{
if (measure_all)
println(" [-] measure(register)");
else
println(" [-] measure(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
if (!measure_all)
r.push_back(qubit);
else // this is a dirty hack, itshould be fixed later (unknown qubit number !)
{
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
if (measure_all)
return __measure_reg_gate__;
else
return __measure_gate__;
}
};
/**
* measure_x
*/
class measure_x : public gate
{
private:
uint64_t qubit;            // qubit to measure (ignored when measure_all)
bool measure_all;          // measure every qubit of the register
bool disable_averaging;    // forwarded intent; actual measurement uses mg
qx::hadamard hg;           // basis-change gate (X basis <-> Z basis)
qx::measure mg;            // underlying Z-basis measurement
public:
// init-list reordered to match the declaration order (silences -Wreorder;
// members are always initialized in declaration order anyway)
measure_x(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging), hg(qubit), mg(qubit)
{
}
// bug fix: disable_averaging was left uninitialized by this constructor
measure_x() : qubit(0), measure_all(true), disable_averaging(false), hg(qubit), mg(qubit)
{
}
// X-basis measurement: H, measure in Z, H
int64_t apply(qu_register& qreg)
{
int64_t r = 0;
if (measure_all)
{
for (size_t i=0; i<qreg.size(); ++i)
qx::hadamard(i).apply(qreg);
qreg.measure();
for (size_t i=0; i<qreg.size(); ++i)
qx::hadamard(i).apply(qreg);
return 0;
}
hg.apply(qreg);
r = mg.apply(qreg);
hg.apply(qreg);
return r;
}
void dump()
{
if (measure_all)
println(" [-] measure_x(register)");
else
println(" [-] measure_x(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
if (!measure_all)
r.push_back(qubit);
else // this is a dirty hack, itshould be fixed later (unknown qubit number !)
{
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
if (measure_all)
return __measure_x_reg_gate__;
else
return __measure_x_gate__;
}
};
/**
* measure_y
*/
class measure_y : public gate
{
private:
uint64_t qubit;            // qubit to measure (ignored when measure_all)
bool measure_all;          // measure every qubit of the register
bool disable_averaging;    // forwarded intent; actual measurement uses mg
qx::phase_shift sg;        // S gate (basis change Y <-> X)
qx::pauli_z zg;            // Z gate
qx::measure_x mg;          // underlying X-basis measurement
/*
S(qubit);
Z(qubit);
bool b = MeasX(qubit, randint);
S(qubit);
*/
public:
// init-list reordered to match the declaration order (silences -Wreorder;
// members are always initialized in declaration order anyway)
measure_y(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging), sg(qubit), zg(qubit), mg(qubit)
{
}
// bug fix: disable_averaging was left uninitialized by this constructor
measure_y() : qubit(0), measure_all(true), disable_averaging(false), sg(qubit), zg(qubit), mg()
{
}
// Y-basis measurement: S, Z, measure in X, S
int64_t apply(qu_register& qreg)
{
int64_t r = 0;
if (measure_all)
{
for (size_t i=0; i<qreg.size(); ++i)
{
qx::phase_shift(i).apply(qreg);
qx::pauli_z(i).apply(qreg);
}
mg.apply(qreg);
for (size_t i=0; i<qreg.size(); ++i)
qx::phase_shift(i).apply(qreg);
return 0;
}
sg.apply(qreg);
zg.apply(qreg);
r = mg.apply(qreg);
sg.apply(qreg);
return r;
}
void dump()
{
if (measure_all)
println(" [-] measure_y(register)");
else
println(" [-] measure_y(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
if (!measure_all)
r.push_back(qubit);
else // this is a dirty hack, itshould be fixed later (unknown qubit number !)
{
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
if (measure_all)
return __measure_y_reg_gate__;
else
return __measure_y_gate__;
}
};
/**
* \brief generic binary controlled gate
*/
class bin_ctrl : public gate
{
private:
// uint64_t bit;
std::vector<size_t> bits;
gate * g;
public:
bin_ctrl(size_t bit, gate * g) : g(g)
{
bits.push_back(bit);
}
bin_ctrl(std::vector<size_t> bit, gate * g) : g(g)
{
for (auto b : bit)
bits.push_back(b);
}
int64_t apply(qu_register& qreg)
{
bool m = true;
for (auto b : bits)
if (!qreg.test(b))
m = false;
if (m)
g->apply(qreg);
return 0;
}
gate * get_gate()
{
return g;
}
std::vector<size_t> get_bits()
{
return bits;
}
void dump()
{
print(" [-] bin_ctrl: \n bit=" << bits[0] << " -> ");
g->dump();
}
std::vector<uint64_t> qubits()
{
return g->qubits();
}
std::vector<uint64_t> control_qubits()
{
return g->control_qubits();
}
std::vector<uint64_t> target_qubits()
{
return g->target_qubits();
}
gate_type_t type()
{
return __bin_ctrl_gate__;
}
};
#define bin_ctrl_pauli_x(b,q) bin_ctrl(b,new pauli_x(q))
#define bin_ctrl_pauli_y(b,q) bin_ctrl(b,new pauli_y(q))
#define bin_ctrl_pauli_z(b,q) bin_ctrl(b,new pauli_z(q))
/**
* \brief classical binary not gate
*/
class classical_not : public gate
{
private:
uint64_t bit;   // index of the classical measurement bit to invert
public:
classical_not(uint64_t bit) : bit(bit)
{
}
// flips the stored measurement outcome of `bit`; purely classical,
// the quantum state is untouched
int64_t apply(qu_register& qreg)
{
qreg.flip_measurement(bit);
return 0;
}
uint64_t get_bit()
{
return bit;
}
void dump()
{
println(" [-] not " << bit);
}
// a classical gate touches no qubits at all
std::vector<uint64_t> qubits()
{
return std::vector<uint64_t>();
}
std::vector<uint64_t> control_qubits()
{
return std::vector<uint64_t>();
}
std::vector<uint64_t> target_qubits()
{
return std::vector<uint64_t>();
}
gate_type_t type()
{
return __classical_not_gate__;
}
};
/**
* prepz
*/
class prepz : public gate
{
private:
uint64_t qubit;   // qubit to reset to |0>
public:
prepz(uint64_t qubit) : qubit(qubit)
{
}
// resets the qubit to |0>: measure in Z (averaging suppressed), then
// flip with X if the outcome was 1, and clear the measurement bit
int64_t apply(qu_register& qreg)
{
measure(qubit,true).apply(qreg);
bin_ctrl_pauli_x(qubit,qubit).apply(qreg);
// bin_ctrl_pauli_z(qubit,qubit).apply(qreg);
qreg.set_measurement(qubit,false);
return 0;
}
void dump()
{
println(" [-] prepz(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepz_gate__;
}
};
/**
* prepx
*/
class prepx : public gate
{
private:
uint64_t qubit;   // qubit to reset to |+>
hadamard h;       // cached basis-change gate
public:
prepx(uint64_t qubit) : qubit(qubit), h(qubit)
{
}
// resets the qubit to |+>: X-basis measurement (H, measure, H), then
// a Z correction conditioned on the outcome, and clear the bit
int64_t apply(qu_register& qreg)
{
h.apply(qreg);
measure(qubit,true).apply(qreg);
h.apply(qreg);
bin_ctrl_pauli_z(qubit,qubit).apply(qreg);
qreg.set_measurement(qubit,false);
return 0;
}
void dump()
{
println(" [-] prepx(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepx_gate__;
}
};
/**
* prepy
*/
class prepy : public gate
{
private:
uint64_t qubit;   // qubit to reset to |+i>
prepx px;         // reuse the |+> preparation
phase_shift s;    // S gate turns |+> into |+i>
public:
prepy(uint64_t qubit) : qubit(qubit), px(qubit), s(qubit)
{
}
// resets the qubit to |+i>: prepare |+>, then apply S
int64_t apply(qu_register& qreg)
{
px.apply(qreg);
s.apply(qreg);
qreg.set_measurement(qubit,false);
return 0;
}
void dump()
{
println(" [-] prepy(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepy_gate__;
}
};
// dispatch table: selects a gate based on the values of classical
// control bits (first bit in ctrl_bits is the most significant).
class lookup_gate_table : public gate
{
private:
std::vector<uint64_t> ctrl_bits;       // classical bits forming the key
std::map<uint64_t,gate *> gates;       // key -> gate (gates not owned)
public:
lookup_gate_table(uint64_t b0)
{
ctrl_bits.push_back(b0);
}
lookup_gate_table(uint64_t b0, uint64_t b1)
{
ctrl_bits.push_back(b0);
ctrl_bits.push_back(b1);
}
lookup_gate_table(uint64_t b0, uint64_t b1, uint64_t b2)
{
ctrl_bits.push_back(b0);
ctrl_bits.push_back(b1);
ctrl_bits.push_back(b2);
}
lookup_gate_table(std::vector<uint64_t> ctrl_bits) : ctrl_bits(ctrl_bits)
{
}
// registers g for condition value `cond` (must fit in the bit count)
void add_gate(uint64_t cond, gate * g)
{
assert(cond < (1<< ctrl_bits.size()));
gates[cond] = g;
}
// builds the key from the measured bits (MSB first) and applies the
// registered gate, if any; a missing entry is a silent no-op
int64_t apply(qu_register& qreg)
{
uint64_t k = 0;
for (uint64_t i=0; i<ctrl_bits.size(); i++)
{
//println(qreg.get_binary(i));
if (qreg.test(ctrl_bits[i]))
k = k * 2 + 1;
else
k *= 2;
}
// println("[+] lookup table : cond = " << k);
std::map<uint64_t,gate*>::iterator it = gates.find(k);
if (it != gates.end())
(*it).second->apply(qreg);
return 0;
}
// union (with duplicates) of the qubits of all registered gates
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
// to do
std::map<uint64_t,gate *>::iterator ig;
for (ig=gates.begin(); ig!=gates.end(); ++ig)
{
std::vector<uint64_t> ri = ig->second->qubits();
r.insert(r.begin(), ri.begin(), ri.end());
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
// to do
std::map<uint64_t,gate *>::iterator ig;
for (ig=gates.begin(); ig!=gates.end(); ++ig)
{
std::vector<uint64_t> ri = ig->second->control_qubits();
if (ri.size())
r.insert(r.begin(), ri.begin(), ri.end());
}
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
// to do
std::map<uint64_t,gate *>::iterator ig;
for (ig=gates.begin(); ig!=gates.end(); ++ig)
{
std::vector<uint64_t> ri = ig->second->target_qubits();
if (ri.size())
r.insert(r.begin(), ri.begin(), ri.end());
}
return r;
}
void dump()
{
println(" [-] lookup gate table : ");
}
gate_type_t type()
{
return __lookup_table__;
}
};
/**
* \brief display : debug utility
* display intermediate quantum states of a
* quantum register whithin a circuit.
*/
class display : public gate
{
private:
bool only_binary;   // when true, show only the measurement register
public:
display(bool only_binary=false) : only_binary(only_binary)
{
}
// dumps the register state to stdout; purely a debug aid, the
// quantum state itself is left untouched
int64_t apply(qu_register& qreg)
{
qreg.dump(only_binary);
return 0;
}
void dump()
{
println(" [-] display(only_binary=" << only_binary << ")");
}
// a display touches no qubits
std::vector<uint64_t> qubits()
{
return std::vector<uint64_t>();
}
std::vector<uint64_t> control_qubits()
{
return std::vector<uint64_t>();
}
std::vector<uint64_t> target_qubits()
{
return std::vector<uint64_t>();
}
gate_type_t type()
{
return (only_binary ? __display_binary__ : __display__);
}
};
/**
* parallel gates
*/
class parallel_gates : public gate
{
public:
parallel_gates()
{
}
int64_t apply(qu_register& qreg)
{
for (uint64_t i=0; i<gates.size(); i++)
gates[i]->apply(qreg);
return 0;
}
uint64_t add(gate * g)
{
gates.push_back(g);
return gates.size();
}
std::vector<gate *> get_gates()
{
return gates;
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
for (uint64_t i=0; i<gates.size(); i++)
{
std::vector<uint64_t> q = gates[i]->qubits();
r.insert(r.end(),q.begin(),q.end());
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
for (uint64_t i=0; i<gates.size(); i++)
{
std::vector<uint64_t> q = gates[i]->control_qubits();
r.insert(r.end(),q.begin(),q.end());
}
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
for (uint64_t i=0; i<gates.size(); i++)
{
std::vector<uint64_t> q = gates[i]->target_qubits();
r.insert(r.end(),q.begin(),q.end());
}
return r;
}
void dump()
{
println(" [-] parallel_gates (" << gates.size() << " gates) : ");
for (uint64_t i=0; i<gates.size(); i++)
gates[i]->dump();
}
gate_type_t type()
{
return __parallel_gate__;
}
private:
std::vector<gate *> gates; // list of the parallel gates
};
/**
* prepare the qubits into an arbitrary quantum state
*/
class prepare : public gate
{
private:
quantum_state_t * state;   // (basis state -> amplitude) map, not owned
public:
prepare(quantum_state_t * state) : state(state)
{
}
// loads the register with the given amplitudes, renormalizing if the
// supplied state is not normalized, and marks all predictions unknown
int64_t apply(qu_register& qreg)
{
qreg.reset();
cvector_t& q = qreg.get_data();
double norm = 0;
for (quantum_state_t::iterator i=state->begin(); i != state->end(); ++i)
{
basis_state_t bs = (*i).first;
complex_t c = (*i).second;
// println("bs=" << bs << ", a=" << c);
q[bs] = c;
norm += c.norm(); //std::norm(c);
}
if (std::fabs(norm-1) > QUBIT_ERROR_THRESHOLD)
{
println("[!] warning : the loaded quantum state is not normalized (norm = " << norm << ") !");
println("[!] renormalizing the quantum state...");
qreg.normalize();
println("[!] quantum state renormalized successfully.");
}
for (size_t qi=0; qi<qreg.size(); ++qi)
{
qreg.set_measurement_prediction(qi,__state_unknown__);
//qreg.set_binary(qi,__state_unknown__);
}
return 0;
}
void dump()
{
println(" [-] prepare (quantum_state=" << state << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
// this is a dirty hack, itshould be fixed later (unknown qubit number !)
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepare_gate__;
}
};
/**
* \brief print : debug utility
* print arbitrary string
*/
class print_str : public gate
{
private:
std::string str;   // text emitted when the gate executes
public:
print_str(std::string& s) : str(s)
{
}
// prints the stored text; the quantum state is untouched
int64_t apply(qu_register& qreg)
{
println(str);
return 0;
}
void dump()
{
// bug fix: the original printed only a trailing quote
// (` print hello"`); quote the string on both sides
println(" print \"" << str << "\"");
}
// a print touches no qubits
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
return r;
}
gate_type_t type()
{
return __print_str__;
}
};
}
#endif // QX_GATE_H
|
mpi_omp_pthreads.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>
#include <pthread.h>
#include <omp.h>
#include <mpi.h>
/* this is to ensure that the threads overlap in time */
#define NAPTIME 3
#define MAX_POSIX_THREADS 64
static pthread_t thread_pool[MAX_POSIX_THREADS];
static int mpi_size, mpi_rank;
static int num_posix_threads;
/* pthread entry point: identifies this thread's slot in thread_pool,
 * then spawns an OpenMP parallel region inside the POSIX thread and
 * reports the MPI-rank / pthread / OpenMP-thread / core mapping.
 * NOTE(review): get_bgq_core()/get_bgq_hwthread() are not declared in
 * this file -- presumably Blue Gene/Q helpers; confirm linkage. */
void* foo(void* dummy)
{
int i, my_pth = -1;
pthread_t my_pthread = pthread_self();
/* linear search for our own index in the pool */
for (i=0 ; i<num_posix_threads ; i++)
if (my_pthread==thread_pool[i]) my_pth = i;
sleep(NAPTIME);
int my_core = -1, my_hwth = -1;
int my_omp, num_omp;
#pragma omp parallel private(my_core,my_hwth,my_omp,num_omp) shared(my_pth)
{
/* the sleeps force the threads to overlap in time (see NAPTIME) */
sleep(NAPTIME);
my_core = get_bgq_core();
my_hwth = get_bgq_hwthread();
my_omp = omp_get_thread_num();
num_omp = omp_get_num_threads();
fprintf(stdout,"MPI rank = %2d Pthread = %2d OpenMP thread = %2d of %2d core = %2d:%1d \n",
mpi_rank, my_pth, my_omp, num_omp, my_core, my_hwth);
fflush(stdout);
sleep(NAPTIME);
}
sleep(NAPTIME);
pthread_exit(0);
}
/* fallback path when no POSIX threads are requested: runs the same
 * OpenMP reporting region directly from the main thread. */
void bar()
{
sleep(NAPTIME);
int my_core = -1, my_hwth = -1;
int my_omp, num_omp;
#pragma omp parallel private(my_core,my_hwth,my_omp,num_omp)
{
/* the sleeps force the threads to overlap in time (see NAPTIME) */
sleep(NAPTIME);
my_core = get_bgq_core();
my_hwth = get_bgq_hwthread();
my_omp = omp_get_thread_num();
num_omp = omp_get_num_threads();
fprintf(stdout,"MPI rank = %2d OpenMP thread = %2d of %2d core = %2d:%1d \n",
mpi_rank, my_omp, num_omp, my_core, my_hwth);
fflush(stdout);
sleep(NAPTIME);
}
sleep(NAPTIME);
}
/* Driver: initializes MPI with full thread support, reads the thread
 * configuration from the environment, and either spawns POSIX threads that
 * each run foo() or calls bar() directly. */
int main(int argc, char *argv[])
{
int i, rc;
int provided;
MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE,&provided);
if ( provided != MPI_THREAD_MULTIPLE ) exit(1);
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
MPI_Barrier(MPI_COMM_WORLD);
sleep(NAPTIME);
#ifdef __bgq__
/* bug fix: getenv() returns NULL when the variable is unset, and
 * atoi(NULL) is undefined behavior — guard before converting. */
const char *env_layout = getenv("BG_THREADLAYOUT");
int bg_threadlayout = env_layout ? atoi(env_layout) : 0;
if (mpi_rank==0) fprintf(stdout,"BG_THREADLAYOUT = %2d\n", bg_threadlayout);
#endif
const char *env_pth = getenv("POSIX_NUM_THREADS");
num_posix_threads = env_pth ? atoi(env_pth) : 0;
/* clamp to [0, MAX_POSIX_THREADS] so thread_pool cannot overflow */
if (num_posix_threads<0) num_posix_threads = 0;
if (num_posix_threads>MAX_POSIX_THREADS) num_posix_threads = MAX_POSIX_THREADS;
if (mpi_rank==0) fprintf(stdout,"POSIX_NUM_THREADS = %2d\n", num_posix_threads);
if (mpi_rank==0) fprintf(stdout,"OMP_MAX_NUM_THREADS = %2d\n", omp_get_max_threads());
fflush(stdout);
if ( num_posix_threads > 0 ) {
//fprintf(stdout,"MPI rank %2d creating %2d POSIX threads\n", mpi_rank, num_posix_threads); fflush(stdout);
for (i=0 ; i<num_posix_threads ; i++){
rc = pthread_create(&thread_pool[i], NULL, foo, NULL);
assert(rc==0);
}
MPI_Barrier(MPI_COMM_WORLD);
sleep(NAPTIME);
for (i=0 ; i<num_posix_threads ; i++){
rc = pthread_join(thread_pool[i],NULL);
assert(rc==0);
}
//fprintf(stdout,"MPI rank %2d joined %2d POSIX threads\n", mpi_rank, num_posix_threads); fflush(stdout);
} else {
bar();
}
MPI_Barrier(MPI_COMM_WORLD);
sleep(NAPTIME);
MPI_Finalize();
return 0;
}
|
9014.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
/* 2-D FDTD kernel (tiled by CHILL: 128-wide row blocks, 16-wide column blocks).
 * For each time step t2:
 *   - row 0 of ey is set from the source signal _fict_[t2] (serial),
 *   - ey and ex are updated from spatial differences of hz,
 *   - hz is updated from spatial differences of ex and ey.
 *
 * Bug fix: t6/t8/t10 are declared at function scope, so under
 * `#pragma omp parallel for` only the parallel loop variable t4 was
 * privatized — the inner loop counters were shared across threads,
 * a data race producing wrong results. They are now listed in private().
 */
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
int t10;
int t8;
int t6;
int t4;
int t2;
for (t2 = 0; t2 <= tmax - 1; t2 += 1) {
/* inject source along row 0 (kept serial: trivially cheap) */
for (t4 = 0; t4 <= ny - 1; t4 += 1)
ey[0][t4] = _fict_[t2];
#pragma omp parallel for private(t6, t8, t10)
for (t4 = 1; t4 <= nx - 1; t4 += 128)
for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1)
for (t8 = 0; t8 <= ny - 1; t8 += 16)
for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1)
ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);
#pragma omp parallel for private(t6, t8, t10)
for (t4 = 0; t4 <= nx - 1; t4 += 128)
for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1)
for (t8 = 1; t8 <= ny - 1; t8 += 16)
for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1)
ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);
#pragma omp parallel for private(t6, t8, t10)
for (t4 = 0; t4 <= nx - 2; t4 += 128)
for (t6 = t4; t6 <= (t4 + 127 < nx - 2 ? t4 + 127 : nx - 2); t6 += 1)
for (t8 = 0; t8 <= ny - 2; t8 += 16)
for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1)
hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]);
}
}
|
SpatialFullConvolution.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialFullConvolution.c"
#else
/* Lua binding: forward pass of a full ("transposed") 2-D convolution.
 * Stack: [1] = module table (dW, dH, weight, bias, output), [2] = input.
 * Input is 3D (planes x H x W) or 4D (batch x planes x H x W).
 * Output spatial size: (in - 1) * stride + kernel (the transpose of the
 * valid-convolution size formula). Returns 1 (the output tensor field). */
static int nn_(SpatialFullConvolution_updateOutput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected");
/* batch mode shifts the spatial dimensions right by one */
int dimw = 2;
int dimh = 1;
if (input->nDimension == 4) {
dimw++;
dimh++;
}
/* weight layout: inputPlane x outputPlane x kH x kW */
long nOutputPlane = weight->size[1];
long kW = weight->size[3];
long kH = weight->size[2];
long inputWidth = input->size[dimw];
long inputHeight = input->size[dimh];
long outputWidth = (inputWidth - 1) * dW + kW;
long outputHeight = (inputHeight - 1) * dH + kH;
if (input->nDimension == 3)
{
THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
/* add bias */
long i;
real* bias_data = THTensor_(data)(bias);
real* output_data = THTensor_(data)(output);
/* fill each output plane with its bias value, one plane per thread */
#pragma omp parallel for private(i)
for (i=0; i<bias->size[0]; i++)
{
/*THTensor_(select)(outn,output,0,i);*/
/*TH_TENSOR_APPLY(real,outn, *outn_data = bias_data[i];);*/
real *ptr_output = output_data + i*outputWidth*outputHeight;
long j;
for(j = 0; j < outputWidth*outputHeight; j++)
ptr_output[j] = bias_data[i];
}
/* do convolutions */
/* transpose in/out plane dims so conv2Dmv accumulates into output ("F" = full conv) */
THTensor *tweight = THTensor_(newTranspose)(weight,0,1);
THTensor_(conv2Dmv)(output, 1.0, 1.0, input, tweight, dH, dW, "F", "C");
THTensor_(free)(tweight);
}
else
{
THTensor_(resize4d)(output, input->size[0], nOutputPlane, outputHeight, outputWidth);
real* bias_data = THTensor_(data)(bias);
real* output_data = THTensor_(data)(output);
long p;
/* parallelize over batch samples; each thread fills its own sample */
#pragma omp parallel for private(p)
for (p=0; p<input->size[0]; p++)
{
/* BIAS */
long i;
for (i=0; i<bias->size[0]; i++)
{
real *ptr_output = output_data + p*nOutputPlane*outputWidth*outputHeight + i*outputWidth*outputHeight;
long j;
for(j = 0; j < outputWidth*outputHeight; j++)
ptr_output[j] = bias_data[i];
}
}
/* do convolutions */
THTensor *tweight = THTensor_(newTranspose)(weight,0,1);
THTensor_(conv2Dmm)(output, 1.0, 1.0, input, tweight, dH, dW, "F", "C");
THTensor_(free)(tweight);
}
return 1;
}
/* Lua binding: backward pass w.r.t. the input of a full 2-D convolution.
 * Stack: [1] = module table (dW, dH, weight, gradInput), [2] = input,
 * [3] = gradOutput. The gradient of a full convolution is a valid
 * convolution ("V") with flipped kernels ("X"). Returns 1. */
static int nn_(SpatialFullConvolution_updateGradInput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
int strideW = luaT_getfieldcheckint(L, 1, "dW");
int strideH = luaT_getfieldcheckint(L, 1, "dH");
THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
int batched = (input->nDimension == 4);
long nOutputPlane = weight->size[1];
THArgCheck( nOutputPlane == gradOutput->size[batched ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );
/* gradient to input: beta=0 overwrites gradInput, alpha=1 */
if (batched)
THTensor_(conv2Dmm)(gradInput, 0.0, 1.0, gradOutput, weight, strideH, strideW, "V", "X");
else
THTensor_(conv2Dmv)(gradInput, 0.0, 1.0, gradOutput, weight, strideH, strideW, "V", "X");
return 1;
}
/* Lua binding: accumulate parameter gradients (gradWeight, gradBias).
 * Stack: [1] = module table, [2] = input, [3] = gradOutput,
 * [4] = optional scale (default 1). gradBias[k] += scale * sum of
 * gradOutput plane k; gradWeight accumulates the reverse correlation of
 * gradOutput with input. Returns 0 (no Lua results). */
static int nn_(SpatialFullConvolution_accGradParameters)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
real scale = luaL_optnumber(L, 4, 1);
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);
long nOutputPlane = weight->size[1];
THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );
/* batch mode shifts the spatial dimensions right by one */
int dimw = 2;
int dimh = 1;
if (input->nDimension == 4)
{
dimw++;
dimh++;
}
/* gradient to bias */
real *gradBias_data = THTensor_(data)(gradBias);
real *gradOutput_data = THTensor_(data)(gradOutput);
/* number of elements in one gradOutput feature plane */
long noutSlice = gradOutput->size[dimh]*gradOutput->size[dimw];
/*THTensor* gradOutSlice = THTensor_(new)();*/
if (input->nDimension == 3)
{
long k;
/* each thread owns one output plane k, so gradBias_data[k] is race-free */
#pragma omp parallel for private(k)
for(k = 0; k < nOutputPlane; k++)
{
/*THTensor_(select)(gradOutSlice, gradOutput, 0, k);*/
real *ptr_gradOutput = gradOutput_data + k*noutSlice;
long l;
for(l = 0; l < noutSlice; l++)
gradBias_data[k] += scale*ptr_gradOutput[l];
}
/* gradient to kernels */
THTensor_(conv2DRevger)(gradWeight, 1.0, scale, gradOutput, input, dH, dW);
}
else
{
long k;
/* batch mode: sum plane k over every sample p into gradBias_data[k] */
#pragma omp parallel for private(k)
for(k = 0; k < nOutputPlane; k++)
{
long p;
for(p = 0; p < input->size[0]; p++)
{
/* BIAS */
real *ptr_gradOutput = gradOutput_data + p*nOutputPlane*noutSlice + k*noutSlice;
long l;
for(l = 0; l < noutSlice; l++)
gradBias_data[k] += scale*ptr_gradOutput[l];
}
}
/* gradient to kernels */
THTensor_(conv2DRevgerm)(gradWeight, 1.0, scale, gradOutput, input, dH, dW);
}
return 0;
}
/* Lua method table: maps Lua-visible names to the C implementations above. */
static const struct luaL_Reg nn_(SpatialFullConvolution__) [] = {
{"SpatialFullConvolution_updateOutput", nn_(SpatialFullConvolution_updateOutput)},
{"SpatialFullConvolution_updateGradInput", nn_(SpatialFullConvolution_updateGradInput)},
{"SpatialFullConvolution_accGradParameters", nn_(SpatialFullConvolution_accGradParameters)},
{NULL, NULL}
};
/* Register the method table under the "nn" field of the tensor metatable,
 * then pop the metatable to leave the Lua stack balanced. */
static void nn_(SpatialFullConvolution_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, nn_(SpatialFullConvolution__), "nn");
lua_pop(L,1);
}
#endif
|
matrix_multiplication.c | #include <stdio.h>
#include <omp.h>
#include <time.h>
/* Computes ans = arr * arr (3x3 integer matrix product) with the row loop
 * parallelized, times the computation, and prints the result matrix.
 *
 * Bug fixes vs. the original:
 *  - `res` was initialized once per row instead of once per output element,
 *    so partial sums leaked across the k loop (ans[i][k] grew with k);
 *  - the inner product used arr[j][i] instead of arr[j][k], so the value
 *    did not depend on k at all. Neither is a matrix product.
 * Note: clock() measures CPU time summed over threads; wall time would need
 * omp_get_wtime(). Kept as-is to preserve the original output format.
 */
int main()
{
int arr[][3] = {
{1, 2, 3},
{4, 5, 6},
{7, 8, 9}};
int ans[3][3];
clock_t start = clock();
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 3; i++)
{
for (int k = 0; k < 3; k++)
{
/* one dot product per output element: row i of arr with column k */
int res = 0;
for (int j = 0; j < 3; j++)
{
res += arr[i][j] * arr[j][k];
}
ans[i][k] = res;
}
}
}
clock_t end = clock();
clock_t diff = end - start;
double time_taken = ((double)diff) / CLOCKS_PER_SEC;
printf("Execution Time : %f seconds\n", time_taken);
// Print the matrix
#pragma omp parallel
#pragma omp single
{
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
printf("%d ", ans[i][j]);
}
printf("\n");
}
}
}
/*
Since we assumed the matrix size is 3, the above code can be changed to use sections instead:
for (int i = 0; i < 3; i++)
{
int res = 0;
int k, j;
for (k = 0; k < 3; k++)
{
for (j = 0; j < 3; j++)
{
res += arr[i][j] * arr[j][i];
}
ans[i][k] = res;
}
}
REPLACED BY
#pragma omp parallel sections
{
#pragma omp section
{
int i=0;
int res = 0;
int k, j;
for (k = 0; k < 3; k++)
{
for (j = 0; j < 3; j++)
{
res += arr[i][j] * arr[j][i];
}
ans[i][k] = res;
}
}
#pragma omp section
{
int i=1;
int res = 0;
int k, j;
for (k = 0; k < 3; k++)
{
for (j = 0; j < 3; j++)
{
res += arr[i][j] * arr[j][i];
}
ans[i][k] = res;
}
}
#pragma omp section
{
int i=2;
int res = 0;
int k, j;
for (k = 0; k < 3; k++)
{
for (j = 0; j < 3; j++)
{
res += arr[i][j] * arr[j][i];
}
ans[i][k] = res;
}
}
}
*/ |
GeometryConverter.h | /* -*-c++-*- IfcQuery www.ifcquery.com
*
MIT License
Copyright (c) 2017 Fabian Gerold
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <unordered_set>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/BuildingModel.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcGloballyUniqueId.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h>
#include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include "IncludeCarveHeaders.h"
#include "GeometryInputData.h"
#include "RepresentationConverter.h"
#include "CSG_Adapter.h"
// Converts a loaded IFC BuildingModel into Carve geometry: one ProductShapeData
// per IfcProduct (keyed by GUID), resolved afterwards into the IFC spatial
// structure. Also collects per-entity status messages (deduplicated) and
// forwards progress callbacks. When ENABLE_OPENMP is defined, product shapes
// are created in parallel, so shared members are guarded by mutexes.
class GeometryConverter : public StatusCallback
{
protected:
shared_ptr<BuildingModel> m_ifc_model;
shared_ptr<GeometrySettings> m_geom_settings;
shared_ptr<RepresentationConverter> m_representation_converter;
// GUID -> geometry created for that product
std::map<std::string, shared_ptr<ProductShapeData> > m_product_shape_data;
// GUID -> objects that were never reached via the spatial structure
std::map<std::string, shared_ptr<BuildingObject> > m_map_outside_spatial_structure;
double m_recent_progress = 0;
// epsilon passed to carve for CSG robustness
double m_csg_eps = 1.5e-05;
// entity id -> messages already reported for it (for deduplication)
std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages;
#ifdef ENABLE_OPENMP
Mutex m_writelock_messages;
#endif
public:
// getters and setters
shared_ptr<BuildingModel>& getBuildingModel() { return m_ifc_model; }
shared_ptr<RepresentationConverter>& getRepresentationConverter() { return m_representation_converter; }
shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; }
std::map<std::string, shared_ptr<ProductShapeData> >& getShapeInputData() { return m_product_shape_data; }
std::map<std::string, shared_ptr<BuildingObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; }
GeometryConverter( shared_ptr<BuildingModel>& ifc_model )
{
m_ifc_model = ifc_model;
m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() );
resetNumVerticesPerCircle();
shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter();
m_representation_converter = shared_ptr<RepresentationConverter>( new RepresentationConverter( m_geom_settings, unit_converter ) );
// redirect all messages to this->messageTarget
m_ifc_model->setMessageTarget( this );
m_representation_converter->setMessageTarget( this );
}
virtual ~GeometryConverter() {}
// Unload the model and drop all cached geometry; resets progress to 0.
void resetModel()
{
progressTextCallback( L"Unloading model, cleaning up memory..." );
clearInputCache();
m_recent_progress = 0.0;
m_ifc_model->clearCache();
m_ifc_model->clearIfcModel();
progressTextCallback( L"Unloading model done" );
progressValueCallback( 0.0, "parse" );
#ifdef _DEBUG
GeomDebugDump::clearMeshsetDump();
#endif
}
// Drop per-model caches without touching the model itself.
void clearInputCache()
{
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
m_messages.clear();
}
void resetNumVerticesPerCircle()
{
m_geom_settings->resetNumVerticesPerCircle();
}
void setCsgEps(double eps)
{
m_csg_eps = eps;
}
// Replace the current model; re-wires unit conversion and message routing.
void setModel( shared_ptr<BuildingModel> model )
{
if( m_ifc_model )
{
m_ifc_model->unsetMessageCallBack();
}
clearInputCache();
m_ifc_model = model;
m_representation_converter->clearCache();
m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() );
m_ifc_model->setMessageTarget( this );
}
// Recursively attach child products (via IfcRelAggregates and
// IfcRelContainedInSpatialStructure) to product_data, marking everything
// reached as part of the spatial structure.
void resolveProjectStructure( shared_ptr<ProductShapeData>& product_data )
{
if( !product_data )
{
return;
}
if( product_data->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_data->m_ifc_object_definition);
if (!ifc_object_def)
{
return;
}
product_data->m_added_to_spatial_structure = true;
// children decomposed from this object (IfcRelAggregates)
const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = ifc_object_def->m_IsDecomposedBy_inverse;
for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii )
{
const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii];
if( rel_aggregates_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr );
if( rel_aggregates )
{
const std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects;
for( size_t jj = 0; jj < vec_related_objects.size(); ++jj )
{
const shared_ptr<IfcObjectDefinition>& related_obj_def = vec_related_objects[jj];
if( related_obj_def )
{
// products are keyed by GUID (UTF-8 converted from the wide GlobalId)
std::string related_guid;
if (related_obj_def->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
related_guid = converterX.to_bytes(related_obj_def->m_GlobalId->m_value);
}
auto it_product_map = m_product_shape_data.find(related_guid);
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
// elements contained in a spatial element (IfcRelContainedInSpatialStructure)
shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>(ifc_object_def);
if( spatial_ele )
{
const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse;
for( size_t ii = 0; ii < vec_contains.size(); ++ii )
{
const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii];
if( rel_contained_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr );
if( rel_contained )
{
const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements;
for( size_t jj = 0; jj < vec_related_elements.size(); ++jj )
{
const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj];
if( related_product )
{
std::string related_guid;
if (related_product->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
related_guid = converterX.to_bytes(related_product->m_GlobalId->m_value);
}
auto it_product_map = m_product_shape_data.find(related_guid);
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
}
// TODO: handle IfcRelAssignsToProduct
}
// Extract appearance (currently only a "Color" complex property) from a
// property set and attach it to the product shape.
void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeData>& product_shape )
{
if( !prop_set )
{
return;
}
for( auto& ifc_property : prop_set->m_HasProperties )
{
if( !ifc_property )
{
continue;
}
shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>(ifc_property);
if( simple_property )
{
// ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue,
// IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue))
shared_ptr<IfcIdentifier> property_name = simple_property->m_Name;
std::wstring name_str = property_name->m_value;
if( name_str.compare( L"LayerName" ) == 0 )
{
// TODO: implement layers
}
shared_ptr<IfcText> description = simple_property->m_Description;
shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>(simple_property);
if( property_single_value )
{
//shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue; //optional
//shared_ptr<IfcUnit>& unit = property_single_value->m_Unit; //optional
}
continue;
}
shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>(ifc_property);
if( complex_property )
{
if( !complex_property->m_UsageName ) continue;
if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 )
{
vec4 vec_color;
m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( complex_property, vec_color );
shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) );
if( !appearance_data )
{
throw OutOfMemoryException( __FUNC__ );
}
// same color for ambient/diffuse/specular, fixed shininess
appearance_data->m_apply_to_geometry_type = AppearanceData::GEOM_TYPE_ANY;
appearance_data->m_color_ambient.setColor( vec_color );
appearance_data->m_color_diffuse.setColor( vec_color );
appearance_data->m_color_specular.setColor( vec_color );
appearance_data->m_shininess = 35.f;
product_shape->addAppearance( appearance_data );
}
}
}
}
/*\brief method convertGeometry: Creates geometry for Carve from previously loaded BuildingModel model.
**/
void convertGeometry()
{
progressTextCallback( L"Creating geometry..." );
progressValueCallback( 0, "geometry" );
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
if( !m_ifc_model )
{
return;
}
shared_ptr<ProductShapeData> ifc_project_data;
std::vector<shared_ptr<IfcObjectDefinition> > vec_object_definitions;
double length_to_meter_factor = 1.0;
if( m_ifc_model->getUnitConverter() )
{
length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
}
carve::setEpsilon( m_csg_eps );
// collect all IfcObjectDefinition entities from the model
const std::map<int, shared_ptr<BuildingEntity> >& map_entities = m_ifc_model->getMapIfcEntities();
if (map_entities.size() > 0)
{
for (auto it = map_entities.begin(); it != map_entities.end(); ++it)
{
shared_ptr<BuildingEntity> obj = it->second;
shared_ptr<IfcObjectDefinition> object_def = dynamic_pointer_cast<IfcObjectDefinition>(obj);
if (object_def)
{
vec_object_definitions.push_back(object_def);
}
}
}
// create geometry for for each IfcProduct independently, spatial structure will be resolved later
std::map<std::string, shared_ptr<ProductShapeData> >* map_products_ptr = &m_product_shape_data;
const int num_object_definitions = (int)vec_object_definitions.size();
#ifdef ENABLE_OPENMP
Mutex writelock_map;
Mutex writelock_ifc_project;
#pragma omp parallel firstprivate(num_object_definitions) shared(map_products_ptr)
{
// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,40)
#endif
for( int i = 0; i < num_object_definitions; ++i )
{
shared_ptr<IfcObjectDefinition> object_def = vec_object_definitions[i];
const int entity_id = object_def->m_entity_id;
std::string guid;
if (object_def->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
guid = converterX.to_bytes(object_def->m_GlobalId->m_value);
}
shared_ptr<ProductShapeData> product_geom_input_data( new ProductShapeData( entity_id ) );
product_geom_input_data->m_ifc_object_definition = object_def;
// errors from worker threads are buffered and reported under lock below
std::stringstream thread_err;
if( !m_geom_settings->getRenderObjectFilter()(object_def) )
{
// geometry will be created in method subtractOpenings
continue;
}
else if( dynamic_pointer_cast<IfcProject>(object_def) )
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_ifc_project );
#endif
ifc_project_data = product_geom_input_data;
}
try
{
convertIfcProductShape( product_geom_input_data );
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
thread_err << e.what();
}
catch( carve::exception& e )
{
thread_err << e.str();
}
catch( std::exception& e )
{
thread_err << e.what();
}
catch( ... )
{
thread_err << "undefined error, product id " << entity_id;
}
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_map );
#endif
map_products_ptr->insert( std::make_pair( guid, product_geom_input_data ) );
if( thread_err.tellp() > 0 )
{
messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
}
// progress callback
double progress = (double)i / (double)num_object_definitions;
if( progress - m_recent_progress > 0.02 )
{
#ifdef ENABLE_OPENMP
if( omp_get_thread_num() == 0 )
#endif
{
// leave 10% of progress to openscenegraph internals
progressValueCallback( progress*0.9, "geometry" );
m_recent_progress = progress;
}
}
}
#ifdef ENABLE_OPENMP
} // implicit barrier
#endif
// subtract openings in related objects, such as IFCBUILDINGELEMENTPART connected to a window through IFCRELAGGREGATES
for( auto it = map_products_ptr->begin(); it != map_products_ptr->end(); ++it )
{
shared_ptr<ProductShapeData> product_geom_input_data = it->second;
try
{
subtractOpeningsInRelatedObjects(product_geom_input_data);
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( carve::exception& e )
{
messageCallback(e.str(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( std::exception& e )
{
messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( ... )
{
messageCallback("undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__);
}
}
try
{
// now resolve spatial structure
if( ifc_project_data )
{
resolveProjectStructure( ifc_project_data );
}
// check if there are entities that are not in spatial structure
for( auto it_product_shapes = m_product_shape_data.begin(); it_product_shapes != m_product_shape_data.end(); ++it_product_shapes )
{
shared_ptr<ProductShapeData> product_shape = it_product_shapes->second;
if( !product_shape )
{
continue;
}
if( !product_shape->m_added_to_spatial_structure )
{
if( !product_shape->m_ifc_object_definition.expired() )
{
shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def);
if( !m_geom_settings->getRenderObjectFilter()(ifc_object_def) )
{
continue;
}
std::string guid;
if (ifc_object_def->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
guid = converterX.to_bytes(ifc_object_def->m_GlobalId->m_value);
}
m_map_outside_spatial_structure[guid] = ifc_object_def;
}
}
}
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( ... )
{
messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
m_representation_converter->getProfileCache()->clearProfileCache();
progressTextCallback( L"Loading file done" );
progressValueCallback( 1.0, "geometry" );
}
//\brief method convertIfcProduct: Creates geometry objects (meshset with connected vertex-edge-face graph) from an IfcProduct object
// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
void convertIfcProductShape( shared_ptr<ProductShapeData>& product_shape )
{
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if (!ifc_product)
{
return;
}
if( !ifc_product->m_Representation )
{
return;
}
double length_factor = 1.0;
if( m_ifc_model )
{
if( m_ifc_model->getUnitConverter() )
{
length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
}
}
// evaluate IFC geometry
shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation;
std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations;
for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations )
{
const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations];
if( !representation )
{
continue;
}
try
{
shared_ptr<RepresentationData> representation_data( new RepresentationData() );
m_representation_converter->convertIfcRepresentation( representation, representation_data );
product_shape->m_vec_representations.push_back( representation_data );
representation_data->m_parent_product = product_shape;
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
}
// IfcProduct has an ObjectPlacement that can be local or global
product_shape->m_object_placement = ifc_product->m_ObjectPlacement;
if( ifc_product->m_ObjectPlacement )
{
// IfcPlacement2Matrix follows related placements in case of local coordinate systems
std::unordered_set<IfcObjectPlacement*> placement_already_applied;
m_representation_converter->getPlacementConverter()->convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, product_shape, placement_already_applied, false );
}
// handle openings
std::vector<shared_ptr<ProductShapeData> > vec_opening_data;
const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
if( ifc_element )
{
m_representation_converter->subtractOpenings(ifc_element, product_shape);
}
// Fetch the IFCProduct relationships
if( ifc_product->m_IsDefinedBy_inverse.size() > 0 )
{
std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse;
for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i )
{
shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] );
shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition;
if( relating_property_definition_select )
{
// TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet);
shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>(relating_property_definition_select);
if( property_set_def )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
continue;
}
shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>(relating_property_definition_select);
if( property_set_def_set )
{
std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_propterty_set_def = property_set_def_set->m_vec;
std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def;
for( it_property_set_def = vec_propterty_set_def.begin(); it_property_set_def != vec_propterty_set_def.end(); ++it_property_set_def )
{
shared_ptr<IfcPropertySetDefinition> property_set_def2 = (*it_property_set_def);
if( property_set_def2 )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def2);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
}
}
continue;
}
}
}
}
}
// For an element with openings, subtract those openings also from the
// geometry of objects aggregated into it (e.g. IfcBuildingElementPart).
void subtractOpeningsInRelatedObjects(shared_ptr<ProductShapeData>& product_shape)
{
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if (!ifc_product)
{
return;
}
shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
if( !ifc_element )
{
return;
}
if( ifc_element->m_HasOpenings_inverse.size() == 0 )
{
return;
}
// collect aggregated objects
const std::vector<weak_ptr<IfcRelAggregates> >& vec_decomposed_by = ifc_element->m_IsDecomposedBy_inverse;
for( auto& decomposed_by : vec_decomposed_by )
{
if( decomposed_by.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> decomposed_by_aggregates(decomposed_by);
std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = decomposed_by_aggregates->m_RelatedObjects;
for( auto& related_object : vec_related_objects )
{
if( !related_object )
{
continue;
}
std::string guid;
if (related_object->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
guid = converterX.to_bytes(related_object->m_GlobalId->m_value);
auto it_find_related_shape = m_product_shape_data.find(guid);
if( it_find_related_shape != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_find_related_shape->second;
m_representation_converter->subtractOpenings(ifc_element, related_product_shape);
}
}
}
}
}
// Message sink: deduplicates identical messages per entity before
// forwarding to messageCallback. `ptr` is the GeometryConverter instance.
virtual void messageTarget( void* ptr, shared_ptr<StatusCallback::Message> m )
{
GeometryConverter* myself = (GeometryConverter*)ptr;
if( myself )
{
if( m->m_entity )
{
#ifdef ENABLE_OPENMP
ScopedLock lock( myself->m_writelock_messages );
#endif
// make sure that the same message for one entity does not appear several times
const int entity_id = m->m_entity->m_entity_id;
auto it = myself->m_messages.find( entity_id );
if( it != myself->m_messages.end() )
{
std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second;
for( size_t i = 0; i < vec_message_for_entity.size(); ++i )
{
shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i];
if( existing_message->m_message_text.compare( m->m_message_text ) == 0 )
{
// same message for same entity is already there, so ignore message
return;
}
}
vec_message_for_entity.push_back( m );
}
else
{
std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second;
vec.push_back( m );
}
}
myself->messageCallback( m );
}
}
};
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: Y is normalized in place, as in the classic GNU libc example. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that 0 <= x->tv_usec - y->tv_usec holds afterwards. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec of the difference is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative difference iff x's (adjusted) seconds are smaller. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) axis-symmetric variable-coefficient
 * 3D stencil.  Usage: prog Nx Ny Nz [Nt]; each spatial dimension gets
 * 8 extra ghost points (4 on each face).  Runs the kernel TESTS times
 * and reports per-test and best wall-clock times.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Defaults so the sizes are never read uninitialized when fewer
     * command-line arguments are supplied (the original left them
     * indeterminate, which is undefined behavior). */
    int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // allocate the arrays: A holds two time levels, coef the 13 coefficients
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for(m=0; m<13;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 32;
    tile_size[3] = 64;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;

    // initialize data and coefficients with reproducible pseudo-random values
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<13; m++) {
        for (i=1; i<Nz; i++) {
            for (j=1; j<Ny; j++) {
                for (k=1; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
                            coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
                            coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
                            coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
                            coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
                            coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
                            coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
                            coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
                            coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
                            coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
                            coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
                            coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
                            coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: the original called an undefined lowercase min();
         * the macro defined in this file is MIN. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (the original leaked tile_size and the
    // top-level pointer arrays A and coef)
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<13;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
zip_fmt_plug.c | /*
* ZIP cracker patch for JtR. Hacked together during June of 2011
* by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC.
*
* This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Files borrowed from http://www.gladman.me.uk/cryptography_technology/fileencrypt/
* have "gladman_" prepended to them.
*
* http://www.winzip.com/aes_info.htm (There is a 1 in 65,536 chance that an
* incorrect password will yield a matching verification value; therefore, a
* matching verification value cannot be absolutely relied on to indicate a
* correct password.). The alternative is to implement/use a full unzip engine.
*
* This format significantly improved, Summer of 2014, JimF. Changed the signature
* to the $zip2$, and added logic to properly make this format work. Now it is NOT a
* 'FMT_NOT_EXACT' format any more. Now it properly cracks the passwords. There is
* an hmac-sha1 'key' that is also processed (and the decryption key), in the pbkdf2
* call. Now we use this hmac-sha1 key, process the compressed and encrypted buffer,
* compare to a 10 byte checksum (which is now the binary blob), and we KNOW that we
* have cracked or not cracked the key. The $zip$ was broken before, so that signature
* has simply been retired as DOA. This format is now much like the pkzip format.
* it may have all data contained within the hash string, OR it may have some, and
* have a file pointer on where to get the rest of the data.
*
* optimizations still that can be done.
* 1. decrypt and inflate some data for really large buffers, BEFORE doing the
* hmac-sha1 call. The inflate algorithm is pretty self checking for 'valid'
* data, so a few hundred bytes of checking and we are 99.999% sure we have the
* right password, before starting an expensive hmac (for instance if the zip blob
* was 50mb).
* 2. Put in the 'file magic' logic we have for pkzip. There is a place holder for it,
* but the logic has not been added.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_zip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_zip);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <ctype.h>
#include "arch.h"
#include "crc32.h"
#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "memory.h"
#include "pkzip.h"
#include "pbkdf2_hmac_sha1.h"
#include "dyna_salt.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 4 // Tuned on core i7 (note, use -test=120 during tuning)
static int omp_t = 1;
#endif
#include "gladman_hmac.h"
#include "memdbg.h"
#define BASE_SCALE 96
#define KEY_LENGTH(mode) (8 * ((mode) & 3) + 8)
#define SALT_LENGTH(mode) (4 * ((mode) & 3) + 4)
/* Dynamic salt record for one $zip2$ hash.  The encrypted data blob is
 * stored inline after the fixed fields; the record is allocated as
 * sizeof(my_salt) + comp_len so datablob acts as a trailing
 * variable-length array. */
typedef struct my_salt_t {
    dyna_salt dsalt;                    // JtR dyna-salt linkage (comparison offset/size)
    uint32_t comp_len;                  // length of the compressed/encrypted data in datablob
    struct {
        uint16_t type : 4;              // hash type; nonzero marks a bad/unusable salt
        uint16_t mode : 4;              // AES mode 1/2/3 -> 128/192/256-bit key
    } v;
    unsigned char passverify[2];        // 2-byte quick password-verify value
    unsigned char salt[SALT_LENGTH(3)]; // PBKDF2 salt (length depends on mode, max 16)
    //uint64_t data_key; // MSB of md5(data blob). We lookup using this.
    unsigned char datablob[1];          // trailing variable-length encrypted data
} my_salt;
/* From gladman_fileenc.h */
#define PWD_VER_LENGTH 2
#define KEYING_ITERATIONS 1000
#define FORMAT_LABEL "ZIP"
#define FORMAT_NAME "WinZip"
#define FORMAT_TAG "$zip2$"
#define FORMAT_CLOSE_TAG "$/zip2$"
#define TAG_LENGTH 6
#ifdef MMX_COEF
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define BINARY_SIZE 10
#define PLAINTEXT_LENGTH 125
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(my_salt*)
#define SALT_ALIGN sizeof(my_salt*)
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static unsigned char (*crypt_key)[((BINARY_SIZE+3)/4)*4];
static my_salt *saved_salt;
// filename:$zip2$*Ty*Mo*Ma*Sa*Va*Le*DF*Au*$/zip2$
// Ty = type (0) and ignored.
// Mo = mode (1 2 3 for 128/192/256 bit
// Ma = magic (file magic). This is reserved for now. See pkzip_fmt_plug.c or zip2john.c for information.
// For now, this must be a '0'
// Sa = salt(hex). 8, 12 or 16 bytes of salt (depends on mode)
// Va = Verification bytes(hex) (2 byte quick checker)
// Le = real compr len (hex) length of compressed/encrypted data (field DF)
// DF = compressed data DF can be L*2 hex bytes, and if so, then it is the ENTIRE file blob written 'inline'.
// However, if the data blob is too long, then a .zip ZIPDATA_FILE_PTR_RECORD structure will be the 'contents' of DF
// Au = Authentication code (hex) a 10 byte hex value that is the hmac-sha1 of data over D. This is the binary() value
// ZIPDATA_FILE_PTR_RECORD (this can be the 'DF' of this above hash line.
// *ZFILE*Fn*Oh*Ob* (Note, the leading and trailing * are the * that 'wrap' the DF object.
// ZFILE This is the literal string ZFILE
// Fn This is the name of the .zip file. NOTE the user will need to keep the .zip file in proper locations (same as
// was seen when running zip2john. If the file is removed, this hash line will no longer be valid.
// Oh Offset to the zip central header record for this blob.
// Ob Offset to the start of the blob data
static struct fmt_tests zip_tests[] = {
{"$zip2$*0*1*0*9ffba76344938a7d*cc41*210*fb28d3fd983302058c5296c07442502ae05bb59adb9eb2378cb0841efa227cd58f7076ec00bb5faaee24c3433763d715461d4e714cdd9d933f621d2cf6ae73d824414ca2126cfc608d8fc7641d2869afa90f28be7113c71c6b6a3ad6d6633173cde9d7c1bb449cc0a1f8cbab8639255684cd25cb363234f865d9224f4065c0c62e5e60c2500bc78fa903630ccbb5816be2ef5230d411051d7bc54ecdf9dcbe500e742da2a699de0ec1f20b256dbcd506f926e91a1066a74b690f9dd50bd186d799deca428e6230957e2c6fcdcec73927d77bb49699a80e9c1540a13899ecb0b635fb728e1ade737895d3ff9babd4927bbbc296ec92bab87fd7930db6d55e74d610aef2b6ad19b7db519c0e7a257f9f78538bb0e9081c8700f7e8cd887f15a212ecb3d5a221cb8fe82a22a3258703f3c7af77ef5ecf25b4e6fb4118b00547c271d9b778b825247a4cd151bff81436997818f9d3c95155910ff152ad28b0857dcfc943e32729379c634d29a50655dc05fb63fa5f20c9c8cbdc630833a97f4f02792fcd6b1b73bfb4d333485bb0eb257b9db0481d11abfa06c2e0b82817d432341f9bdf2385ede8ca5d94917fa0bab9c2ed9d26ce58f83a93d418aa27a88697a177187e63f89904c0b9053151e30a7855252dab709aee47a2a8c098447160c8f96c56102067d9c8ffc4a74cd9011a2522998da342448b78452c6670eb7eb80ae37a96ca15f13018e16c93d515d75e792f49*bd2e946811c4c5b09694*$/zip2$", "hello1"},
{"$zip2$*0*3*0*855f69693734c7be8c1093ea5bae6114*f035*210*c02aa1d42cc7623c0746979c6c2ce78e8492e9ab1d0954b76d328c52c4d555fbdc2af52822c7b6f4548fc5cca615cd0510f699d4b6007551c38b4183cafba7b073a5ba86745f0c3842896b87425d5247d3b09e0f9f701b50866e1636ef62ee20343ea6982222434fdaf2e52fe1c90f0c30cf2b4528b79abd2824e14869846c26614d9cbc156964d63041bfab66260821bedc151663adcb2c9ac8399d921ddac06c9a4cd8b442472409356cfe0655c9dbbec36b142611ad5604b68108be3321b2324d5783938e52e5c15ec4d8beb2b5010fad66d8cf6a490370ec86878ad2b393c5aa4523b95ae21f8dd5f0ae9f24581e94793a01246a4cc5a0f772e041b3a604ae334e43fe41d32058f857c227cee567254e9c760d472af416abedf8a87e67b309d30bc94d77ef6617b0867976a4b3824c0c1c4aa2b2668f9eb70c493d20d7fab69436c59e47db40f343d98a3b7503e07969d26afa92552d15009542bf2af9b47f2cfa0c2283883e99d0966e5165850663a2deed557fb8554a16f3a9cb04b9010c4b70576b18695dfea973aa4bc607069a1d90e890973825415b717c7bdf183937fa8a3aa985be1eadc8303f756ebd07f864082b775d7788ee8901bb212e69f01836d45db320ff1ea741fa8a3c13fa49ebc34418442e6bd8b1845c56d5c798767c92a503228148a6db44a08fc4a1c1d55eea73dbb2bd4f2ab09f00b043ee0df740681f5c5579ecbb1dbb7f7f3f67ffe2*c6b781ef18c5ccd83869*$/zip2$", "hello1"},
#if 0
// This signature is specific to JimF. I have left it commented here. We can
// add one, to the unused, if we choose to, BUT the problem is that it requires
// a path that can be found. I have tested this (at least it 'worked' for this
// one. Hopefully it is working fully. If not, I will fix whatever problems it has.
#ifdef _MSC_VER
{"$zip2$*0*1*0*9bdb664673e9a944*e25a*c5*ZFILE*/phpbb/johnripper/bleeding/winz128.zip*1004*1050*925583ab1f1cdb901097*$/zip2$", "hello1"},
#else
{"$zip2$*0*1*0*9bdb664673e9a944*e25a*c5*ZFILE*/c/phpbb/johnripper/bleeding/winz128.zip*1004*1050*925583ab1f1cdb901097*$/zip2$", "hello1"},
#endif
#endif
{NULL}
};
/* One-time format initialization: scale the keys-per-crypt counts for
 * OpenMP and allocate the candidate-key and result buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;   // over-subscription factor (tuned, see OMP_SCALE)
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc_tiny(sizeof(*saved_key) *
            self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
    crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Open the .zip file named in a ZFILE reference and verify that the stored
 * offsets and the authentication code are consistent with the actual file.
 * Returns "" on success, or a pointer to a static buffer holding the error
 * message (NOTE: tmp is static, so this function is not thread-safe). */
static const char *ValidateZipFileData(u8 *Fn, u8 *Oh, u8 *Ob, unsigned len, u8 *Auth) {
    u32 id, i;
    long off;
    unsigned char bAuth[10], b;
    static char tmp[8192+256]; // 8192 size came from zip2john. That is max path it can put into a filename
    FILE *fp;

    fp = fopen((c8*)Fn, "rb"); /* have to open in bin mode for OS's where this matters, DOS/Win32 */
    if (!fp) {
        /* this error is listed, even if not in pkzip debugging mode. */
        snprintf(tmp, sizeof(tmp), "Error loading a zip-aes hash line. The ZIP file '%s' could NOT be found\n", Fn);
        return tmp;
    }
    // Oh is the hex offset of the local file header; it must start with the
    // PK\3\4 local-header signature (0x04034b50, little-endian)
    sscanf((char*)Oh, "%lx", &off);
    if (fseek(fp, off, SEEK_SET) != 0) {
        fclose(fp);
        snprintf(tmp, sizeof(tmp), "Not able to seek to specified offset in the .zip file %s, to read the zip blob data.", Fn);
        return tmp;
    }
    id = fget32LE(fp);
    if (id != 0x04034b50U) {
        fclose(fp);
        snprintf(tmp, sizeof(tmp), "Compressed zip file offset does not point to start of zip blob in file %s", Fn);
        return tmp;
    }
    // Ob is the offset of the encrypted blob itself; the 10-byte
    // authentication code sits immediately after len bytes of data
    sscanf((char*)Ob, "%lx", &off);
    off += len;
    if (fseek(fp, off, SEEK_SET) != 0) {
        fclose(fp);
        snprintf(tmp, sizeof(tmp), "Not enough data in .zip file %s, to read the zip blob data.", Fn);
        return tmp;
    }
    if (fread(bAuth, 1, 10, fp) != 10) {
        fclose(fp);
        snprintf(tmp, sizeof(tmp), "Not enough data in .zip file %s, to read the zip authentication data.", Fn);
        return tmp;
    }
    fclose(fp);
    // compare the file's authentication bytes against the hex Auth field
    for (i = 0; i < 10; ++i) {
        b = (atoi16[ARCH_INDEX(Auth[i*2])]<<4) + atoi16[ARCH_INDEX(Auth[i*2+1])];
        if (b != bAuth[i]) {
            snprintf(tmp, sizeof(tmp), "Authentication record in .zip file %s, did not match.", Fn);
            return tmp;
        }
    }
    return "";
}
/* Validate a $zip2$ ciphertext line: checks the tag, AES mode, hex-ness
 * and lengths of salt / verifier / data blob / authentication code; for
 * ZFILE references the referenced .zip file is opened and its
 * authentication bytes compared.  Returns 1 if well-formed, else 0. */
static int valid(char *ciphertext, struct fmt_main *self)
{
    u8 *ctcopy, *keeptr, *p, *cp, *Fn=0, *Oh=0, *Ob=0;
    const char *sFailStr;
    unsigned val;
    int ret = 0;
    int zip_file_validate=0;

    if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) || ciphertext[TAG_LENGTH] != '*')
        return 0;
    if (!(ctcopy = (u8*)strdup(ciphertext)))
        return 0;
    keeptr = ctcopy;
    p = &ctcopy[TAG_LENGTH+1];

    p = pkz_GetFld(p, &cp);   // type
    if (!cp || *cp != '0') { sFailStr = "Out of data, reading count of hashes field"; goto Bail; }
    p = pkz_GetFld(p, &cp);   // mode
    if (cp[1] || *cp < '1' || *cp > '3') {
        sFailStr = "Invalid aes mode (only valid for 1 to 3)"; goto Bail; }
    val = *cp - '0';
    p = pkz_GetFld(p, &cp);   // file_magic enum (ignored for now, just a place holder)
    p = pkz_GetFld(p, &cp);   // salt
    if (!pkz_is_hex_str(cp) || strlen((char*)cp) != SALT_LENGTH(val)<<1) {
        sFailStr = "Salt invalid or wrong length"; goto Bail; }
    p = pkz_GetFld(p, &cp);   // validator
    if (!pkz_is_hex_str(cp) || strlen((char*)cp) != 4) {
        sFailStr = "Validator invalid or wrong length (4 bytes hex)"; goto Bail; }
    p = pkz_GetFld(p, &cp);   // Data len.
    if (!pkz_is_hex_str(cp)) {
        sFailStr = "Data length invalid (not hex number)"; goto Bail; }
    sscanf((const char*)cp, "%x", &val);
    p = pkz_GetFld(p, &cp);   // data blob, OR file structure
    if (!strcmp((char*)cp, "ZFILE")) {
        p = pkz_GetFld(p, &Fn);
        p = pkz_GetFld(p, &Oh);
        p = pkz_GetFld(p, &Ob);
        zip_file_validate = 1;
    } else {
        if (!pkz_is_hex_str(cp) || strlen((char*)cp) != val<<1) {
            sFailStr = "Inline data blob invalid (not hex number), or wrong length"; goto Bail; }
    }
    p = pkz_GetFld(p, &cp);   // authentication_code
    if (!pkz_is_hex_str(cp) || strlen((char*)cp) != BINARY_SIZE<<1) {
        sFailStr = "Authentication data invalid (not hex number), or not 20 hex characters"; goto Bail; }
    // Ok, now if we have to pull from .zip file, lets do so, and we can validate with the authentication bytes
    if (zip_file_validate) {
        sFailStr = ValidateZipFileData(Fn, Oh, Ob, val, cp);
        if (*sFailStr) {
            /* this error is listed, even if not in pkzip debugging mode. */
            fprintf(stderr, "zip-aes file validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
            /* BUG FIX: the original returned 0 here directly, leaking the
             * strdup()'d copy on every bad ZFILE hash line; go through the
             * common cleanup path instead (ret is still 0). */
            goto Bail;
        }
    }
    p = pkz_GetFld(p, &cp);   // Trailing signature
    if (strcmp((char*)cp, FORMAT_CLOSE_TAG)) {
        sFailStr = "Invalid trailing zip2 signature"; goto Bail; }
    ret = 1;

Bail:;
#ifdef ZIP_DEBUG
    fprintf (stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
#endif
    MEM_FREE(keeptr);
    return ret;
}
/* Decode the 10-byte hmac-sha1 authentication code (the last hex field
 * before the closing tag) into a static, word-aligned buffer. */
static void *binary(char *ciphertext) {
    static union {
        unsigned char buf[10];
        unsigned x;         // forces word alignment of buf
    } out;
    const char *hex = strrchr(ciphertext, '*') - 2 * BINARY_SIZE;
    int i;

    for (i = 0; i < BINARY_SIZE; ++i) {
        const unsigned hi = atoi16[ARCH_INDEX(hex[2 * i])];
        const unsigned lo = atoi16[ARCH_INDEX(hex[2 * i + 1])];
        out.buf[i] = (hi << 4) | lo;
    }
    return out.buf;
}
/* Parse a $zip2$ ciphertext into a freshly allocated my_salt record and
 * return (via a static pointer-to-pointer, see SALT_SIZE) the dyna-salt.
 * For ZFILE references the encrypted blob is read out of the .zip file;
 * on any read failure v.type is set to 1 so crypt_all() skips the salt. */
static void *get_salt(char *ciphertext)
{
    int i;
    my_salt salt, *psalt;
    static unsigned char *ptr;
    /* extract data from "ciphertext" */
    u8 *copy_mem = (u8*)strdup(ciphertext);
    u8 *cp, *p;

    // the core copies SALT_SIZE bytes, which here is a my_salt pointer
    if (!ptr) ptr = mem_alloc_tiny(sizeof(my_salt*),sizeof(my_salt*));
    p = copy_mem + TAG_LENGTH+1; /* skip over "$zip2$*" */
    memset(&salt, 0, sizeof(salt));
    p = pkz_GetFld(p, &cp); // type
    salt.v.type = atoi((const char*)cp);
    p = pkz_GetFld(p, &cp); // mode
    salt.v.mode = atoi((const char*)cp);
    p = pkz_GetFld(p, &cp); // file_magic enum (ignored)
    p = pkz_GetFld(p, &cp); // salt
    for (i = 0; i < SALT_LENGTH(salt.v.mode); i++)
        salt.salt[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
    p = pkz_GetFld(p, &cp); // validator
    salt.passverify[0] = (atoi16[ARCH_INDEX(cp[0])]<<4) | atoi16[ARCH_INDEX(cp[1])];
    salt.passverify[1] = (atoi16[ARCH_INDEX(cp[2])]<<4) | atoi16[ARCH_INDEX(cp[3])];
    p = pkz_GetFld(p, &cp); // data len
    sscanf((const char *)cp, "%x", &salt.comp_len);

    // later we will store the data blob in our own static data structure, and place the 64 bit LSB of the
    // MD5 of the data blob into a field in the salt. For the first POC I store the entire blob and just
    // make sure all my test data is small enough to fit.

    p = pkz_GetFld(p, &cp); // data blob

    // Ok, now create the allocated salt record we are going to return back to John, using the dynamic
    // sized data buffer.
    psalt = (my_salt*)mem_calloc(sizeof(my_salt)+salt.comp_len);
    psalt->v.type = salt.v.type;
    psalt->v.mode = salt.v.mode;
    psalt->comp_len = salt.comp_len;
    psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with them.
    memcpy(psalt->salt, salt.salt, sizeof(salt.salt));
    psalt->passverify[0] = salt.passverify[0];
    psalt->passverify[1] = salt.passverify[1];

    // set the JtR core linkage stuff for this dyna_salt
    psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(my_salt, comp_len);
    psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(my_salt, comp_len, datablob, psalt->comp_len);

    if (strcmp((const char*)cp, "ZFILE")) {
        // inline blob: decode comp_len hex byte pairs
        for (i = 0; i < psalt->comp_len; i++)
            psalt->datablob[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
    } else {
        // ZFILE reference: pull the blob out of the .zip file itself
        u8 *Fn, *Oh, *Ob;
        long len;
        uint32_t id;
        FILE *fp;

        p = pkz_GetFld(p, &Fn);
        p = pkz_GetFld(p, &Oh);
        p = pkz_GetFld(p, &Ob);

        fp = fopen((const char*)Fn, "rb");
        if (!fp) {
            psalt->v.type = 1; // this will tell the format to 'skip' this salt, it is garbage
            goto Bail;
        }
        sscanf((const char*)Oh, "%lx", &len);
        if (fseek(fp, len, SEEK_SET)) {
            fclose(fp);
            psalt->v.type = 1;
            goto Bail;
        }
        id = fget32LE(fp);
        if (id != 0x04034b50U) {   // must be the PK\3\4 local-header magic
            fclose(fp);
            psalt->v.type = 1;
            goto Bail;
        }
        sscanf((const char*)Ob, "%lx", &len);
        if (fseek(fp, len, SEEK_SET)) {
            fclose(fp);
            psalt->v.type = 1;
            goto Bail;
        }
        if (fread(psalt->datablob, 1, psalt->comp_len, fp) != psalt->comp_len) {
            fclose(fp);
            psalt->v.type = 1;
            goto Bail;
        }
        fclose(fp);
    }
Bail:;
    MEM_FREE(copy_mem);

    memcpy(ptr, &psalt, sizeof(my_salt*));
    return (void*)ptr;
}
/* Install the salt chosen by the core; `salt` points at our my_salt*
 * (dyna_salt indirection), so dereference once. */
static void set_salt(void *salt)
{
    saved_salt = *((my_salt**)salt);
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH bytes;
 * the stored copy is always NUL-terminated. */
static void set_key(char *key, int index)
{
    /* snprintf truncates to PLAINTEXT_LENGTH and NUL-terminates,
     * matching the original memcpy + explicit terminator. */
    snprintf(saved_key[index], PLAINTEXT_LENGTH + 1, "%s", key);
}
/* Return the stored candidate password for this index. */
static char *get_key(int index)
{
    return saved_key[index];
}
/* Hash-table bucketing functions over the first 32 bits of the computed
 * hmac, at the core's standard increasing resolutions. */
static int get_hash_0(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & 0xf; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & 0xff; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & 0xfff; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & 0xffff; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & 0xfffff; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & 0xffffff; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)&(crypt_key[index]))[0] & 0x7ffffff; }
/* Main work loop.  For each candidate, PBKDF2-SHA1 derives
 * 2*KEY_LENGTH+2 bytes; the final 2 bytes are the quick password-verify
 * value.  Only when those match the salt's verifier is the (expensive)
 * hmac-sha1 over the whole data blob computed into crypt_key[]; otherwise
 * the result slot is zeroed so comparisons fail. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int index;

    if (saved_salt->v.type) {
        // This salt passed valid() but failed get_salt().
        // Should never happen.
        memset(crypt_key, 0, count * BINARY_SIZE);
        return count;
    }

#ifdef _OPENMP
#pragma omp parallel for default(none) private(index) shared(count, saved_key, saved_salt, crypt_key)
#endif
    for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef MMX_COEF
        // SIMD path: SSE_GROUP_SZ_SHA1 pbkdf2 computations run in parallel lanes
        unsigned char pwd_ver[(2+64)*MAX_KEYS_PER_CRYPT];
        int lens[MAX_KEYS_PER_CRYPT], i;
        unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];

        for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
            lens[i] = strlen(saved_key[i+index]);
            pin[i] = (unsigned char*)saved_key[i+index];
            pout[i] = &pwd_ver[i*(2+2*KEY_LENGTH(saved_salt->v.mode))];
        }
        pbkdf2_sha1_sse((const unsigned char **)pin, lens, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS, pout, 2+2*KEY_LENGTH(saved_salt->v.mode), 0);
        for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
            if (!memcmp(&(pout[i][KEY_LENGTH(saved_salt->v.mode)<<1]), saved_salt->passverify, 2))
            {
                // yes, I know gladman's code but for now that is what I am using. Later we will improve.
                hmac_sha1(&(pout[i][KEY_LENGTH(saved_salt->v.mode)]), KEY_LENGTH(saved_salt->v.mode),
                        (const unsigned char*)saved_salt->datablob, saved_salt->comp_len,
                        crypt_key[index+i], BINARY_SIZE);
            }
            else
                memset(crypt_key[index+i], 0, BINARY_SIZE);
        }
#else
        // scalar path: one candidate per iteration
        int LEN = 2+2*KEY_LENGTH(saved_salt->v.mode);
        union {
            // MUST be aligned on 4 byte boundary for alter endianity on BE
            // we also need 2 extra bytes for endianity flipping.
            unsigned char pwd_ver[4+64];
            ARCH_WORD_32 w;
        } x;
        unsigned char *pwd_ver = x.pwd_ver;
#if !ARCH_LITTLE_ENDIAN
        LEN += 2;
#endif
        pbkdf2_sha1((unsigned char *)saved_key[index],
            strlen(saved_key[index]), saved_salt->salt, SALT_LENGTH(saved_salt->v.mode),
            KEYING_ITERATIONS, pwd_ver, LEN, 0);
#if !ARCH_LITTLE_ENDIAN
        alter_endianity(pwd_ver, LEN);
#endif
        if (!memcmp(&(pwd_ver[KEY_LENGTH(saved_salt->v.mode)<<1]), saved_salt->passverify, 2))
        {
            // yes, I know gladman's code but for now that is what I am using. Later we will improve.
            hmac_sha1(&(pwd_ver[KEY_LENGTH(saved_salt->v.mode)]), KEY_LENGTH(saved_salt->v.mode),
                    (const unsigned char*)saved_salt->datablob, saved_salt->comp_len,
                    crypt_key[index], BINARY_SIZE);
        }
        else
            memset(crypt_key[index], 0, BINARY_SIZE);
#endif
    }
    return count;
}
static int cmp_all(void *binary, int count)
{
int i;
for (i = 0; i < count; i++)
if (((ARCH_WORD_32*)&(crypt_key[i]))[0] == ((ARCH_WORD_32*)binary)[0])
return 1;
return 0;
}
/* Per-candidate screen on the first 32 bits of the computed hmac. */
static int cmp_one(void *binary, int index)
{
    return (((ARCH_WORD_32*)&(crypt_key[index]))[0] == ((ARCH_WORD_32*)binary)[0]);
}
static int cmp_exact(char *source, int index)
{
void *b = binary(source);
return !memcmp(b, crypt_key[index], sizeof(crypt_key[index]));
}
/* JtR format registration record: the parameter block (sizes, alignment,
 * OpenMP and dyna-salt capability flags) followed by the method table
 * wired to the functions defined above. */
struct fmt_main fmt_zip = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        PLAINTEXT_LENGTH,
        4, // BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT*BASE_SCALE,
        FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        zip_tests
    }, {
        init,
        fmt_default_done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        fmt_default_split,
        binary, // to add
        get_salt,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_dyna_salt_hash,
        set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
#endif /* plugin stanza */
|
kwallet_fmt_plug.c | /* KDE KWallet cracker patch for JtR. Written by Narendra Kangralkar
* <narendrakangralkar at gmail.com> and Dhiru Kholia <dhiru at openwall.com>.
*
* Also see https://github.com/gaganpreet/kwallet-dump ;)
*
* This software is Copyright (c) 2013 by above authors and it is hereby
* released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_kwallet;
#elif FMT_REGISTERS_H
john_register_one(&fmt_kwallet);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/blowfish.h>
#include "sha.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "kwallet"
#define FORMAT_NAME "KDE KWallet"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// #define BENCH_LARGE_PASSWORDS 1
static struct fmt_tests kwallet_tests[] = {
{"$kwallet$112$25be8c9cdaa53f5404d7809ff48a37752b325c8ccd296fbd537440dfcef9d66f72940e97141d21702b325c8ccd296fbd537440dfcef9d66fcd953cf1e41904b0c494ad1e718760e74c4487cc1449233d85525e7974da221774010bb9582b1d68b55ea9288f53a2be6bd15b93a5e1b33d", "openwall"},
{"$kwallet$240$e5383800cf0ccabf76461a647bf7ed94b7260f0ac33374ea1fec0bb0144b7e3f8fa3d0f368a61075827ac60beb62be830ece6fb2f9cfb13561ed4372af19d0a720a37b0d21132a59513b3ab9030395671c9725d7d6592ad98a4754795c858c59df6049522384af98c77d5351ddc577da07ea10e7d44b3fbc9af737744f53ed0a0a67252599b66a4d1fc65926d7097dc50f45b57f41f11934e0cfc4d5491f82b43f38acde1fd337d51cf47eb5da1bcd8bff1432d7b02f0d316633b33ced337d202a44342fc79db6aea568fb322831d886d4cb6dcc50a3e17c1027550b9ee94f56bc33f9861d2b24cbb7797d79f967bea4", ""},
#ifdef BENCH_LARGE_PASSWORDS
{"$kwallet$240$f17296588b2dd9f22f7c9ec43fddb5ee28db5edcb69575dcb887f5d2d0bfcc9317773c0f4e32517ace087d33ace8155a099e16c259c1a2f4f8992fc17481b122ef9f0c38c9eafd46794ff34e32c3ad83345f2d4e19ce727379856af9b774c00dca25a8528f5a2318af1fcbffdc6e73e7e081b106b4fbfe1887ea5bde782f9b3c3a2cfe3b215a65c66c03d053bfdee4d5d940e3e28f0c2d9897460fc1153af198b9037aac4dcd76e999c6d6a1f67f559e87349c6416cd7fc37b85ee230ef8caa2417b65732b61dbdb68fd2d12eb3df87474a05f337305c79427a970700a1b63f2018ba06f32e522bba4d30a0ec8ae223d", "pythonpythonpythonpythonpython"},
#endif
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt {
unsigned char ct[0x10000];
unsigned int ctlen;
} *cur_salt;
/* One-time format init: scale keys-per-crypt for OpenMP and allocate
 * the candidate-password and per-candidate cracked-flag buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;   // over-subscription factor (see OMP_SCALE)
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc_tiny(sizeof(*saved_key) *
            self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
    cracked = mem_calloc_tiny(sizeof(*cracked) *
            self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Validate a "$kwallet$<ctlen>$<hex ct>" line.
 * Rejects lines whose claimed length disagrees with the hex data, and
 * (bug fix) lines whose length does not fit the fixed ct[0x10000]
 * buffer in struct custom_salt — get_salt() copies ctlen bytes into
 * that buffer without any bounds check, so an oversized (or negative)
 * length would overflow it. */
static int valid(char *ciphertext, struct fmt_main *self)
{
    char *ctcopy, *keeptr, *p;
    int res;

    if (strncmp(ciphertext, "$kwallet$", 9) != 0)
        return 0;
    ctcopy = strdup(ciphertext);
    keeptr = ctcopy;
    ctcopy += 9;
    if ((p = strtok(ctcopy, "$")) == NULL)    /* ctlen */
        goto err;
    res = atoi(p);
    /* BUG FIX: bound ctlen to the capacity of custom_salt.ct[] */
    if (res < 1 || res > 0x10000)
        goto err;
    if ((p = strtok(NULL, "$")) == NULL)      /* ct */
        goto err;
    if (strlen(p) != res * 2)
        goto err;
    MEM_FREE(keeptr);
    return 1;
err:
    MEM_FREE(keeptr);
    return 0;
}
/* Parse "$kwallet$<ctlen>$<hex>" into a custom_salt.
 * NOTE(review): also assigns the file-static cur_salt as a side effect;
 * the returned pointer is what the core later hands back to set_salt().
 * ctlen is trusted here — valid() is responsible for bounding it to
 * sizeof(cur_salt->ct). */
static void *get_salt(char *ciphertext)
{
    char *ctcopy = strdup(ciphertext);
    char *keeptr = ctcopy;
    int i;
    char *p;

    ctcopy += 9; /* skip over "$kwallet$*" */
    cur_salt = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
    p = strtok(ctcopy, "$");
    cur_salt->ctlen = atoi(p);
    p = strtok(NULL, "$");
    // decode the hex-encoded encrypted wallet blob
    for (i = 0; i < cur_salt->ctlen; i++)
        cur_salt->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
            + atoi16[ARCH_INDEX(p[i * 2 + 1])];
    MEM_FREE(keeptr);
    return (void *)cur_salt;
}
#define MIN(x,y) ((x) < (y) ? (x) : (y))
/* KWallet's password-to-key derivation: the passphrase is split into
 * 16-byte chunks; each chunk is SHA-1 hashed and the digest re-hashed
 * 2000 times; the per-chunk digests are concatenated and trimmed to the
 * final key, whose size (20/40/56) is returned in *key_size.
 * NOTE(review): the >=48-char case uses a 14/14/14 split of the first
 * three digests — presumably mirroring KWallet's own implementation;
 * do not "fix" it or real wallets stop cracking. */
static void password2hash(const char *password, unsigned char *hash, int *key_size)
{
    SHA_CTX ctx;
    unsigned char output[20 * ((PLAINTEXT_LENGTH + 15) / 16)];
    unsigned char buf[20];
    int i, j, oindex = 0;
    int plength = strlen(password);

    // divide the password into blocks of size 16 and hash the resulting
    // individually!
    // (<= so that an empty password still yields one block)
    for (i = 0; i <= plength; i += 16) {
        SHA1_Init(&ctx);
        SHA1_Update(&ctx, password + i, MIN(plength - i, 16));
        // To make brute force take longer
        for (j = 0; j < 2000; j++) {
            SHA1_Final(buf, &ctx);
            SHA1_Init(&ctx);
            SHA1_Update(&ctx, buf, 20);
        }
        memcpy(output + oindex, buf, 20);
        oindex += 20;
    }

    if (plength < 16) {
        // key size is 20
        memcpy(hash, output, 20);
        *key_size = 20;
    }
    else if (plength < 32) {
        // key size is 40 (20/20)
        memcpy(hash, output, 40);
        *key_size = 40;
    }
    else if (plength < 48) {
        // key size is 56 (20/20/16 split)
        memcpy(hash, output, 56);
        *key_size = 56;
    }
    else {
        // key size is 56 (14/14/14 split)
        memcpy(hash + 14 * 0, output + 0, 14);
        memcpy(hash + 14 * 1, output + 20, 14);
        memcpy(hash + 14 * 2, output + 40, 14);
        *key_size = 56;
    }
}
/* Install the salt for the subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
    cur_salt = (struct custom_salt *)salt;
}
/* Decrypt the wallet blob with the passphrase-derived Blowfish key and
 * check the embedded SHA-1 over the payload.
 * Decrypted layout: 8 bytes random | 4-byte big-endian payload size |
 * payload ... | 20-byte SHA-1 of the payload at the very end.
 * Returns 0 on success, -1 on structural error, -2 on hash mismatch. */
static int verify_passphrase(char *passphrase)
{
    unsigned char key[56]; /* 56 seems to be the max. key size */
    SHA_CTX ctx;
    BF_KEY bf_key;
    int sz;
    int i;
    int key_size = 0;
    unsigned char testhash[20];
    unsigned char buffer[0x10000]; // XXX respect the stack limits!
    const char *t;
    size_t fsize;

    password2hash(passphrase, key, &key_size);
    memcpy(buffer, cur_salt->ct, cur_salt->ctlen);
    /* Blowfish implementation in KWallet is wrong w.r.t endianness
     * Well, that is why we had bad_blowfish_plug.c originally ;) */
    alter_endianity(buffer, cur_salt->ctlen);
    /* decryption stuff */
    BF_set_key(&bf_key, key_size, key);
    // ECB-decrypt in place, one 8-byte Blowfish block at a time
    for(i = 0; i < cur_salt->ctlen; i += 8) {
        BF_ecb_encrypt(buffer + i, buffer + i, &bf_key, 0);
    }
    alter_endianity(buffer, cur_salt->ctlen);

    /* verification stuff */
    t = (char *) buffer;
    // strip the leading data
    t += 8; // one block of random data
    // strip the file size off: big-endian 32-bit; the masks keep each
    // byte correct even where plain char is signed
    fsize = 0;
    fsize |= ((size_t) (*t) << 24) & 0xff000000;
    t++;
    fsize |= ((size_t) (*t) << 16) & 0x00ff0000;
    t++;
    fsize |= ((size_t) (*t) << 8) & 0x0000ff00;
    t++;
    fsize |= (size_t) (*t) & 0x000000ff;
    t++;
    if (fsize > (size_t) (cur_salt->ctlen) - 8 - 4) {
        // file structure error
        return -1;
    }
    SHA1_Init(&ctx);
    SHA1_Update(&ctx, t, fsize);
    SHA1_Final(testhash, &ctx);
    // compare hashes: the stored digest is the last 20 bytes of the blob
    sz = cur_salt->ctlen;
    for (i = 0; i < 20; i++) {
        if (testhash[i] != buffer[sz - 20 + i]) {
            return -2;
        }
    }
    return 0;
}
/* Test every queued candidate against the current salt; each OpenMP
 * worker records a hit/miss flag in cracked[].  Returns the number of
 * keys processed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		cracked[index] = (verify_passphrase(saved_key[index]) == 0);

	return count;
}
/* Report whether any candidate in this batch matched. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate check: cracked[] was filled in by crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No further verification is possible here (hence FMT_NOT_EXACT in the
   format flags below); once cmp_one() succeeded, claim a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store one candidate password, truncated to PLAINTEXT_LENGTH and
 * NUL-terminated. */
static void kwallet_set_key(char *key, int index)
{
	int len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored plaintext candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* John the Ripper format descriptor wiring the kwallet methods above
   into the framework.  FMT_NOT_EXACT is set because cmp_exact() always
   returns 1 — verify_passphrase()'s SHA-1 check can false-positive. */
struct fmt_main fmt_kwallet = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
#if FMT_MAIN_VERSION > 11
		{ NULL },	/* no tunable-cost names */
#endif
		kwallet_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },	/* no tunable-cost value functions */
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		kwallet_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
1502.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  /* Fill A with the deterministic pattern (row + col) / nj. */
  int r, c;

  for (r = 0; r < ni; r++)
    for (c = 0; c < nj; c++)
      A[r][c] = ((DATA_TYPE) (r + c) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  /* Dump B to stderr so the kernel output is "live" (prevents dead-code
     elimination) and can be diffed against a reference run; a newline is
     emitted every 20 values. */
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 3x3 2-D convolution of A into B (the
   one-pixel border of B is left untouched).  The whole function is
   timed, including the call and return. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* BUG FIX: the original stacked two `parallel for` pragmas on one
     loop — the first still carrying un-instantiated autotuning
     placeholders (collapse(#P12), schedule(#P9, #P11)) and the second
     an invalid trailing `simd` clause — neither of which compiles.
     One valid worksharing pragma replaces both; j must be private
     because it is declared outside the loop. */
#pragma omp parallel for private(j) schedule(static, 28)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	      + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	      + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Driver: allocate and initialize the arrays, time the convolution
   kernel, then print B through polybench_prevent_dce so the result is
   observable and cannot be optimized away. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
backprop.c | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
#define OPEN
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
/* Byte-wise copy of `len` bytes from `from` to `to` (a memcpy stand-in).
 * BUG FIX: wrapped in do { } while (0) so the macro behaves as a single
 * statement — the original bare-block form breaks when used with a
 * trailing semicolon inside an un-braced if/else. */
#define fastcopy(to,from,len)\
do {\
  register char *_to,*_from;\
  register int _i,_l;\
  _to = (char *)(to);\
  _from = (char *)(from);\
  _l = (len);\
  for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
} while (0)
/*** Return random number between 0.0 and 1.0 ***/
/* Uniform pseudo-random float in [0, 1]: rand() scaled by BIGRND. */
float drnd()
{
  return ((float) rand() / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
/* Uniform pseudo-random float in [-1, 1], derived from drnd(). */
float dpn1()
{
  return ((drnd() * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(x)
float x;
{
float m;
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
/*** Allocate 1d array of floats ***/
/*** Allocate 1d array of floats.  Returns NULL (after printing a
     diagnostic) on allocation failure; contents are uninitialized. ***/
float *alloc_1d_dbl(n)
int n;
{
  float *new;

  new = (float *) malloc ((unsigned) (n * sizeof (float)));
  if (new == NULL) {
    printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
    return (NULL);
  }
  return (new);
}
/*** Allocate 2d array of floats ***/
/*** Allocate 2d array of floats as m row pointers to n-float rows.
     Returns NULL on any failure.
     BUG FIX: the original left earlier rows (and the pointer table)
     leaked and returned a table containing a NULL row when a row
     allocation failed; callers never check individual rows, so clean
     up and fail as a whole instead. ***/
float **alloc_2d_dbl(m, n)
int m, n;
{
  int i;
  float **new;

  new = (float **) malloc ((unsigned) (m * sizeof (float *)));
  if (new == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    return (NULL);
  }

  for (i = 0; i < m; i++) {
    new[i] = alloc_1d_dbl(n);
    if (new[i] == NULL) {
      /* release everything allocated so far */
      while (--i >= 0)
        free((char *) new[i]);
      free((char *) new);
      return (NULL);
    }
  }

  return (new);
}
/* Fill an (m+1) x (n+1) weight table with uniform random values in
   [0, 1].  Bounds are inclusive: index 0 holds the bias weights.
   (K&R definition; implicit int return, never used by callers.) */
bpnn_randomize_weights(w, m, n)
float **w;
int m, n;
{
  int i, j;

  for (i = 0; i <= m; i++) {
    for (j = 0; j <= n; j++) {
      w[i][j] = (float) rand()/RAND_MAX;
     // w[i][j] = dpn1();
    }
  }
}
/* Initialize a row of m+1 values to the constant 0.1 (used for the
   target vector; the true randomizing variant is commented out). */
bpnn_randomize_row(w, m)
float *w;
int m;
{
  int i;

  for (i = 0; i <= m; i++) {
    //w[i] = (float) rand()/RAND_MAX;
    w[i] = 0.1;
  }
}
/* Zero an (m+1) x (n+1) weight table (index 0 rows/cols hold biases). */
bpnn_zero_weights(w, m, n)
float **w;
int m, n;
{
  int row, col;

  for (row = 0; row <= m; row++)
    for (col = 0; col <= n; col++)
      w[row][col] = 0.0;
}
/* Seed the C library PRNG used by all the randomize helpers.
   (K&R style: the undeclared parameter `seed` defaults to int.) */
void bpnn_initialize(seed)
{
  printf("Random number generator seed: %d\n", seed);
  srand(seed);
}
/* Allocate a BPNN with n_in inputs, n_hidden hidden units and n_out
   outputs; each layer gets one extra slot (index 0) for the bias unit.
   NOTE(review): the member allocations are unchecked — on OOM this
   returns a partially built net with NULL members. */
BPNN *bpnn_internal_create(n_in, n_hidden, n_out)
int n_in, n_hidden, n_out;
{
  BPNN *newnet;

  newnet = (BPNN *) malloc (sizeof (BPNN));
  if (newnet == NULL) {
    printf("BPNN_CREATE: Couldn't allocate neural network\n");
    return (NULL);
  }

  newnet->input_n = n_in;
  newnet->hidden_n = n_hidden;
  newnet->output_n = n_out;
  /* unit activations, error terms and targets (+1 for bias slot 0) */
  newnet->input_units = alloc_1d_dbl(n_in + 1);
  newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
  newnet->output_units = alloc_1d_dbl(n_out + 1);
  newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
  newnet->output_delta = alloc_1d_dbl(n_out + 1);
  newnet->target = alloc_1d_dbl(n_out + 1);
  /* weight matrices and their momentum (previous-delta) copies */
  newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  return (newnet);
}
/* Release every array owned by the net, then the net itself.  Row
   counts mirror the "+1" bias slots used at allocation time. */
void bpnn_free(net)
BPNN *net;
{
  int n1, n2, i;

  n1 = net->input_n;
  n2 = net->hidden_n;

  free((char *) net->input_units);
  free((char *) net->hidden_units);
  free((char *) net->output_units);

  free((char *) net->hidden_delta);
  free((char *) net->output_delta);
  free((char *) net->target);

  for (i = 0; i <= n1; i++) {
    free((char *) net->input_weights[i]);
    free((char *) net->input_prev_weights[i]);
  }
  free((char *) net->input_weights);
  free((char *) net->input_prev_weights);

  for (i = 0; i <= n2; i++) {
    free((char *) net->hidden_weights[i]);
    free((char *) net->hidden_prev_weights[i]);
  }
  free((char *) net->hidden_weights);
  free((char *) net->hidden_prev_weights);

  free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
/* Build a fully-connected net: allocate via bpnn_internal_create, then
   initialize weights (zero under INITZERO, random otherwise), zero the
   momentum copies and give the target row its constant fill. */
BPNN *bpnn_create(n_in, n_hidden, n_out)
int n_in, n_hidden, n_out;
{
  BPNN *newnet;

  newnet = bpnn_internal_create(n_in, n_hidden, n_out);

#ifdef INITZERO
  bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
  bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
  bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
  bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
  bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
  bpnn_randomize_row(newnet->target, n_out);
  return (newnet);
}
/* Forward-propagate activations l1 (n1+1 values incl. the bias in slot
   0) through the (n1+1) x (n2+1) weight matrix `conn` into l2[1..n2].
   The reduction(+:sum) clause gives each thread a private `sum`, so the
   per-j reset/accumulate/use pattern inside the loop stays thread-local;
   the final reduction combine is never read.
   NOTE(review): private(sum) would state the intent more directly —
   confirm against the Rodinia reference before changing the clause. */
void bpnn_layerforward(l1, l2, conn, n1, n2)
float *l1, *l2, **conn;
int n1, n2;
{
  float sum;
  int j, k;

  /*** Set up thresholding unit ***/
  l1[0] = 1.0;
#ifdef OPEN
  omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static)
#endif
  /*** For each unit in second layer ***/
  for (j = 1; j <= n2; j++) {
    /*** Compute weighted sum of its inputs ***/
    sum = 0.0;
    for (k = 0; k <= n1; k++) {
      sum += conn[k][j] * l1[k];
    }
    l2[j] = squash(sum);
  }
}
//extern "C"
//extern "C"
/* Output-layer error: delta_j = o(1-o)(t-o); *err gets sum of |delta|. */
void bpnn_output_error(delta, target, output, nj, err)
float *delta, *target, *output, *err;
int nj;
{
  int j;
  float total;

  total = 0.0;
  for (j = 1; j <= nj; j++) {
    float o = output[j];
    float d = o * (1.0 - o) * (target[j] - o);

    delta[j] = d;
    total += ABS(d);
  }
  *err = total;
}
/* Hidden-layer error via backpropagation: each hidden unit's delta is
   h(1-h) times the weighted sum of the output deltas it feeds; *err
   receives the sum of |delta_h|. */
void bpnn_hidden_error(delta_h,
		       nh,
		       delta_o,
		       no,
		       who,
		       hidden,
		       err)
float *delta_h, *delta_o, *hidden, **who, *err;
int nh, no;
{
  int j, k;
  float h, sum, errsum;

  errsum = 0.0;
  for (j = 1; j <= nh; j++) {
    h = hidden[j];
    sum = 0.0;
    for (k = 1; k <= no; k++) {
      sum += delta_o[k] * who[j][k];
    }
    delta_h[j] = h * (1.0 - h) * sum;
    errsum += ABS(delta_h[j]);
  }
  *err = errsum;
}
/* Momentum weight update:
     w[k][j] += ETA*delta[j]*ly[k] + MOMENTUM*oldw[k][j]
   with the applied step remembered in oldw for the next call.  The
   parallel loop runs over j, so each (k,j) cell is written by exactly
   one thread.  K&R note: undeclared ndelta and nly default to int. */
void bpnn_adjust_weights(delta, ndelta, ly, nly, w, oldw)
float *delta, *ly, **w, **oldw;
{
  float new_dw;
  int k, j;

  /* bias activation */
  ly[0] = 1.0;
  //eta = 0.3;
  //momentum = 0.3;

#ifdef OPEN
  omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for \
      shared(oldw, w, delta) \
	  private(j, k, new_dw) \
	  firstprivate(ndelta, nly)
#endif
  for (j = 1; j <= ndelta; j++) {
    for (k = 0; k <= nly; k++) {
      new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j]));
	  w[k][j] += new_dw;
	  oldw[k][j] = new_dw;
    }
  }
}
/* Pure inference pass: propagate the current input activations through
   both weight layers; no error or weight update. */
void bpnn_feedforward(net)
BPNN *net;
{
  int in, hid, out;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
      net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
      net->hidden_weights, hid, out);
}
/* One full training step: forward pass, output/hidden error
   computation (reported through *eo and *eh), then momentum weight
   updates on both layers — output side first, since the hidden-layer
   deltas already captured the pre-update output weights. */
void bpnn_train(net, eo, eh)
BPNN *net;
float *eo, *eh;
{
  int in, hid, out;
  float out_err, hid_err;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
      net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
      net->hidden_weights, hid, out);

  /*** Compute error on output and hidden units. ***/
  bpnn_output_error(net->output_delta, net->target, net->output_units,
      out, &out_err);
  bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
      net->hidden_weights, net->hidden_units, &hid_err);
  *eo = out_err;
  *eh = hid_err;

  /*** Adjust input and hidden weights. ***/
  bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
      net->hidden_weights, net->hidden_prev_weights);
  bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
      net->input_weights, net->input_prev_weights);
}
/* Save an n1 x n2 x n3 network: a header of three ints followed by the
   two weight matrices as raw floats — the exact layout bpnn_read()
   expects.
   BUG FIXES: the header was written with fwrite(&n, sizeof(char),
   sizeof(char), ...) — one byte per dimension — while bpnn_read() reads
   sizeof(int) per dimension; and the matrix fwrite passed the BYTE
   count as the element count with element size sizeof(float), over-
   reading the buffer 4x.  fopen() is now also checked. */
void bpnn_save(net, filename)
BPNN *net;
char *filename;
{
  int n1, n2, n3, i, j, memcnt;
  float dvalue, **w;
  char *mem;
  FILE *pFile;

  pFile = fopen( filename, "w+" );
  if (pFile == NULL) {
    printf("BPNN_SAVE: Cannot create '%s'\n", filename);
    return;
  }

  n1 = net->input_n;  n2 = net->hidden_n;  n3 = net->output_n;
  printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);

  /* header: the three layer sizes as whole ints */
  fwrite( (char *) &n1 , sizeof(int), 1, pFile);
  fwrite( (char *) &n2 , sizeof(int), 1, pFile);
  fwrite( (char *) &n3 , sizeof(int), 1, pFile);

  /* input->hidden weights, marshalled through a staging buffer */
  memcnt = 0;
  w = net->input_weights;
  mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
  for (i = 0; i <= n1; i++) {
    for (j = 0; j <= n2; j++) {
      dvalue = w[i][j];
      fastcopy(&mem[memcnt], &dvalue, sizeof(float));
      memcnt += sizeof(float);
    }
  }
  fwrite( mem , sizeof(float), (unsigned) ((n1+1) * (n2+1)) , pFile);
  free(mem);

  /* hidden->output weights */
  memcnt = 0;
  w = net->hidden_weights;
  mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
  for (i = 0; i <= n2; i++) {
    for (j = 0; j <= n3; j++) {
      dvalue = w[i][j];
      fastcopy(&mem[memcnt], &dvalue, sizeof(float));
      memcnt += sizeof(float);
    }
  }
  fwrite( mem , sizeof(float), (unsigned) ((n2+1) * (n3+1)) , pFile);
  free(mem);

  fclose(pFile);
  return;
}
/* Load a network saved by bpnn_save(): header of three ints, then the
   two raw float weight matrices; momentum copies are zeroed.
   NOTE(review): read() results and mallocs are unchecked — a short or
   corrupt file silently yields uninitialized weights. */
BPNN *bpnn_read(filename)
char *filename;
{
  char *mem;
  BPNN *new;
  int fd, n1, n2, n3, i, j, memcnt;

  if ((fd = open(filename, 0, 0644)) == -1) {
    return (NULL);
  }

  printf("Reading '%s'\n", filename);  //fflush(stdout);

  /* header: the three layer sizes */
  read(fd, (char *) &n1, sizeof(int));
  read(fd, (char *) &n2, sizeof(int));
  read(fd, (char *) &n3, sizeof(int));
  new = bpnn_internal_create(n1, n2, n3);

  printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
  printf("Reading input weights...");  //fflush(stdout);

  /* input->hidden matrix, unmarshalled via a staging buffer */
  memcnt = 0;
  mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
  read(fd, mem, (n1+1) * (n2+1) * sizeof(float));
  for (i = 0; i <= n1; i++) {
    for (j = 0; j <= n2; j++) {
      fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);

  printf("Done\nReading hidden weights...");  //fflush(stdout);

  /* hidden->output matrix */
  memcnt = 0;
  mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
  read(fd, mem, (n2+1) * (n3+1) * sizeof(float));
  for (i = 0; i <= n2; i++) {
    for (j = 0; j <= n3; j++) {
      fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);
  close(fd);

  printf("Done\n");  //fflush(stdout);

  bpnn_zero_weights(new->input_prev_weights, n1, n2);
  bpnn_zero_weights(new->hidden_prev_weights, n2, n3);

  return (new);
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Start from an inverted box (origin at the far corner, zero extent);
    the scan below can only move its edges inward toward content.
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  /*
    Corner pixels serve as the background references: target[0] is the
    top-left pixel, target[1] top-right, target[2] bottom-left.
  */
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireCacheView(image);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* snapshot the shared bounds for this row's local merge
       NOTE(review): this critical section guards only the one read */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /* a pixel differing from its corner reference extends the box */
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /* merge the row-local box back into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* width/height held the maximum x/y; convert to true extents */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Convenience wrapper: depth across all composite channels. */
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    id;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth: the smallest bit count at which every selected
    channel value survives a scale-down/scale-up round trip.  Worker
    threads keep per-thread maxima in current_depth[], merged at the end.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=GetOpenMPMaximumThreads();
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (id=0; id < (ssize_t) number_threads; id++)
    current_depth[id]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        register const PixelPacket
          *restrict p;

        if (status == MagickFalse)
          continue;
        /*
          BUG FIX: the original advanced a single shared colormap pointer
          with p++ inside this parallel loop — a data race that also
          decouples the examined pixel from the loop index.  Index the
          colormap by the loop counter instead.
        */
        p=image->colormap+i;
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickStatusType
            status;  /* intentionally shadows the outer status */

          QuantumAny
            range;

          status=0;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            status|=GetPixelRed(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
              GetPixelRed(p),range),range);
          if ((channel & GreenChannel) != 0)
            status|=GetPixelGreen(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
              GetPixelGreen(p),range),range);
          if ((channel & BlueChannel) != 0)
            status|=GetPixelBlue(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
              GetPixelBlue(p),range),range);
          if (status == 0)
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickStatusType
          status;  /* intentionally shadows the outer status */

        QuantumAny
          range;

        status=0;
        range=GetQuantumRange(current_depth[id]);
        if ((channel & RedChannel) != 0)
          status|=GetPixelRed(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelRed(p),range),range);
        if ((channel & GreenChannel) != 0)
          status|=GetPixelGreen(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelGreen(p),range),range);
        if ((channel & BlueChannel) != 0)
          status|=GetPixelBlue(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelBlue(p),range),range);
        if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
          status|=GetPixelOpacity(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelOpacity(p),range),range);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          status|=GetPixelIndex(indexes+x) !=
            ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelIndex(indexes+
            x),range),range);
        if (status == 0)
          break;
        current_depth[id]++;
      }
      p++;
    }
    /* This thread already hit full quantum depth; the answer cannot
       grow further, so let the remaining iterations bail out early. */
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (id=1; id < (ssize_t) number_threads; id++)
    if (depth < current_depth[id])
      depth=current_depth[id];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
/* Smaller of two doubles (returns y when x is NaN, as the original
   if/return form did). */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /* Round the stored depth up to the next legal quantum depth; note the
     code also admits 64 even though the header comment lists 8/16/32.
     Depths above 64 pass through unchanged, as before. */
  static const size_t
    legal[] = { 8, 16, 32, 64 };

  size_t
    depth,
    i;

  depth=image->depth;
  for (i=0; i < sizeof(legal)/sizeof(legal[0]); i++)
    if (depth <= legal[i])
      {
        depth=legal[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double)
      MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Classify the image's potential type by successive narrowing: CMYK
   first, then monochrome, grayscale, palette, and finally true color —
   with the Matte variant chosen whenever a matte channel is present. */
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    {
      if (image->matte == MagickFalse)
        return(ColorSeparationType);
      return(ColorSeparationMatteType);
    }
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(GrayscaleMatteType);
      return(GrayscaleType);
    }
  if (IsPaletteImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(PaletteMatteType);
      return(PaletteType);
    }
  if (image->matte != MagickFalse)
    return(TrueColorMatteType);
  return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* trust a previously cached gray/bilevel classification */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if (IsRGBColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Assume bilevel until a non-monochrome gray pixel demotes the guess
    to grayscale, or a non-gray pixel aborts the scan entirely.
  */
  type=BilevelType;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /* cache the result on the (logically mutable) image */
  ((Image *) image)->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    ((Image *) image)->type=GrayscaleMatteType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    monochrome;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Scan until a pixel that is not pure black or pure white is found;
    on success cache BilevelType on the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IsRGBColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  monochrome=MagickTrue;
  image_view=AcquireCacheView(image);
  for (y=0; (y < (ssize_t) image->rows) && (monochrome != MagickFalse); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (monochrome == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    An image without a matte channel is opaque by definition; otherwise
    scan until a non-opaque pixel (or an unreadable row) is found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    x=0;
    while ((x < (ssize_t) image->columns) &&
           (GetPixelOpacity(p) == OpaqueOpacity))
    {
      p++;
      x++;
    }
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* an early exit from the row loop means a non-opaque (or unreadable)
     pixel was encountered */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
/* Convenience wrapper: set the depth of all composite channels. */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  QuantumAny
    range;
  ssize_t
    y;
  /*
    Set the depth of the selected channels: every pixel (and, for
    PseudoClass images, every colormap entry) is requantized so its value
    is exactly representable in `depth' bits.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (GetImageDepth(image,&image->exception) <= (size_t)
      MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH))
    {
      /*
        Pixels already fit in the requested depth; record it and return.
      */
      image->depth=depth;
      return(MagickTrue);
    }
  /*
    Scale pixels to desired depth.
  */
  status=MagickTrue;
  range=GetQuantumRange(depth);
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelRed(q),range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelGreen(q),range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelBlue(q),range),range));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelOpacity(q),range),range));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelIndex(indexes+x),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        /*
          Index each colormap entry by the loop variable.  The previous
          code advanced a single shared pointer (p++) inside the OpenMP
          parallel loop, which is a data race and scrambles the mapping
          between iteration i and its colormap entry when threaded.
        */
        register PixelPacket
          *restrict p;
        p=image->colormap+i;
        if ((channel & RedChannel) != 0)
          p->red=ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),range);
        if ((channel & GreenChannel) != 0)
          p->green=ScaleAnyToQuantum(ScaleQuantumToAny(p->green,range),range);
        if ((channel & BlueChannel) != 0)
          p->blue=ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),range);
        if ((channel & OpacityChannel) != 0)
          p->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,range),
            range);
      }
    }
  image->depth=depth;
  return(status);
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _EdgeInfo
{
  /* per-edge background census (fraction of pixels differing from the
     background color), as computed by GetEdgeBackgroundCensus() */
  double
    left,
    right,
    top,
    bottom;
} EdgeInfo;
static double GetEdgeBackgroundCensus(const Image *image,
  const CacheView *image_view,const GravityType gravity,const size_t width,
  const size_t height,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;
  const char
    *artifact;
  double
    census;
  Image
    *edge_image;
  PixelInfo
    background,
    pixel;
  RectangleInfo
    edge_geometry;
  const Quantum
    *p;
  ssize_t
    y;
  /*
    Determine the percent of image background for this edge: crop the strip
    selected by gravity/width/height/offsets and return the fraction of its
    pixels that differ (fuzzily) from the background color.  The background
    is sampled from the corner pixel nearest the requested gravity, unless
    overridden by the "background" or "trim:background-color" artifacts.
  */
  switch (gravity)
  {
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
        exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
        (ssize_t) image->rows-1,1,1,exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
        exception);
      break;
    }
  }
  GetPixelInfoPixel(image,p,&background);
  /* artifact overrides take precedence over the sampled corner pixel */
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  artifact=GetImageArtifact(image,"trim:background-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  edge_geometry.width=width;
  edge_geometry.height=height;
  edge_geometry.x=x_offset;
  edge_geometry.y=y_offset;
  GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
  edge_image=CropImage(image,&edge_geometry,exception);
  if (edge_image == (Image *) NULL)
    return(0.0);
  census=0.0;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      /* count pixels that do NOT match the background color */
      GetPixelInfoPixel(edge_image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        census++;
      p+=GetPixelChannels(edge_image);
    }
  }
  /* normalize the count to a fraction of the cropped strip's area */
  census/=((double) edge_image->columns*edge_image->rows);
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  return(census);
}
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
  double
    minimum;
  /*
    Return the smallest background census among the four image edges.
  */
  minimum=edge->left;
  if (edge->right < minimum)
    minimum=edge->right;
  if (edge->top < minimum)
    minimum=edge->top;
  if (edge->bottom < minimum)
    minimum=edge->bottom;
  return(minimum);
}
static RectangleInfo GetEdgeBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;
  const char
    *artifact;
  double
    background_census,
    percent_background;
  EdgeInfo
    edge,
    vertex;
  Image
    *edge_image;
  RectangleInfo
    bounds;
  /*
    Get the image bounding box: iteratively shave the edge whose strip is
    the most background-like until every edge's non-background census meets
    the "trim:percent-background" threshold.  `vertex' accumulates how many
    rows/columns have been trimmed from each side.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  SetGeometry(image,&bounds);
  edge_image=CloneImage(image,0,0,MagickTrue,exception);
  if (edge_image == (Image *) NULL)
    return(bounds);
  /* drop any page offset so crops are relative to the pixel data */
  (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
  (void) memset(&vertex,0,sizeof(vertex));
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  /* initial census for the four one-pixel-wide border strips */
  edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
    1,0,0,0,exception);
  edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
    1,0,0,0,exception);
  edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
    0,1,0,0,exception);
  edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
    0,1,0,0,exception);
  percent_background=1.0;
  artifact=GetImageArtifact(edge_image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
  /* invert: user gives allowed background %, loop compares non-background */
  percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
    1.0);
  background_census=GetMinEdgeBackgroundCensus(&edge);
  for ( ; background_census < percent_background;
          background_census=GetMinEdgeBackgroundCensus(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left-background_census) < MagickEpsilon)
      {
        /*
          Trim left edge.
        */
        vertex.left++;
        bounds.width--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.right-background_census) < MagickEpsilon)
      {
        /*
          Trim right edge.
        */
        vertex.right++;
        bounds.width--;
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.top-background_census) < MagickEpsilon)
      {
        /*
          Trim top edge.
        */
        vertex.top++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        continue;
      }
    if (fabs(edge.bottom-background_census) < MagickEpsilon)
      {
        /*
          Trim bottom edge.
        */
        vertex.bottom++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  /* the trimmed-off left/top counts become the box origin */
  bounds.x=(ssize_t) vertex.left;
  bounds.y=(ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  return(bounds);
}
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const char
    *artifact;
  MagickBooleanType
    status;
  PixelInfo
    target[3],
    zero;
  RectangleInfo
    bounds;
  const Quantum
    *p;
  ssize_t
    y;
  /*
    Compute the bounding box of the non-background region.  Until merged,
    bounds holds max-coordinates in width/height and min-coordinates in
    x/y, so it starts "inverted" (width/height 0, x/y at the far corner).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* percent-background trimming uses the edge-census algorithm instead */
  artifact=GetImageArtifact(image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    return(GetEdgeBoundingBox(image,exception));
  artifact=GetImageArtifact(image, "trim:edges");
  if (artifact == (const char *) NULL)
    {
      bounds.width=0;
      bounds.height=0;
      bounds.x=(ssize_t) image->columns;
      bounds.y=(ssize_t) image->rows;
    }
  else
    {
      char
        *edges,
        *p,
        *q;
      /*
        Only the listed edges participate in the trim: pre-seed the other
        sides so the scan below cannot move them.
      */
      bounds.width=(ssize_t) image->columns;
      bounds.height=(ssize_t) image->rows;
      bounds.x=0;
      bounds.y=0;
      edges=AcquireString(artifact);
      q=edges;
      while ((p=StringToken(",",&q)) != (char *) NULL)
      {
        if (LocaleCompare(p,"north") == 0)
          bounds.y=(ssize_t) image->rows;
        if (LocaleCompare(p,"east") == 0)
          bounds.width=0;
        if (LocaleCompare(p,"south") == 0)
          bounds.height=0;
        if (LocaleCompare(p,"west") == 0)
          bounds.x=(ssize_t) image->columns;
      }
      edges=DestroyString(edges);
    }
  /*
    Sample three corner pixels as trim targets: top-left (left/top edges),
    top-right (right edge), bottom-left (bottom edge).
  */
  GetPixelInfo(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const Quantum *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  GetPixelInfoPixel(image,p,&target[0]);
  GetPixelInfo(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[1]);
  GetPixelInfo(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[2]);
  status=MagickTrue;
  GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
  magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;
    RectangleInfo
      bounding_box;
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /* snapshot the shared bounds under the same critical section used to
       merge results below */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,p,&pixel);
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p+=GetPixelChannels(image);
    }
    /* merge this row's box into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* convert max coordinates (inclusive) into extents */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o n v e x H u l l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageConvexHull() returns the convex hull points of an image canvas.
%
% The format of the GetImageConvexHull method is:
%
% PointInfo *GetImageConvexHull(const Image *image,
% size_t number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the convex hull.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
  double
    determinant;
  /*
    Cross product of the vectors (b-a) and (c-a): positive for a
    counter-clockwise turn, negative for clockwise, zero when collinear.
  */
  determinant=(b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x);
  return(determinant);
}
static PixelInfo GetEdgeBackgroundColor(const Image *image,
  const CacheView *image_view,ExceptionInfo *exception)
{
  const char
    *artifact;
  double
    census[4],
    edge_census;
  PixelInfo
    background[4],
    edge_background;
  ssize_t
    i;
  /*
    Most dominant color of edges/corners is the background color of the
    image: sample each of the four edges (west, east, north, south), count
    how many pixels differ from the corner sample, and return the candidate
    with the highest census.
  */
  artifact=GetImageArtifact(image,"convex-hull:background-color");
  if (artifact == (const char *) NULL)
    artifact=GetImageArtifact(image,"background");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
  for (i=0; i < 4; i++)
  {
    CacheView
      *edge_view;
    GravityType
      gravity;
    Image
      *edge_image;
    PixelInfo
      pixel;
    RectangleInfo
      edge_geometry;
    const Quantum
      *p;
    ssize_t
      y;
    census[i]=0.0;
    (void) memset(&edge_geometry,0,sizeof(edge_geometry));
    /*
      Each case must end with break: without it, every iteration fell
      through to case 3 and sampled the south-east corner with
      SouthGravity, so all four candidates were identical.
    */
    switch (i)
    {
      case 0:
      default:
      {
        p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
          exception);
        gravity=WestGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 1:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
          exception);
        gravity=EastGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 2:
      {
        p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
        gravity=NorthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
      case 3:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
          (ssize_t) image->rows-1,1,1,exception);
        gravity=SouthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
    }
    GetPixelInfoPixel(image,p,background+i);
    if (artifact != (const char *) NULL)
      (void) QueryColorCompliance(artifact,AllCompliance,background+i,
        exception);
    GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
    edge_image=CropImage(image,&edge_geometry,exception);
    if (edge_image == (Image *) NULL)
      continue;
    edge_view=AcquireVirtualCacheView(edge_image,exception);
    for (y=0; y < (ssize_t) edge_image->rows; y++)
    {
      ssize_t
        x;
      p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
        exception);
      if (p == (const Quantum *) NULL)
        break;
      for (x=0; x < (ssize_t) edge_image->columns; x++)
      {
        GetPixelInfoPixel(edge_image,p,&pixel);
        if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
          census[i]++;
        p+=GetPixelChannels(edge_image);
      }
    }
    edge_view=DestroyCacheView(edge_view);
    edge_image=DestroyImage(edge_image);
  }
  /* pick the edge background with the highest census */
  edge_census=(-1.0);
  for (i=0; i < 4; i++)
    if (census[i] > edge_census)
      {
        edge_background=background[i];
        edge_census=census[i];
      }
  return(edge_background);
}
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
  PointInfo ***monotone_chain,size_t *chain_length)
{
  PointInfo
    **chain;
  ssize_t
    i;
  size_t
    demark,
    n;
  /*
    Construct the upper and lower hulls: rightmost to leftmost counterclockwise.
    This is Andrew's monotone-chain algorithm; `vertices' is assumed to be
    sorted (callers pass points generated in row-major scan order).  The
    chain stores pointers into `vertices'; *chain_length receives the total
    number of hull points.
  */
  chain=(*monotone_chain);
  n=0;
  /* lower hull: pop while the last two points and the candidate do not
     make a strictly clockwise turn */
  for (i=0; i < (ssize_t) number_vertices; i++)
  {
    while ((n >= 2) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  /* upper hull: never pop below the lower hull's end (demark) */
  demark=n+1;
  for (i=(ssize_t) number_vertices-2; i >= 0; i--)
  {
    while ((n >= demark) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  *chain_length=n;
}
MagickExport PointInfo *GetImageConvexHull(const Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MemoryInfo
    *monotone_info,
    *vertices_info;
  PixelInfo
    background;
  PointInfo
    *convex_hull,
    **monotone_chain,
    *vertices;
  size_t
    n;
  ssize_t
    y;
  /*
    Identify convex hull vertices of image foreground object(s): collect
    every non-background pixel coordinate, run the monotone-chain hull on
    them, and return a newly allocated copy of the hull points (caller
    frees).  Returns NULL on allocation failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  /* worst case: every pixel is foreground; chain needs 2x for both hulls */
  vertices_info=AcquireVirtualMemory(image->columns,image->rows*
    sizeof(*vertices));
  monotone_info=AcquireVirtualMemory(2*image->columns,2*
    image->rows*sizeof(*monotone_chain));
  if ((vertices_info == (MemoryInfo *) NULL) ||
      (monotone_info == (MemoryInfo *) NULL))
    {
      if (monotone_info != (MemoryInfo *) NULL)
        monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
      if (vertices_info != (MemoryInfo *) NULL)
        vertices_info=RelinquishVirtualMemory(vertices_info);
      return((PointInfo *) NULL);
    }
  vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
  monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
  image_view=AcquireVirtualCacheView(image,exception);
  background=GetEdgeBackgroundColor(image,image_view,exception);
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;
      /* every pixel that differs from the background is a candidate vertex */
      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        {
          vertices[n].x=(double) x;
          vertices[n].y=(double) y;
          n++;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Return the convex hull of the image foreground object(s).
  */
  TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
  convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*convex_hull));
  if (convex_hull != (PointInfo *) NULL)
    for (n=0; n < *number_vertices; n++)
      convex_hull[n]=(*monotone_chain[n]);
  monotone_info=RelinquishVirtualMemory(monotone_info);
  vertices_info=RelinquishVirtualMemory(vertices_info);
  return(convex_hull);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    i;
  size_t
    *current_depth,
    depth,
    number_threads;
  ssize_t
    y;
  /*
    Compute image depth: the smallest bit depth at which every updatable
    channel value is exactly representable.  One accumulator per potential
    thread is kept in current_depth[]; the maximum over all accumulators is
    the answer.  Three strategies are used, in order of preference:
    colormap-only scan, lookup-table scan (non-HDRI, small QuantumRange),
    and a per-pixel requantization test.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /*
        PseudoClass without alpha: only the colormap entries need checking.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();
        /* grow this thread's depth until entry i is representable */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;
          QuantumAny
            range;
          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      /* reduce the per-thread accumulators to a single maximum */
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      size_t
        *depth_map;
      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      /* precompute, for every possible quantum, the depth it requires */
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;
        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;
          QuantumAny
            range;
          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
  magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();
        const Quantum
          *magick_restrict p;
        ssize_t
          x;
        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          continue;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
              current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
          }
          p+=GetPixelChannels(image);
        }
        /* already at maximum: signal remaining iterations to bail early */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Compute pixel depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
  magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;
        PixelTrait
          traits;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* grow depth until a quantize round-trip preserves this sample */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;
          range=GetQuantumRange(current_depth[id]);
          if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
            break;
          current_depth[id]++;
        }
      }
      p+=GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M i n i m u m B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMinimumBoundingBox() returns the points that form the minimum
% bounding box around the image foreground objects with the "Rotating
% Calipers" algorithm. The method also returns these properties:
% minimum-bounding-box:area, minimum-bounding-box:width,
% minimum-bounding-box:height, and minimum-bounding-box:angle.
%
% The format of the GetImageMinimumBoundingBox method is:
%
% PointInfo *GetImageMinimumBoundingBox(Image *image,
% size_t number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the bounding box.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Best rotating-calipers candidate found so far: the bounding box area,
  its width/height, the projection extent used, and — presumably the hull
  vertex indices of the supporting edge (p,q) and its antipode (v); the
  consuming code extends past this chunk, so confirm against the full file.
*/
typedef struct _CaliperInfo
{
  double
    area,
    width,
    height,
    projection;
  ssize_t
    p,
    q,
    v;
} CaliperInfo;
static inline double getAngle(PointInfo *p,PointInfo *q)
{
  double
    radians;
  /*
    Angle between the line (p,q) and the horizontal axis, in degrees.
  */
  radians=atan2(q->y-p->y,q->x-p->x);
  return(RadiansToDegrees(radians));
}
static inline double getDistance(PointInfo *p,PointInfo *q)
{
  double
    length;
  /*
    Squared Euclidean distance between p and q; hypot() computes the
    intermediate length without overflow or underflow.
  */
  length=hypot(p->x-q->x,p->y-q->y);
  return(length*length);
}
static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    length;
  /*
    Scalar projection of the vector (v-p) onto the line through p and q.
    A degenerate segment (p == q) yields INFINITY.
  */
  length=getDistance(p,q);
  if (length < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(length);
}
static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    length;
  /*
    Signed perpendicular distance from point v to the line through p and q.
    A degenerate segment (p == q) yields INFINITY.
  */
  length=getDistance(p,q);
  if (length < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(length);
}
MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CaliperInfo
    caliper_info;
  const char
    *artifact;
  double
    angle,
    diameter,
    distance;
  PointInfo
    *bounding_box,  /* 4-vertex result; caller frees */
    *vertices;      /* convex hull of the foreground, freed before return */
  ssize_t
    i;
  size_t
    number_hull_vertices;
  /*
    Generate the minimum bounding box with the "Rotating Calipers" algorithm.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
  if (vertices == (PointInfo *) NULL)
    return((PointInfo *) NULL);
  /* the result is always a quadrilateral */
  *number_vertices=4;
  bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*bounding_box));
  if (bounding_box == (PointInfo *) NULL)
    {
      vertices=(PointInfo *) RelinquishMagickMemory(vertices);
      return((PointInfo *) NULL);
    }
  /* seed the search with an area no candidate box can exceed */
  caliper_info.area=2.0*image->columns*image->rows;
  caliper_info.width=(double) image->columns+image->rows;
  caliper_info.height=0.0;
  caliper_info.projection=0.0;
  caliper_info.p=(-1);
  caliper_info.q=(-1);
  caliper_info.v=(-1);
  /*
    Consider each hull edge (i, i+1) as a candidate supporting edge of the
    minimum-area rectangle (one side of the optimum box is collinear with
    some hull edge).
    NOTE(review): assumes the hull has at least one vertex; otherwise
    caliper_info.p stays -1 and vertices[-1] is read below -- TODO confirm
    GetImageConvexHull() never returns an empty hull with a non-NULL result.
  */
  for (i=0; i < (ssize_t) number_hull_vertices; i++)
  {
    double
      area = 0.0,
      max_projection = 0.0,
      min_diameter = -1.0,
      min_projection = 0.0;
    ssize_t
      j,
      k;
    ssize_t
      p = -1,
      q = -1,
      v = -1;
    /*
      Despite its name, min_diameter tracks the LARGEST perpendicular
      distance of any hull vertex from edge (i, i+1), i.e. the caliper
      width for this edge orientation.
    */
    for (j=0; j < (ssize_t) number_hull_vertices; j++)
    {
      double
        diameter;
      diameter=fabs(getFeretDiameter(&vertices[i],
        &vertices[(i+1) % number_hull_vertices],&vertices[j]));
      if (min_diameter < diameter)
        {
          min_diameter=diameter;
          p=i;
          q=(i+1) % number_hull_vertices;
          v=j;
        }
    }
    /* extent of the hull along the edge: spread of vertex projections */
    for (k=0; k < (ssize_t) number_hull_vertices; k++)
    {
      double
        projection;
      /*
        Rotating calipers.
      */
      projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
      min_projection=MagickMin(min_projection,projection);
      max_projection=MagickMax(max_projection,projection);
    }
    /* keep the supporting edge that yields the smallest box area */
    area=min_diameter*(max_projection-min_projection);
    if (caliper_info.area > area)
      {
        caliper_info.area=area;
        caliper_info.width=min_diameter;
        caliper_info.height=max_projection-min_projection;
        caliper_info.projection=max_projection;
        caliper_info.p=p;
        caliper_info.q=q;
        caliper_info.v=v;
      }
  }
  /*
    Initialize minimum bounding box.
  */
  diameter=getFeretDiameter(&vertices[caliper_info.p],
    &vertices[caliper_info.q],&vertices[caliper_info.v]);
  angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
    vertices[caliper_info.q].x-vertices[caliper_info.p].x);
  /* walk the four corners: along the edge, across, back, and across again */
  bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
    caliper_info.projection;
  bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
    caliper_info.projection;
  bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  /*
    Export minimum bounding box properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
    GetMagickPrecision(),caliper_info.area);
  (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
    GetMagickPrecision(),caliper_info.width);
  (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
    GetMagickPrecision(),caliper_info.height);
  (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.p].x,
    GetMagickPrecision(),vertices[caliper_info.p].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.q].x,
    GetMagickPrecision(),vertices[caliper_info.q].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.v].x,
    GetMagickPrecision(),vertices[caliper_info.v].y);
  /*
    Find smallest angle to origin.
  */
  distance=hypot(bounding_box[0].x,bounding_box[0].y);
  angle=getAngle(&bounding_box[0],&bounding_box[1]);
  for (i=1; i < 4; i++)
  {
    double d = hypot(bounding_box[i].x,bounding_box[i].y);
    if (d < distance)
      {
        distance=d;
        angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
      }
  }
  /* optional override: bias the reported angle toward landscape/portrait */
  artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
  if (artifact != (const char *) NULL)
    {
      double
        length,
        q_length,
        p_length;
      PointInfo
        delta,
        point;
      /*
        Find smallest perpendicular distance from edge to origin.
      */
      /* translate the box so its minimum x/y corner sits at the origin */
      point=bounding_box[0];
      for (i=1; i < 4; i++)
      {
        if (bounding_box[i].x < point.x)
          point.x=bounding_box[i].x;
        if (bounding_box[i].y < point.y)
          point.y=bounding_box[i].y;
      }
      for (i=0; i < 4; i++)
      {
        bounding_box[i].x-=point.x;
        bounding_box[i].y-=point.y;
      }
      for (i=0; i < 4; i++)
      {
        double
          d,
          intercept,
          slope;
        delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
        delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
        slope=delta.y*PerceptibleReciprocal(delta.x);
        intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
        /* point-to-line distance formula; keep the closest edge's direction */
        d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
          PerceptibleReciprocal(sqrt(slope*slope+1.0)));
        if ((i == 0) || (d < distance))
          {
            distance=d;
            point=delta;
          }
      }
      angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
      length=hypot(point.x,point.y);
      /* decide whether the closest edge is the long or the short side */
      p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
        length);
      q_length=fabs(length-(double) MagickMin(caliper_info.width,
        caliper_info.height));
      if (LocaleCompare(artifact,"landscape") == 0)
        {
          if (p_length > q_length)
            angle+=(angle < 0.0) ? 90.0 : -90.0;
        }
      else
        if (LocaleCompare(artifact,"portrait") == 0)
          {
            if (p_length < q_length)
              angle+=(angle >= 0.0) ? 90.0 : -90.0;
          }
    }
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /*
    Round the image depth up to the nearest legal quantum depth (8, 16, 32,
    or 64 bits); a depth beyond 64 is returned unchanged.  When constrain is
    not MagickFalse, clamp the result to the compiled-in
    MAGICKCORE_QUANTUM_DEPTH.
  */
  static const size_t
    legal_depths[4] = { 8, 16, 32, 64 };

  size_t
    depth,
    n;

  depth=image->depth;
  for (n=0; n < (sizeof(legal_depths)/sizeof(legal_depths[0])); n++)
    if (depth <= legal_depths[n])
      {
        depth=legal_depths[n];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  /*
    Classify the image from its cached attributes and palette/gray checks:
    CMYK first, then bi-level, grayscale, palette, and finally true color,
    with the alpha variant chosen when an alpha trait is present.
  */
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity of
% each pixel is either 0 or QuantumRange.  Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  ImageType
    type;
  const Quantum
    *p;
  ssize_t
    x;
  ssize_t
    y;
  /*
    Inspect the pixels: BilevelType if every pixel is pure black or white,
    GrayscaleType/GrayscaleAlphaType if every pixel has equal RGB
    components, UndefinedType as soon as a non-gray pixel is found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* trust a previously cached grayscale classification */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  /* grayscale analysis only makes sense in an sRGB-compatible space */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /* start with the strictest classification and relax as pixels disprove it */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          /* a colored pixel settles the answer; stop scanning */
          type=UndefinedType;
          break;
        }
      /* gray but not pure black/white downgrades bi-level to grayscale */
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Inspect the pixels and return MagickTrue only when every pixel is pure
    black or pure white; scanning stops at the first counter-example.
  */
  CacheView
    *image_view;

  const Quantum
    *p;

  MagickBooleanType
    monochrome;

  ssize_t
    x,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* a cached bi-level classification needs no pixel scan */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  monochrome=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (monochrome != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(monochrome);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Classify the image by inspecting its pixels (unlike GetImageType(),
    which only consults cached attributes): CMYK first, then bi-level,
    grayscale, palette, and finally true color, with the alpha variant
    chosen when an alpha trait is present.
  */
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  /*
    The verdict is based solely on the cached image->type tag; use
    IdentifyImageGray() to inspect the actual pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleAlphaType:
      return(MagickTrue);
    default:
      return(MagickFalse);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  /*
    The verdict is based solely on the cached image->type tag; use
    IdentifyImageMonochrome() to inspect the actual pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Will return MagickTrue immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Return MagickTrue when no pixel carries an alpha value other than
    OpaqueAlpha; the scan stops at the first translucent pixel.  A pixel
    cache read failure is treated as "not opaque".
  */
  CacheView
    *image_view;

  const Quantum
    *p;

  MagickBooleanType
    opaque;

  ssize_t
    x,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* no alpha channel at all means trivially opaque */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (opaque != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        {
          opaque=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  QuantumAny
    range;
  ssize_t
    y;
  /*
    Quantize every updatable channel to 'depth' significant bits by a
    round-trip through ScaleQuantumToAny/ScaleAnyToQuantum, then record the
    new depth on the image.  Returns MagickTrue on success.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* a depth at or above the native quantum depth needs no pixel rewrite */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;
      /* palette image: quantize the colormap entries as well */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* fast path (integer quantum builds only): precompute a lookup table */
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;
      ssize_t
        i;
      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        /* a prior row's failure short-circuits the remaining iterations */
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;
            PixelTrait
              traits;
            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      /* only record the new depth when every row was rewritten */
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;
    Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;
        PixelTrait
          traits;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;
  ImageInfo
    *image_info;
  MagickBooleanType
    status;
  QuantizeInfo
    *quantize_info;
  /*
    Convert the image in place to the requested type: transform the
    colorspace, quantize and/or adjust the alpha channel as each type
    requires, then record the type on success.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  /* honor a per-image "dither" artifact when quantizing below */
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* gray, normalized, then quantized to a 2-color palette */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      /* ensure an alpha channel exists (fully opaque by default) */
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* quantize only when the image is not already a small palette */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* threshold only the alpha channel, then restore the channel mask */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /* only tag the image with the new type when every step succeeded */
  image->type=type;
  return(MagickTrue);
}
|
thd_info.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "thd_info.h"
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Perform a parallel SUM reduction.
*
* @param thds The thread structure we are using in the reduction.
* @param scratchid Which scratch array to reduce.
* @param nelems How many elements in the scratch array.
*/
/*
 * Tree-style SUM reduction across the team's scratch arrays.  Must be
 * called by every thread of the parallel region; on return the total is
 * accumulated into thread 0's scratch array (the OpenMP master).  The
 * surviving threads' arrays hold partial sums and should not be reused
 * without re-zeroing.
 */
static inline void p_reduce_sum(
  thd_info * const thds,
  idx_t const scratchid,
  idx_t const nelems)
{
  int const tid = splatt_omp_get_thread_num();
  int const nthreads = splatt_omp_get_num_threads();
  val_t * const myvals = (val_t *) thds[tid].scratch[scratchid];
  int half = nthreads / 2;
  while(half > 0) {
    /* the lower half of the active threads absorbs the upper half */
    if(tid < half && tid + half < nthreads) {
      val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid];
      for(idx_t i=0; i < nelems; ++i) {
        myvals[i] += target[i];
      }
    }
    /* all additions of this round must land before anyone reads them */
    #pragma omp barrier
    /* check for odd number */
    #pragma omp master
    if(half > 1 && half % 2 == 1) {
      /* an odd 'half' would strand thds[half-1]; fold it in now */
      val_t const * const last = (val_t *) thds[half-1].scratch[scratchid];
      for(idx_t i=0; i < nelems; ++i) {
        myvals[i] += last[i];
      }
    }
    /* next iteration */
    half /= 2;
  }
  /* account for odd thread at end */
  #pragma omp master
  {
    if(nthreads % 2 == 1) {
      val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid];
      for(idx_t i=0; i < nelems; ++i) {
        myvals[i] += last[i];
      }
    }
  }
  /* make the finished total visible to the whole team before returning */
  #pragma omp barrier
}
/**
* @brief Perform a parallel MAX reduction.
*
* @param thds The thread structure we are using in the reduction.
* @param scratchid Which scratch array to reduce.
* @param nelems How many elements in the scratch array.
*/
/*
 * Tree-style MAX reduction across the team's scratch arrays.  Mirrors
 * p_reduce_sum() but combines with element-wise SS_MAX instead of
 * addition; the result lands in thread 0's scratch array.
 */
static inline void p_reduce_max(
  thd_info * const thds,
  idx_t const scratchid,
  idx_t const nelems)
{
  int const tid = splatt_omp_get_thread_num();
  int const nthreads = splatt_omp_get_num_threads();
  val_t * const myvals = (val_t *) thds[tid].scratch[scratchid];
  int half = nthreads / 2;
  while(half > 0) {
    /* the lower half of the active threads absorbs the upper half */
    if(tid < half && tid + half < nthreads) {
      val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid];
      for(idx_t i=0; i < nelems; ++i) {
        myvals[i] = SS_MAX(myvals[i], target[i]);
      }
    }
    /* this round's updates must land before anyone reads them */
    #pragma omp barrier
    /* check for odd number */
    #pragma omp master
    if(half > 1 && half % 2 == 1) {
      /* an odd 'half' would strand thds[half-1]; fold it in now */
      val_t const * const last = (val_t *) thds[half-1].scratch[scratchid];
      for(idx_t i=0; i < nelems; ++i) {
        myvals[i] = SS_MAX(myvals[i], last[i]);
      }
    }
    /* next iteration */
    half /= 2;
  }
  /* account for odd thread at end */
  #pragma omp master
  {
    if(nthreads % 2 == 1) {
      val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid];
      for(idx_t i=0; i < nelems; ++i) {
        myvals[i] = SS_MAX(myvals[i], last[i]);
      }
    }
  }
  /* make the finished result visible to the whole team before returning */
  #pragma omp barrier
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
void thd_reduce(
    thd_info * const thds,
    idx_t const scratchid,
    idx_t const nelems,
    splatt_reduce_type const which)
{
  /* Combine the team's scratch arrays; result lands in thread 0's copy. */
  if(splatt_omp_get_num_threads() == 1) {
    /* a lone thread already holds the full result */
    return;
  }

  /* just to be safe in case any thread data is being copied */
  #pragma omp barrier

  if(which == REDUCE_SUM) {
    p_reduce_sum(thds, scratchid, nelems);
  } else if(which == REDUCE_MAX) {
    p_reduce_max(thds, scratchid, nelems);
  } else {
    fprintf(stderr, "SPLATT: thd_reduce supports SUM and MAX only.\n");
    abort();
  }
}
/*
 * Allocate and zero per-thread scratch space.
 *
 * The variadic arguments are 'nscratch' values of type idx_t, each giving
 * the byte size of one scratch array; the same sizes are used for every
 * thread.  All allocations are checked: on out-of-memory the process
 * aborts with a diagnostic instead of dereferencing NULL later.
 *
 * Ownership: the caller releases the returned structure with thd_free().
 */
thd_info * thd_init(
    idx_t const nthreads,
    idx_t const nscratch,
    ...)
{
  thd_info * thds = (thd_info *) malloc(nthreads * sizeof(thd_info));
  if(thds == NULL) {
    fprintf(stderr, "SPLATT: thd_init failed to allocate thread info.\n");
    abort();
  }

  for(idx_t t=0; t < nthreads; ++t) {
    timer_reset(&thds[t].ttime);
    thds[t].nscratch = nscratch;
    thds[t].scratch = (void **) malloc(nscratch * sizeof(void*));
    if(thds[t].scratch == NULL) {
      fprintf(stderr, "SPLATT: thd_init failed to allocate scratch table.\n");
      abort();
    }
  }

  va_list args;
  va_start(args, nscratch);
  for(idx_t s=0; s < nscratch; ++s) {
    /* one size per scratch id, applied to all threads */
    idx_t const bytes = va_arg(args, idx_t);
    for(idx_t t=0; t < nthreads; ++t) {
      thds[t].scratch[s] = (void *) malloc(bytes);
      if(thds[t].scratch[s] == NULL) {
        fprintf(stderr, "SPLATT: thd_init failed to allocate scratch.\n");
        abort();
      }
      memset(thds[t].scratch[s], 0, bytes);
    }
  }
  va_end(args);

  return thds;
}
void thd_times(
    thd_info * thds,
    idx_t const nthreads)
{
  /* Print each thread's accumulated runtime, one line apiece. */
  idx_t t;
  for(t=0; t < nthreads; ++t) {
    double const seconds = thds[t].ttime.seconds;
    printf(" thread: %"SPLATT_PF_IDX" %0.3fs\n", t, seconds);
  }
}
/*
 * Print the average and maximum per-thread runtime plus the relative load
 * imbalance ((max - avg) / max).
 *
 * Fixes: the original divided by max_time unconditionally, printing NaN
 * when no time had been recorded, and divided by nthreads even when it is
 * zero.  Both degenerate cases are now handled explicitly.
 */
void thd_time_stats(
    thd_info * thds,
    idx_t const nthreads)
{
  double max_time = 0.;
  double avg_time = 0.;
  double imbal = 0.;
  idx_t t;

  /* nothing to report -- also avoids dividing by zero below */
  if(nthreads == 0) {
    return;
  }

  for(t=0; t < nthreads; ++t) {
    avg_time += thds[t].ttime.seconds;
    max_time = SS_MAX(max_time, thds[t].ttime.seconds);
  }
  avg_time /= nthreads;

  /* guard against 0/0 -> NaN when all timers are zero */
  if(max_time > 0.) {
    imbal = (max_time - avg_time) / max_time;
  }

  printf(" avg: %0.3fs max: %0.3fs (%0.1f%% imbalance)\n",
      avg_time, max_time, 100. * imbal);
}
void thd_reset(
    thd_info * thds,
    idx_t const nthreads)
{
  /* Restart every per-thread timer. */
  thd_info * const end = thds + nthreads;
  for(thd_info * thd = thds; thd != end; ++thd) {
    timer_reset(&thd->ttime);
  }
}
void thd_free(
    thd_info * thds,
    idx_t const nthreads)
{
  /* Release each scratch array, each scratch table, then the array. */
  for(idx_t t=0; t < nthreads; ++t) {
    thd_info * const thd = thds + t;
    for(idx_t s=0; s < thd->nscratch; ++s) {
      free(thd->scratch[s]);
    }
    free(thd->scratch);
  }
  free(thds);
}
|
single_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
void foo();
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single foo
// A bare '#pragma omp single' with no clauses is accepted on statements.
void test_no_clause() {
  int i;
#pragma omp single
  foo();
#pragma omp single
  ++i;
}
// Branches may not enter or leave an OpenMP single region; the in-line
// directive comments below are load-bearing for clang -verify and must
// stay on their exact lines.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp single
  {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown tokens after '#pragma omp single' are ignored with a warning.
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single foo bar
foo();
}
// Trailing punctuation after the directive (';', ',') only warns, and a
// clause that is valid elsewhere but not on 'single' (linear) is an error.
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single;
foo();
#pragma omp parallel
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single linear(x);
foo();
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single private(x);
foo();
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single, private(x);
foo();
}
// Exercises malformed and well-formed 'private' clauses on '#pragma omp
// single': missing/empty argument lists, non-variable arguments, and
// finally valid one/two/three-variable lists that must parse cleanly.
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp single private(
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(,
foo();
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(, )
foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private()
foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private(int)
foo();
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single private(0)
foo();
int x, y, z;
#pragma omp parallel
#pragma omp single private(x)
foo();
#pragma omp parallel
#pragma omp single private(x, y)
foo();
#pragma omp parallel
#pragma omp single private(x, y, z)
foo();
}
// Same malformed-argument coverage as test_private, but for the
// 'firstprivate' clause of '#pragma omp single'.
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(,
foo();
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(, )
foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate()
foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(int)
foo();
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single firstprivate(0)
foo();
}
// A duplicated 'nowait' clause on '#pragma omp single' must be rejected.
void test_nowait() {
#pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}}
for (int i = 0; i < 16; ++i)
;
}
|
ike_fmt_plug.c | /* PSK cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> .
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and it is hereby released to the general public under GPL
*
* The IKE Scanner (ike-scan) is Copyright (C) 2003-2007 Roy Hills,
* NTA Monitor Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* In addition, as a special exception, the copyright holders give
* permission to link the code of portions of this program with the
* OpenSSL library, and distribute linked combinations including the two.
*
* You must obey the GNU General Public License in all respects
* for all of the code used other than OpenSSL. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you
* do not wish to do so, delete this exception statement from your
* version.
*
* If this license is unacceptable to you, I may be willing to negotiate
* alternative licenses (contact ike-scan@nta-monitor.com).
*
* You are encouraged to send comments, improvements or suggestions to
* me at ike-scan@nta-monitor.com.
*
* psk-crack.c -- IKE Aggressive Mode Pre-Shared Key cracker for ike-scan
*
* Author: Roy Hills
* Date: 8 July 2004
*
* July, 2012, JimF small changes made, many more should be done.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ike;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ike);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "ike-crack.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "IKE"
#define FORMAT_NAME "PSK"
#define FORMAT_TAG "$ike$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "HMAC MD5/SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 20 /* SHA1 */
#define BINARY_SIZE_SMALLER 16 /* MD5 */
#define SALT_SIZE sizeof(psk_entry)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(size_t)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 16
/* Self-test vectors: "$ike$*<flag>*" followed by eight '*'-separated hex
 * fields and a final MD5 (32) or SHA1 (40) hex digest; plaintext last. */
static struct fmt_tests ike_tests[] = {
{"$ike$*0*5c7916ddf8db4d233b3b36005bb3ccc115a73807e11a897be943fd4a2d0f942624cb00588d8b3a0a26502b73e639df217ef6c4cb90f96b0a3c3ef2f62ed025b4a705df9de65e33e380c1ba5fa23bf1f9911bbf388d0844256fa0131fc5cf8acb396936ba3295b4637b039d93f58db90a3a1cf1ef5051103bacf6e1a3334f9f89*fde8c68c5f324c7dbcbadde1d757af6962c63496c009f77cad647f2997fd4295e50821453a6dc2f6279fd7fef68768584d9cee0da6e68a534a097ce206bf77ecc798310206f3f82d92d02c885794e0a430ceb2d6b43c2aff45a6e14c6558382df0692ff65c2724eef750764ee456f31424a5ebd9e115d826bbb9722111aa4e01*b2a3c7aa4be95e85*756e3fa11c1b102c*00000001000000010000002c01010001000000240101000080010001800200018003000180040002800b0001000c000400007080*01000000ac100202*251d7ace920b17cb34f9d561bca46d037b337d19*e045819a64edbf022620bff3efdb935216584cc4*b9c594fa3fca6bb30a85c4208a8df348", "abc123"},
{"$ike$*0*9bdee7aa341cf1a6c19bc0191106b5056537ce6b837cd70678ea5a3ccb606b56dee4548feb67f24fd6f4d5f58967a9ff3c674d9d79e4195b7def5aac147c9fe9abdc2f8ba2eca58f4c863fedc7a8c8e1ad6e1551b1e44bf9a0e258561a5db1c2ca1e8b5dfda1b012012b6fdf24ecd07da6b10d76ab3b58d07b30b4f9da26aee4*c9b7ef0610a22b3e1c88b1a01ce4d4110edf6baa122ed1285eb2184cd75d30a11520a725c2d263de5a157f77f953880732f3b14521836d7f3585cb0ce3fcadf81c541dde2680bd81953cf88e8f8096c173470694ca7414fff9df0cdcdbb9d4f70ef1d6347293b507cfad965e2d2c1fa07326353e9a493d93284970040344fb11*3506592130312567*6c362583ce7a2a26*00000001000000010000002c01010001000000240101000080010001800200028003000180040002800b0001000c000400007080*01000000ac100202*84943233f42a0b5a9b33c327162fe0efee2545e4*76f451dce3fea6402b67f3fddae561ebdb4a6efe*f63f237b3c0f1fe57a5b852203cfd27cbf0c78d4", "abc123"},
{NULL}
};
/* Salt selected by set_salt(), consumed by crypt_all(). */
static psk_entry *cur_salt;
/* Candidate plaintexts, NUL-terminated, one slot per key index. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Hash output per key; sized for SHA1, MD5 results use the first 16 bytes. */
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/*
 * One-time format initialization: size and allocate the key/output buffers.
 * Under OpenMP, min_keys_per_crypt is scaled by the thread count and
 * max_keys_per_crypt by thread count * OMP_SCALE, so each crypt_all() call
 * gives every thread several keys to work on (note the scaling of omp_t
 * happens between the two multiplications on purpose).
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/*
 * Validate a candidate ciphertext line.
 *
 * Expected layout: "$ike$*<0|1>*" followed by eight '*'-separated
 * lowercase-hex fields of at most MAXLEN characters each, and a ninth
 * field that is the hash itself (32 hex chars for MD5, 40 for SHA1).
 *
 * Returns 1 when the line is well-formed, 0 otherwise. The original
 * repeated the identical field check eight times; it is folded into a
 * loop here with identical behavior.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ptr, *ctcopy, *keeptr;
	int i;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;	/* skip leading '$ike$*' */
	if (*ctcopy != '0' && *ctcopy != '1')
		goto error;
	/* skip the Nortel flag digit and its '*' separator */
	ctcopy += 1;
	if (*ctcopy != '*')
		goto error;
	ctcopy += 1;
	/* eight bounded lowercase-hex fields */
	for (i = 0; i < 8; i++) {
		if (!(ptr = strtokm(i == 0 ? ctcopy : NULL, "*")))
			goto error;
		if (strlen(ptr) > MAXLEN)
			goto error;
		if (!ishexlc(ptr))
			goto error;
	}
	/* final field: the digest, md5 or sha1 length */
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) != 32 && strlen(ptr) != 40)
		goto error;
	if (!ishexlc(ptr))
		goto error;
	MEM_FREE(keeptr);
	return 1;

error:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse the salt portion of a validated ciphertext into a psk_entry.
 * Returns a pointer to a static buffer (standard JtR get_salt() contract:
 * the framework copies SALT_SIZE bytes immediately; not re-entrant).
 */
static void *get_salt(char *ciphertext)
{
static psk_entry cs;
/* first character after the tag is the Nortel flag ('0' or '1') */
cs.isnortel = atoi(&ciphertext[FORMAT_TAG_LEN]);
load_psk_params(&ciphertext[FORMAT_TAG_LEN+2], NULL, &cs);
return (void *)&cs;
}
/*
 * Decode the trailing hex digest of a ciphertext into raw bytes.
 * Only BINARY_SIZE_SMALLER (MD5-sized) bytes are decoded, matching the
 * comparison width used by cmp_exact(). Returns a static buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p = strrchr(ciphertext, '*') + 1;
	int i = 0;

	while (i < BINARY_SIZE_SMALLER) {
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
		i++;
	}
	return out;
}
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (psk_entry *)salt;
}
/*
 * Hash every queued candidate key against the current salt, writing the
 * results into crypt_out. Independent per index, so the loop is
 * parallelized with OpenMP when available.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		compute_hash(cur_salt, saved_key[index], (unsigned char*)crypt_out[index]);
	}
	return count;
}
/*
 * Return 1 if any computed hash matches the first 32 bits of the target
 * binary (cheap pre-filter; full comparison happens in cmp_exact()).
 */
static int cmp_all(void *binary, int count)
{
	uint32_t const first_word = *((uint32_t*)binary);
	int i;

	for (i = 0; i < count; i++) {
		if (crypt_out[i][0] == first_word)
			return 1;
	}
	return 0;
}
/* Compare a single candidate against the target, first 32 bits only. */
static int cmp_one(void *binary, int index)
{
return (*((uint32_t*)binary) == crypt_out[index][0]);
}
/* Full comparison over BINARY_SIZE_SMALLER (MD5-width) bytes. */
static int cmp_exact(char *source, int index)
{
void *binary = get_binary(source);
return !memcmp(binary, crypt_out[index], BINARY_SIZE_SMALLER);
}
static void ike_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored plaintext for slot @index (NUL-terminated). */
static char *get_key(int index)
{
return saved_key[index];
}
/*
* For ike, the hash algorithm used for hmac
* is returned as the first "tunable cost":
* 1: MD5
* 2: SHA1
*
* However, there is almost no difference in speed,
* so if the different hash types for HMAC shouldn't be reported,
* just define IKE_REPORT_TUNABLE_COSTS to be 0 instead of 1.
*/
#define IKE_REPORT_TUNABLE_COSTS 1
#if IKE_REPORT_TUNABLE_COSTS
static unsigned int tunable_cost_hmac_hash_type(void *salt)
{
psk_entry *my_salt;
my_salt = salt;
return (unsigned int) my_salt->hash_type;
}
#endif
/*
 * Format descriptor wiring this plugin into the JtR framework.
 * Field order is dictated by struct fmt_main: first the static parameters
 * (fmt_params), then the method table (fmt_methods).
 */
struct fmt_main fmt_ike = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE_SMALLER,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{
#if IKE_REPORT_TUNABLE_COSTS
"hash algorithm used for hmac [1:MD5 2:SHA1]",
#else
NULL
#endif
},
{ FORMAT_TAG },
ike_tests
/* method table follows */
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
#if IKE_REPORT_TUNABLE_COSTS
tunable_cost_hmac_hash_type,
#else
NULL
#endif
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
ike_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
Graph.h | /*
* Graph.h
*
* Created on: 01.06.2014
* Author: Christian Staudt (christian.staudt@kit.edu), Klara Reichard (klara.reichard@gmail.com), Marvin Ritter (marvin.ritter@gmail.com)
*/
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <vector>
#include <stack>
#include <queue>
#include <utility>
#include <stdexcept>
#include <functional>
#include <unordered_set>
#include "../Globals.h"
#include "Coordinates.h"
#include "../viz/Point.h"
#include "../auxiliary/Random.h"
#include "../auxiliary/FunctionTraits.h"
#include "../auxiliary/Log.h"
namespace NetworKit {
/**
* A weighted edge used for the graph constructor with
* initializer list syntax.
*/
struct WeightedEdge {
node u, v; // endpoints
edgeweight weight; // weight of the edge (u, v)
WeightedEdge(node u, node v, edgeweight w) : u(u), v(v), weight(w) {
}
};
// Orders weighted edges by weight only; endpoints are ignored.
inline bool operator<(const WeightedEdge& e1, const WeightedEdge& e2) {
return e1.weight < e2.weight;
}
/**
 * An unweighted edge. When @a sorted is true the endpoints are stored in
 * ascending order (u <= v), canonicalizing undirected edges.
 */
struct Edge {
	node u, v;

	Edge(node _u, node _v, bool sorted = false) {
		u = _u;
		v = _v;
		if (sorted && v < u) {
			std::swap(u, v);
		}
	}
};
// Two edges are equal iff both stored endpoints match (order-sensitive;
// undirected edges compare equal only if constructed with sorted == true).
inline bool operator==(const Edge& e1, const Edge& e2) {
return e1.u == e2.u && e1.v == e2.v;
}
}
namespace std {
// Hash support so NetworKit::Edge can be used in unordered containers.
template<>
struct hash<NetworKit::Edge> {
// NOTE(review): XOR of the endpoint hashes is symmetric (hash(u,v) == hash(v,u))
// and maps self-loops (u == v) to 0 — acceptable for canonically sorted
// undirected edges, but confirm before relying on it in other contexts.
size_t operator()(const NetworKit::Edge& e) const {
return hash_node(e.u) ^ hash_node(e.v);
}
hash<NetworKit::node> hash_node;
};
}
namespace NetworKit {
// forward declaration to randomization/CurveballImpl.h
namespace CurveballDetails {class CurveballMaterialization;}
/**
* @ingroup graph
* A graph (with optional weights) and parallel iterator methods.
*/
class Graph final {
friend class ParallelPartitionCoarsening;
friend class GraphBuilder;
friend class CurveballDetails::CurveballMaterialization;
private:
// graph attributes
count id; //!< unique graph id, starts at 0
std::string name; //!< name of the graph, initially G#ID
// scalars
count n; //!< current number of nodes
count m; //!< current number of edges
count storedNumberOfSelfLoops; //!< current number of self loops, edges which have the same origin and target
node z; //!< current upper bound of node ids, z will be the id of the next node
edgeid omega; //!< current upper bound of edge ids, will be the id of the next edge
count t; //!< current time step
bool weighted; //!< true if the graph is weighted, false otherwise
bool directed; //!< true if the graph is directed, false otherwise
bool edgesIndexed; //!< true if edge ids have been assigned
// per node data
std::vector<bool> exists; //!< exists[v] is true if node v has not been removed from the graph
Coordinates<float> coordinates; //!< coordinates of nodes (if present)
std::vector<count> inDeg; //!< only used for directed graphs, number of edges incoming per node
std::vector<count> outDeg; //!< degree of every node, zero if node was removed. For directed graphs only outgoing edges count
std::vector< std::vector<node> > inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes u that have an edge (u, v)
std::vector< std::vector<node> > outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in outEdges[u] and for undirected also u in outEdges[v]
std::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges
std::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges
std::vector< std::vector<edgeid> > inEdgeIds; //!< only used for directed graphs, same schema as inEdges
std::vector< std::vector<edgeid> > outEdgeIds; //!< same schema (and same order!) as outEdges
/**
* Returns the next unique graph id.
*/
count getNextGraphId();
/**
* Returns the index of node u in the array of incoming edges of node v. (for directed graphs inEdges is searched, while for undirected outEdges is searched, which gives the same result as indexInOutEdgeArray).
*/
index indexInInEdgeArray(node v, node u) const;
/**
* Returns the index of node v in the array of outgoing edges of node u.
*/
index indexInOutEdgeArray(node u, node v) const;
/**
* Returns the edge weight of the outgoing edge of index i in the outgoing edges of node u
* @param u The node
* @param i The index
* @return The weight of the outgoing edge or defaultEdgeWeight if the graph is unweighted
*/
template<bool hasWeights>
inline edgeweight getOutEdgeWeight(node u, index i) const;
/**
* Returns the edge weight of the incoming edge of index i in the incoming edges of node u
*
* @param u The node
* @param i The index in the incoming edge array
* @return The weight of the incoming edge
*/
template<bool hasWeights>
inline edgeweight getInEdgeWeight(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the outgoing edges of node u
*
* @param u The node
* @param i The index in the outgoing edges
* @return The edge id
*/
template<bool graphHasEdgeIds>
inline edgeid getOutEdgeId(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the incoming edges of node u
*
* @param u The node
* @param i The index in the incoming edges of u
* @return The edge id
*/
template<bool graphHasEdgeIds>
inline edgeid getInEdgeId(node u, index i) const;
/**
* @brief Returns if the edge (u, v) shall be used in the iteration of all edgesIndexed
*
* @param u The source node of the edge
* @param v The target node of the edge
* @return If the node shall be used, i.e. if v is not none and in the undirected case if u >= v
*/
template<bool graphIsDirected>
inline bool useEdgeInIteration(node u, node v) const;
/**
* @brief Implementation of the for loop for outgoing edges of u
*
* Note: If all (valid) outgoing edges shall be considered, graphIsDirected needs to be set to true
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void forOutEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for incoming edges of u
*
* For undirected graphs, this is the same as forOutEdgesOfImpl but u and v are changed in the handle
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void forInEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for all edges, @see forEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void forEdgeImpl(L handle) const;
/**
* @brief Parallel implementation of the for loop for all edges, @see parallelForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void parallelForEdgesImpl(L handle) const;
/**
* @brief Summation variant of the parallel for loop for all edges, @see parallelSumForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline double parallelSumForEdgesImpl(L handle) const;
/*
* In the following definition, Aux::FunctionTraits is used in order to only execute lambda functions
* with the appropriate parameters. The decltype-return type is used for determining the return type of
* the lambda (needed for summation) but also determines if the lambda accepts the correct number of parameters.
* Otherwise the return type declaration fails and the function is excluded from overload resolution.
* Then there are multiple possible lambdas with three (third parameter id or weight) and two (second parameter
* can be second node id or edge weight for neighbor iterators). This is checked using Aux::FunctionTraits and
* std::enable_if. std::enable_if only defines the type member when the given bool is true, this bool comes from
* std::is_same which compares two types. The function traits give either the parameter type or if it is out of bounds
* they define type as void.
*/
/**
* Triggers a static assert error when no other method is chosen. Because of the use of "..." as arguments, the priority
* of this method is lower than the priority of the other methods. This method avoids ugly and unreadable template substitution
* error messages from the other declarations.
*/
template<class F, void* = (void*)0>
typename Aux::FunctionTraits<F>::result_type edgeLambda(F&, ...) const {
// the strange condition is used in order to delay the evaluation of the static assert to the moment when this function is actually used
static_assert(! std::is_same<F, F>::value, "Your lambda does not support the required parameters or the parameters have the wrong type.");
return std::declval<typename Aux::FunctionTraits<F>::result_type>(); // use the correct return type (this won't compile)
}
/**
* Calls the given function f if its fourth argument is of the type edgeid and third of type edgeweight
* Note that the decltype check is not enough as edgeweight can be casted to node and we want to assure that .
*/
template < class F,
typename std::enable_if <
(Aux::FunctionTraits<F>::arity >= 3) &&
std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&
std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<3>::type>::value
>::type * = (void*)0 >
auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew, id)) {
return f(u, v, ew, id);
}
/**
* Calls the given function f if its third argument is of the type edgeid, discards the edge weight
* Note that the decltype check is not enough as edgeweight can be casted to node.
*/
template<class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 2) &&
std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&
std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value /* prevent f(v, weight, eid) */
>::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight, edgeid id) const -> decltype(f(u, v, id)) {
return f(u, v, id);
}
/**
* Calls the given function f if its third argument is of type edgeweight, discards the edge id
* Note that the decltype check is not enough as node can be casted to edgeweight.
*/
template<class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 2) &&
std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value
>::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid /*id*/) const -> decltype(f(u, v, ew)) {
return f(u, v, ew);
}
/**
* Calls the given function f if it has only two arguments and the second argument is of type node,
* discards edge weight and id
* Note that the decltype check is not enough as edgeweight can be casted to node.
*/
template<class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 1) &&
std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value
>::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight /*ew*/, edgeid /*id*/) const -> decltype(f(u, v)) {
return f(u, v);
}
/**
* Calls the given function f if it has only two arguments and the second argument is of type edgeweight,
* discards the first node and the edge id
* Note that the decltype check is not enough as edgeweight can be casted to node.
*/
template<class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 1) &&
std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<1>::type>::value
>::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid /*id*/) const -> decltype(f(u, ew)) {
return f(v, ew);
}
/**
* Calls the given function f if it has only one argument, discards the first
* node id, the edge weight and the edge id
*/
template<class F,
void* = (void*)0>
auto edgeLambda(F&f, node, node v, edgeweight, edgeid) const -> decltype(f(v)) {
return f(v);
}
/**
* Calls the given BFS handle with distance parameter
*/
template <class F>
auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) {
return f(u, dist);
}
/**
* Calls the given BFS handle without distance parameter
*/
template <class F>
auto callBFSHandle(F &f, node u, count) const -> decltype(f(u)) {
return f(u);
}
public:
/**
* Create a graph of @a n nodes. The graph has assignable edge weights if @a weighted is set to <code>true</code>.
* If @a weighted is set to <code>false</code> each edge has edge weight 1.0 and any other weight assignment will
* be ignored.
* @param n Number of nodes.
* @param weighted If set to <code>true</code>, the graph has edge weights.
* @param directed If set to @c true, the graph will be directed.
*/
Graph(count n = 0, bool weighted = false, bool directed = false);
Graph(const Graph& G, bool weighted, bool directed);
/**
* Generate a weighted graph from a list of edges. (Useful for small
* graphs in unit tests that you do not want to read from a file.)
*
* @param[in] edges list of weighted edges
*/
Graph(std::initializer_list<WeightedEdge> edges);
/**
* Create a graph as copy of @a other.
* @param other The graph to copy.
*/
Graph(const Graph& other) = default;
/** Default move constructor */
Graph(Graph&& other) = default;
/** Default destructor */
~Graph() = default;
/** Default move assignment operator */
Graph& operator=(Graph&& other) = default;
/** Default copy assignment operator */
Graph& operator=(const Graph& other) = default;
/** EDGE IDS **/
/**
* Initially assign integer edge identifiers.
*
* @param force Force re-indexing of edges even if they have already been indexed
*/
void indexEdges(bool force = false);
/**
* Checks if edges have been indexed
*
* @return bool if edges have been indexed
*/
bool hasEdgeIds() const { return edgesIndexed; }
/**
* Get the id of the given edge.
*/
edgeid edgeId(node u, node v) const;
/**
* Get an upper bound for the edge ids in the graph.
* @return An upper bound for the edge ids.
*/
index upperEdgeIdBound() const { return omega; }
/** GRAPH INFORMATION **/
/**
* Get the ID of this graph. The ID is a unique unsigned integer given to
* every graph on construction.
*/
count getId() const { return id; }
/**
* Return the type of the graph.
* Graph: not weighted, undirected
* WeightedGraph: weighted, undirected
* DirectedGraph: not weighted, directed
* WeightedDirectedGraph: weighted, directed
*/
std::string typ() const;
/**
* Try to save some memory by shrinking internal data structures of the graph. Only run this
* once you finished editing the graph. Otherwise it will cause unnecessary reallocation of
* memory.
*/
void shrinkToFit();
/**
* Compacts the adjacency arrays by re-using no longer needed slots from deleted edges.
*/
void compactEdges();
/**
* Sorts the adjacency arrays by node id. While the running time is linear this
* temporarily duplicates the memory.
*/
void sortEdges();
/**
* Set name of graph to @a name.
* @param name The name.
*/
void setName(std::string name) { this->name = name; }
/*
* Returns the name of the graph.
* @return The name of the graph.
*/
std::string getName() const { return name; }
/**
* Returns a string representation of the graph.
* @return A string representation.
*/
std::string toString() const;
/* COPYING */
/*
* Copies all nodes to a new graph
* @return graph with the same nodes.
*/
Graph copyNodes() const;
/* NODE MODIFIERS */
/**
* Add a new node to the graph and return it.
* @return The new node.
*/
node addNode();
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Add a new node to the graph with coordinates @a x and @a y and return it.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
node addNode(float x, float y);
/**
* Remove a node @a v and all incident edges from the graph.
*
* Incoming as well as outgoing edges will be removed.
*
* @param u Node.
*/
void removeNode(node v);
/**
* Check if node @a v exists in the graph.
*
* @param v Node.
* @return @c true if @a v exists, @c false otherwise.
*/
bool hasNode(node v) const { return (v < z) && this->exists[v]; }
/**
* Restores a previously deleted node @a v with its previous id in the graph.
*
* @param v Node.
*
*/
void restoreNode(node v);
// SET OPERATIONS
/**
* Appends another graph to this graph as a new subgraph. Performs node
* id remapping.
* @param G [description]
*/
void append(const Graph& G);
/**
* Modifies this graph to be the union of it and another graph.
* Nodes with the same ids are identified with each other.
* @param G [description]
*/
void merge(const Graph& G);
// SUBGRAPHS
Graph subgraphFromNodes(const std::unordered_set<node>& nodes) const;
/** NODE PROPERTIES **/
/**
* Returns the number of outgoing neighbors of @a v.
*
* @param v Node.
* @return The number of outgoing neighbors.
*/
count degree(node v) const { return outDeg[v]; }
/**
* Get the number of incoming neighbors of @a v.
*
* @param v Node.
* @return The number of incoming neighbors.
* @note If the graph is not directed, the outgoing degree is returned.
*/
count degreeIn(node v) const { return directed ? inDeg[v] : outDeg[v]; }
/**
* Get the number of outgoing neighbors of @a v.
*
* @param v Node.
* @return The number of outgoing neighbors.
*/
count degreeOut(node v) const { return outDeg[v]; }
/**
* Check whether @a v is isolated, i.e. degree is 0.
* @param v Node.
* @return @c true if the node is isolated (= degree is 0)
*/
bool isIsolated(node v) const { return outDeg[v] == 0 && (!directed || inDeg[v] == 0); }
/**
* Returns the weighted degree of @a v.
*
* @param v Node.
* @return Weighted degree of @a v.
* @note For directed graphs this is the sum of weights of all outgoing edges of @a v.
*/
edgeweight weightedDegree(node v) const;
/**
* Returns the volume of the @a v, which is the weighted degree with self-loops counted twice.
*
* @param v Node.
* @return The volume of the @a v.
*/
edgeweight volume(node v) const;
/**
* Returns a random node of the graph.
* @return A random node.
*/
node randomNode() const;
/**
* Returns a random neighbor of @a u and @c none if degree is zero.
*
* @param u Node.
* @return A random neighbor of @a u.
*/
node randomNeighbor(node u) const;
/* EDGE MODIFIERS */
/**
* Insert an edge between the nodes @a u and @a v. If the graph is weighted you can optionally
* set a weight for this edge. The default weight is 1.0.
* Note: Multi-edges are not supported and will NOT be handled consistently by the graph data
* structure.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @param weight Optional edge weight.
*/
void addEdge(node u, node v, edgeweight ew = defaultEdgeWeight);
/**
* Removes the undirected edge {@a u,@a v}.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
*/
void removeEdge(node u, node v);
/**
* Efficiently removes all the edges adjacent to a set of nodes that is not
* connected to the rest of the graph. This is meant to optimize the Kadabra
* algorithm.
* @param nodesInSet vector of nodes that form a connected component that is
* isolated from the rest of the graph.
*/
void removeEdgesFromIsolatedSet(const std::vector<node> &nodesInSet);
/**
* Removes all the edges in the graph.
*/
void removeAllEdges();
/**
* Removes all self-loops in the graph.
*/
void removeSelfLoops();
/**
* Changes the edges {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2, @a t2} into {@a s2, @a t1}.
*
* If there are edge weights or edge ids, they are preserved. Note that no check is performed if the swap is actually possible, i.e. does not generate duplicate edges.
*
* @param s1 The first source
* @param t1 The first target
* @param s2 The second source
* @param t2 The second target
*/
void swapEdge(NetworKit::node s1, NetworKit::node t1, NetworKit::node s2, NetworKit::node t2);
/**
* Checks if undirected edge {@a u,@a v} exists in the graph.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return <code>true</code> if the edge exists, <code>false</code> otherwise.
*/
bool hasEdge(node u, node v) const;
/**
* Returns a random edge. By default a random node u is chosen and then some random neighbor v. So the probability of choosing (u, v) highly
* depends on the degree of u.
* Setting uniformDistribution to true, will give you a real uniform distributed edge, but will be very slow. So only use uniformDistribution
* for single calls outside of any loops.
*/
std::pair<node, node> randomEdge(bool uniformDistribution = false) const;
/**
* Returns a vector with nr random edges. The edges are chosen uniform random.
*/
std::vector< std::pair<node, node> > randomEdges(count nr) const;
/* GLOBAL PROPERTIES */
/**
* Returns <code>true</code> if this graph supports edge weights other than 1.0.
* @return <code>true</code> if this graph supports edge weights other than 1.0.
*/
bool isWeighted() const { return weighted; }
/**
* Return @c true if this graph supports directed edges.
* @return @c true if this graph supports directed edges.
*/
bool isDirected() const { return directed; }
/**
* Return <code>true</code> if graph contains no nodes.
* @return <code>true</code> if graph contains no nodes.
*/
bool isEmpty() const { return n == 0; }
/**
* Return the number of nodes in the graph.
* @return The number of nodes.
*/
count numberOfNodes() const { return n; }
/**
* Return the number of edges in the graph.
* @return The number of edges.
*/
count numberOfEdges() const { return m; }
/**
* @return a pair (n, m) where n is the number of nodes and m is the number of edges
*/
std::pair<count, count> const size() const { return {n, m}; };
/**
 * Get the density of the graph: the ratio of existing edges to the maximum
 * possible number of edges. Self-loops are excluded from the edge count.
 * @return the density of the graph, or 0.0 for graphs with fewer than two nodes
 *         (for which the maximum edge count is zero).
 */
double density() const {
	count n = numberOfNodes();
	count m = numberOfEdges();
	count loops = numberOfSelfLoops();
	m -= loops; // self-loops do not contribute to density
	// Guard: for n < 2 the denominator n*(n-1) is zero; the original code
	// divided by zero here. Define the density of such graphs as 0.
	if (n < 2) {
		return 0.0;
	}
	double d;
	if (isDirected()) {
		// directed: up to n*(n-1) distinct edges
		d = m / (double) (n * (n-1));
	} else {
		// undirected: up to n*(n-1)/2 distinct edges
		d = (2 * m) / (double) (n * (n-1));
	}
	return d;
}
/**
* Return the number of loops {v,v} in the graph.
* @return The number of loops.
* @note This involves calculation, so store result if needed multiple times.
*/
count numberOfSelfLoops() const;
/**
* Get an upper bound for the node ids in the graph.
* @return An upper bound for the node ids.
*/
index upperNodeIdBound() const { return z; }
/**
* Check for invalid graph states, such as multi-edges.
* @return False if the graph is in invalid state.
*/
bool checkConsistency() const;
/* DYNAMICS */
/**
* Trigger a time step - increments counter.
*/
void timeStep() { t++; }
/**
 * Get time step counter.
 * @return Time step counter.
 */
// const-qualified for consistency with the other read-only accessors
// (numberOfNodes(), isDirected(), ...); adding const is backward-compatible.
count time() const { return t; }
/* COORDINATES */
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Sets the coordinate of @a v to @a value.
*
* @param v Node.
* @param value The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
void setCoordinate(node v, Point<float> value) { coordinates.setCoordinate(v, value); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get the coordinate of @a v.
* @param v Node.
* @return The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
Point<float>& getCoordinate(node v) { return coordinates.getCoordinate(v); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get minimum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for minimum.
* @return The minimum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
float minCoordinate(count dim) { return coordinates.minCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get maximum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for maximum.
* @return The maximum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
float maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Initializes the coordinates for the nodes in graph.
* @note This has to be called once and before you set coordinates. Call this method again if new nodes have
* been added.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
void initCoordinates() { coordinates.init(z); }
/* EDGE ATTRIBUTES */
/**
* Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist.
* BEWARE: Running time is \Theta(deg(u))!
*
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist.
*/
edgeweight weight(node u, node v) const;
/**
* Set the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] weight edge weight
*/
void setWeight(node u, node v, edgeweight ew);
/**
* Increase the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] weight edge weight
*/
void increaseWeight(node u, node v, edgeweight ew);
/* SUMS */
/**
* Returns the sum of all edge weights.
* @return The sum of all edge weights.
*/
edgeweight totalEdgeWeight() const;
/* Collections */
/**
* Get list of all nodes.
* @return List of all nodes.
*/
std::vector<node> nodes() const;
/**
* Get list of edges as node pairs.
* @return List of edges as node pairs.
*/
std::vector<std::pair<node, node> > edges() const;
/**
* Get list of neighbors of @a u.
*
* @param u Node.
* @return List of neighbors of @a u.
*/
std::vector<node> neighbors(node u) const;
/**
* Get i-th (outgoing) neighbor of @a u.
* WARNING: This function is deprecated or only temporary.
*
* @param u Node.
* @param i index; should be in [0, degreeOut(u))
* @return @a i -th (outgoing) neighbor of @a u, or @c none if no such
* neighbor exists.
*/
template<bool graphIsDirected>
node getIthNeighbor(node u, index i) const {
	// NOTE(review): i is not bounds-checked; the caller must guarantee
	// i < outEdges[u].size() (i.e. i < degreeOut(u)), as documented above.
	node v = outEdges[u][i];
	// useEdgeInIteration filters deleted edge slots (v == none); for
	// undirected graphs it additionally suppresses one direction of
	// each edge, so "none" may be returned even for a valid index.
	if (useEdgeInIteration<graphIsDirected>(u, v))
		return v;
	else
		return none;
}
/* Derivative Graphs */
/**
* Return an undirected version of this graph.
*
* @return undirected graph.
*/
Graph toUndirected() const;
/**
* Return an unweighted version of this graph.
*
* @return unweighted graph.
*/
Graph toUnweighted() const;
/**
* Return the transpose of this graph. The graph must be directed.
*
* @return transpose of the graph.
*/
Graph transpose() const;
/* NODE ITERATORS */
/**
* Iterate over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void forNodes(L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void parallelForNodes(L handle) const;
/** Iterate over all nodes of the graph and call @a handle (lambda closure) as long as @a condition remains true.
* This allows for breaking from a node loop.
*
* @param condition Returning <code>false</code> breaks the loop.
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename C, typename L> void forNodesWhile(C condition, L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void forNodesInRandomOrder(L handle) const;
/**
* Iterate in parallel over all nodes of the graph and call handler (lambda closure).
* Using schedule(guided) to remedy load-imbalances due to e.g. unequal degree distribution.
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void balancedParallelForNodes(L handle) const;
/**
* Iterate over all undirected pairs of nodes and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template<typename L> void forNodePairs(L handle) const;
/**
* Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template<typename L> void parallelForNodePairs(L handle) const;
/* EDGE ITERATORS */
/**
* Iterate over all edges of the const graph and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.
*/
template<typename L> void forEdges(L handle) const;
/**
* Iterate in parallel over all edges of the const graph and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code> or <code>(node, node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.
*/
template<typename L> void parallelForEdges(L handle) const;
/* NEIGHBORHOOD ITERATORS */
/**
* Iterate over all neighbors of a node and call @a handle (lamdba closure).
*
* @param u Node.
* @param handle Takes parameter <code>(node)</code> or <code>(node, edgeweight)</code> which is a neighbor of @a u.
* @note For directed graphs only outgoing edges from @a u are considered.
* A node is its own neighbor if there is a self-loop.
*
*/
template<typename L> void forNeighborsOf(node u, L handle) const;
/**
* Iterate over all incident edges of a node and call @a handle (lamdba closure).
*
* @param u Node.
* @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code> where the first node is @a u and the second is a neighbor of @a u.
* @note For undirected graphs all edges incident to @a u are also outgoing edges.
*/
template<typename L> void forEdgesOf(node u, L handle) const;
/**
* Iterate over all neighbors of a node and call handler (lamdba closure).
* For directed graphs only incoming edges from u are considered.
*/
template<typename L> void forInNeighborsOf(node u, L handle) const;
/**
* Iterate over all incoming edges of a node and call handler (lamdba closure).
* @note For undirected graphs all edges incident to u are also incoming edges.
*
* Handle takes parameters (u, v) or (u, v, w) where w is the edge weight.
*/
template<typename L> void forInEdgesOf(node u, L handle) const;
/* REDUCTION ITERATORS */
/**
* Iterate in parallel over all nodes and sum (reduce +) the values returned by the handler
*/
template<typename L> double parallelSumForNodes(L handle) const;
/**
* Iterate in parallel over all edges and sum (reduce +) the values returned by the handler
*/
template<typename L> double parallelSumForEdges(L handle) const;
/* GRAPH SEARCHES */
/**
* Iterate over nodes in breadth-first search order starting from r until connected component
* of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void BFSfrom(node r, L handle) const;
template<typename L> void BFSfrom(const std::vector<node> &startNodes, L handle) const;
template<typename L> void BFSEdgesFrom(node r, L handle) const;
/**
* Iterate over nodes in depth-first search order starting from r until connected component
* of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void DFSfrom(node r, L handle) const;
template<typename L> void DFSEdgesFrom(node r, L handle) const;
};
/* NODE ITERATORS */
template<typename L>
void Graph::forNodes(L handle) const {
for (node v = 0; v < z; ++v) {
if (exists[v]) {
handle(v);
}
}
}
// Apply handle to every existing node in parallel. The handler may run
// concurrently on different nodes, so it must be thread-safe.
// (Despite the doc comment on the declaration, iteration order is simply
// the OpenMP partition of the id range, not a random order.)
template<typename L>
void Graph::parallelForNodes(L handle) const {
	#pragma omp parallel for
	for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
		if (exists[v]) {
			handle(v);
		}
	}
}
template<typename C, typename L>
void Graph::forNodesWhile(C condition, L handle) const {
for (node v = 0; v < z; ++v) {
if (exists[v]) {
if (!condition()) {
break;
}
handle(v);
}
}
}
template<typename L>
void Graph::forNodesInRandomOrder(L handle) const {
std::vector<node> randVec = nodes();
std::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG());
for (node v : randVec) {
handle(v);
}
}
// Parallel node iteration with guided scheduling: chunk sizes shrink over
// time, which remedies load imbalance when per-node work varies (e.g. with
// skewed degree distributions). Handler must be thread-safe.
template<typename L>
void Graph::balancedParallelForNodes(L handle) const {
	#pragma omp parallel for schedule(guided) // TODO: define min block size (and test it!)
	for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
		if (exists[v]) {
			handle(v);
		}
	}
}
template<typename L>
void Graph::forNodePairs(L handle) const {
	// Enumerate each unordered pair exactly once: the second index starts
	// one past the first, so handle sees (u, v) with u < v only.
	for (node u = 0; u < z; ++u) {
		if (!exists[u])
			continue;
		for (node v = u + 1; v < z; ++v) {
			if (exists[v])
				handle(u, v);
		}
	}
}
// Parallel version of forNodePairs: the outer index is distributed over
// threads (guided schedule, since later u values have fewer partners).
// handle sees each unordered pair (u, v), u < v, exactly once and may be
// invoked concurrently, so it must be thread-safe.
template<typename L>
void Graph::parallelForNodePairs(L handle) const {
	#pragma omp parallel for schedule(guided)
	for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
		if (exists[u]) {
			for (node v = u + 1; v < z; ++v) {
				if (exists[v]) {
					handle(u, v);
				}
			}
		}
	}
}
/* EDGE ITERATORS */
/* HELPERS */
// Compile-time dispatch helpers: the primary templates read the stored
// per-edge attribute arrays, while the <false> specializations return a
// constant, letting the iterator implementations avoid runtime checks of
// the weighted / edgesIndexed flags.
template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getOutEdgeWeight(node u, index i) const {
	return outEdgeWeights[u][i];
}
template<> // implementation for weighted == false
inline edgeweight Graph::getOutEdgeWeight<false>(node, index) const {
	return defaultEdgeWeight;
}
template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getInEdgeWeight(node u, index i) const {
	return inEdgeWeights[u][i];
}
template<> // implementation for weighted == false
inline edgeweight Graph::getInEdgeWeight<false>(node, index) const {
	return defaultEdgeWeight;
}
template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getOutEdgeId(node u, index i) const {
	return outEdgeIds[u][i];
}
template<> // implementation for hasEdgeIds == false
inline edgeid Graph::getOutEdgeId<false>(node, index) const {
	return 0;
}
template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getInEdgeId(node u, index i) const {
	return inEdgeIds[u][i];
}
template<> // implementation for hasEdgeIds == false
inline edgeid Graph::getInEdgeId<false>(node, index) const {
	return 0;
}
// Edge filter used by all edge iterators: directed graphs only skip
// deleted slots (v == none); undirected graphs additionally keep only the
// direction with u >= v, so each edge {u, v} is visited exactly once
// (note none compares larger than any node id, so deleted slots fail too).
template<bool graphIsDirected> // implementation for graphIsDirected == true
inline bool Graph::useEdgeInIteration(node /* u */, node v) const {
	return v != none;
}
template<> // implementation for graphIsDirected == false
inline bool Graph::useEdgeInIteration<false>(node u, node v) const {
	return u >= v;
}
// Core per-node edge loop: invokes handle for every out-edge of u that
// passes useEdgeInIteration (deleted slots skipped; undirected duplicates
// suppressed). Weight and edge id are fetched through the compile-time
// dispatch helpers; edgeLambda adapts the call to the handler's arity.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forOutEdgesOfImpl(node u, L handle) const {
	for (index i = 0; i < outEdges[u].size(); ++i) {
		node v = outEdges[u][i];
		if (useEdgeInIteration<graphIsDirected>(u, v)) {
			edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
		}
	}
}
// In-edge loop for node u. Directed graphs read the dedicated inEdges
// adjacency; undirected graphs have no separate in-adjacency, so the
// out-edges are reused (every incident edge is also an in-edge). The
// graphIsDirected branch is resolved at compile time.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forInEdgesOfImpl(node u, L handle) const {
	if (graphIsDirected) {
		for (index i = 0; i < inEdges[u].size(); i++) {
			node v = inEdges[u][i];
			// useEdgeInIteration<true> only filters deleted slots here
			if (useEdgeInIteration<true>(u, v)) {
				edgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i), getInEdgeId<graphHasEdgeIds>(u, i));
			}
		}
	} else {
		for (index i = 0; i < outEdges[u].size(); ++i) {
			node v = outEdges[u][i];
			if (useEdgeInIteration<true>(u, v)) {
				edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
			}
		}
	}
}
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forEdgeImpl(L handle) const {
for (node u = 0; u < z; ++u) {
forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
}
}
// Parallel edge iteration: source nodes are distributed across threads
// (guided schedule to balance skewed degrees). Each edge is handled by
// exactly one thread, but the handler itself must be thread-safe.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::parallelForEdgesImpl(L handle) const {
	#pragma omp parallel for schedule(guided)
	for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
		forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
	}
}
// Parallel reduction over edges: each thread accumulates the handler's
// return values for its share of source nodes; OpenMP combines the
// per-thread partial sums. Note: floating-point summation order is
// nondeterministic across runs/thread counts.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline double Graph::parallelSumForEdgesImpl(L handle) const {
	double sum = 0.0;
	#pragma omp parallel for reduction(+:sum)
	for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
		for (index i = 0; i < outEdges[u].size(); ++i) {
			node v = outEdges[u][i];
			// undirected, do not iterate over edges twice
			// {u, v} instead of (u, v); if v == none, u > v is not fulfilled
			if (useEdgeInIteration<graphIsDirected>(u, v)) {
				sum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
			}
		}
	}
	return sum;
}
// Dispatch to the right compile-time instantiation of forEdgeImpl.
// The switch key is a 3-bit mask: bit 0 = weighted, bit 1 = directed,
// bit 2 = edge ids indexed; all eight combinations are enumerated.
template<typename L>
void Graph::forEdges(L handle) const {
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: // unweighted, undirected, no edgeIds
		forEdgeImpl<false, false, false, L>(handle);
		break;
	case 1: // weighted, undirected, no edgeIds
		forEdgeImpl<false, true, false, L>(handle);
		break;
	case 2: // unweighted, directed, no edgeIds
		forEdgeImpl<true, false, false, L>(handle);
		break;
	case 3: // weighted, directed, no edgeIds
		forEdgeImpl<true, true, false, L>(handle);
		break;
	case 4: // unweighted, undirected, with edgeIds
		forEdgeImpl<false, false, true, L>(handle);
		break;
	case 5: // weighted, undirected, with edgeIds
		forEdgeImpl<false, true, true, L>(handle);
		break;
	case 6: // unweighted, directed, with edgeIds
		forEdgeImpl<true, false, true, L>(handle);
		break;
	case 7: // weighted, directed, with edgeIds
		forEdgeImpl<true, true, true, L>(handle);
		break;
	}
}
// Parallel counterpart of forEdges; same 3-bit dispatch mask
// (bit 0 = weighted, bit 1 = directed, bit 2 = edge ids indexed).
// The handler may be invoked concurrently and must be thread-safe.
template<typename L>
void Graph::parallelForEdges(L handle) const {
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: // unweighted, undirected, no edgeIds
		parallelForEdgesImpl<false, false, false, L>(handle);
		break;
	case 1: // weighted, undirected, no edgeIds
		parallelForEdgesImpl<false, true, false, L>(handle);
		break;
	case 2: // unweighted, directed, no edgeIds
		parallelForEdgesImpl<true, false, false, L>(handle);
		break;
	case 3: // weighted, directed, no edgeIds
		parallelForEdgesImpl<true, true, false, L>(handle);
		break;
	case 4: // unweighted, undirected, with edgeIds
		parallelForEdgesImpl<false, false, true, L>(handle);
		break;
	case 5: // weighted, undirected, with edgeIds
		parallelForEdgesImpl<false, true, true, L>(handle);
		break;
	case 6: // unweighted, directed, with edgeIds
		parallelForEdgesImpl<true, false, true, L>(handle);
		break;
	case 7: // weighted, directed, with edgeIds
		parallelForEdgesImpl<true, true, true, L>(handle);
		break;
	}
}
/* NEIGHBORHOOD ITERATORS */
// The (out-)neighbors of u are exactly the targets of its incident
// (out-)edges, so neighbor iteration simply delegates to forEdgesOf;
// edgeLambda adapts the handler arity ((node) or (node, edgeweight), ...).
template<typename L>
void Graph::forNeighborsOf(node u, L handle) const {
	forEdgesOf(u, handle);
}
// Iterate over all edges incident to u. The directed template flag is
// hard-wired to true so that no duplicate-direction filtering happens:
// every incident edge of u is reported, for directed and undirected
// graphs alike. Dispatch mask: bit 0 = weighted, bit 1 = edge ids.
template<typename L>
void Graph::forEdgesOf(node u, L handle) const {
	switch (weighted + 2 * edgesIndexed) {
	case 0: //not weighted, no edge ids
		forOutEdgesOfImpl<true, false, false, L>(u, handle);
		break;
	case 1: //weighted, no edge ids
		forOutEdgesOfImpl<true, true, false, L>(u, handle);
		break;
	case 2: //not weighted, with edge ids
		forOutEdgesOfImpl<true, false, true, L>(u, handle);
		break;
	case 3: //weighted, with edge ids
		forOutEdgesOfImpl<true, true, true, L>(u, handle);
		break;
	}
}
// In-neighbors of u are the sources of its incoming edges; delegate to
// forInEdgesOf, which handles the directed/undirected distinction.
template<typename L>
void Graph::forInNeighborsOf(node u, L handle) const {
	forInEdgesOf(u, handle);
}
// Iterate over all incoming edges of u (for undirected graphs: all
// incident edges). Same 3-bit dispatch mask as forEdges:
// bit 0 = weighted, bit 1 = directed, bit 2 = edge ids indexed.
template<typename L>
void Graph::forInEdgesOf(node u, L handle) const {
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: //unweighted, undirected, no edge ids
		forInEdgesOfImpl<false, false, false, L>(u, handle);
		break;
	case 1: //weighted, undirected, no edge ids
		forInEdgesOfImpl<false, true, false, L>(u, handle);
		break;
	case 2: //unweighted, directed, no edge ids
		forInEdgesOfImpl<true, false, false, L>(u, handle);
		break;
	case 3: //weighted, directed, no edge ids
		forInEdgesOfImpl<true, true, false, L>(u, handle);
		break;
	case 4: //unweighted, undirected, with edge ids
		forInEdgesOfImpl<false, false, true, L>(u, handle);
		break;
	case 5: //weighted, undirected, with edge ids
		forInEdgesOfImpl<false, true, true, L>(u, handle);
		break;
	case 6: //unweighted, directed, with edge ids
		forInEdgesOfImpl<true, false, true, L>(u, handle);
		break;
	case 7: //weighted, directed, with edge ids
		forInEdgesOfImpl<true, true, true, L>(u, handle);
		break;
	}
}
/* REDUCTION ITERATORS */
// Parallel sum of handle(v) over all existing nodes via an OpenMP
// reduction. Handler must be thread-safe; float summation order is
// nondeterministic across thread counts.
template<typename L>
double Graph::parallelSumForNodes(L handle) const {
	double sum = 0.0;
	#pragma omp parallel for reduction(+:sum)
	for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
		if (exists[v]) {
			sum += handle(v);
		}
	}
	return sum;
}
// Parallel sum of handle(...) over all edges; dispatches to the right
// parallelSumForEdgesImpl instantiation via the 3-bit mask
// (bit 0 = weighted, bit 1 = directed, bit 2 = edge ids indexed).
template<typename L>
double Graph::parallelSumForEdges(L handle) const {
	double sum = 0.0;
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: // unweighted, undirected, no edge ids
		sum = parallelSumForEdgesImpl<false, false, false, L>(handle);
		break;
	case 1: // weighted, undirected, no edge ids
		sum = parallelSumForEdgesImpl<false, true, false, L>(handle);
		break;
	case 2: // unweighted, directed, no edge ids
		sum = parallelSumForEdgesImpl<true, false, false, L>(handle);
		break;
	case 3: // weighted, directed, no edge ids
		sum = parallelSumForEdgesImpl<true, true, false, L>(handle);
		break;
	case 4: // unweighted, undirected, with edge ids
		sum = parallelSumForEdgesImpl<false, false, true, L>(handle);
		break;
	case 5: // weighted, undirected, with edge ids
		sum = parallelSumForEdgesImpl<false, true, true, L>(handle);
		break;
	case 6: // unweighted, directed, with edge ids
		sum = parallelSumForEdgesImpl<true, false, true, L>(handle);
		break;
	case 7: // weighted, directed, with edge ids
		sum = parallelSumForEdgesImpl<true, true, true, L>(handle);
		break;
	}
	return sum;
}
/* GRAPH SEARCHES */
template<typename L>
void Graph::BFSfrom(node r, L handle) const {
	// Single-source BFS is the multi-source variant with one start node.
	BFSfrom(std::vector<node>{r}, handle);
}
template<typename L>
void Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const {
std::vector<bool> marked(z);
std::queue<node> q, qNext;
count dist = 0;
// enqueue start nodes
for (node u : startNodes) {
q.push(u);
marked[u] = true;
}
do {
node u = q.front();
q.pop();
// apply function
callBFSHandle(handle, u, dist);
forNeighborsOf(u, [&](node v) {
if (!marked[v]) {
qNext.push(v);
marked[v] = true;
}
});
if (q.empty() && !qNext.empty()) {
q.swap(qNext);
++dist;
}
} while (!q.empty());
}
// BFS from r that reports the traversed tree edges: handle(u, v, w, eid)
// is called once per edge that first discovers v (non-tree edges are
// skipped because v is already marked). Assumes r is a valid node id < z.
template<typename L>
void Graph::BFSEdgesFrom(node r, L handle) const {
	std::vector<bool> marked(z);
	std::queue<node> q;
	q.push(r); // enqueue root
	marked[r] = true;
	// do-while is safe here: the queue starts non-empty (root pushed above)
	do {
		node u = q.front();
		q.pop();
		// apply function
		forNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) {
			if (!marked[v]) {
				handle(u, v, w, eid);
				q.push(v);
				marked[v] = true;
			}
		});
	} while (!q.empty());
}
template<typename L>
void Graph::DFSfrom(node r, L handle) const {
	// Iterative depth-first traversal of the component of r (assumed to
	// be a valid node id < z). Nodes are marked when pushed, so each is
	// handled exactly once.
	std::vector<bool> marked(z);
	std::stack<node> todo;
	marked[r] = true;
	todo.push(r);
	while (!todo.empty()) {
		node u = todo.top();
		todo.pop();
		handle(u);
		forNeighborsOf(u, [&](node w) {
			if (!marked[w]) {
				marked[w] = true;
				todo.push(w);
			}
		});
	}
}
// DFS from r that reports the traversed tree edges: handle(u, v) is
// called once per edge that first discovers v. Assumes r is a valid
// node id < z.
template<typename L>
void Graph::DFSEdgesFrom(node r, L handle) const {
	std::vector<bool> marked(z);
	std::stack<node> s;
	s.push(r); // enqueue root
	marked[r] = true;
	// do-while is safe: the stack starts non-empty (root pushed above)
	do {
		node u = s.top();
		s.pop();
		// apply function
		forNeighborsOf(u, [&](node v) {
			if (!marked[v]) {
				handle(u, v);
				s.push(v);
				marked[v] = true;
			}
		});
	} while (!s.empty());
}
} /* namespace NetworKit */
#endif /* GRAPH_H_ */
|
pdlange.c | /**
*
* @file pdlange.c
*
* PLASMA auxiliary routines
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Emmanuel Agullo
* @author Mathieu Faverge
* @date 2010-11-15
* @generated d Tue Jan 7 11:45:11 2014
*
**/
#include <stdlib.h>
#include <math.h>
#include "common.h"
#define A(m, n, i, j, ldt) (BLKADDR(A, double, m, n)+((j)*(ldt)+(i)))
#define Ad(m, n) BLKADDR(A, double, m, n)
#define descx(m, n) BLKADDR(descx, double, m, n)
#define descSx(m, n) BLKADDR(descSx, double, m, n)
/***************************************************************************//**
*
**/
/*
 * Static-scheduling worker body for the tiled matrix norm DLANGE.
 * Every worker (identified by PLASMA_RANK, out of PLASMA_SIZE workers)
 * executes this function: each computes a partial norm over its share of
 * tiles into work[], then the partials are combined by a hand-rolled
 * binary-tree reduction synchronized with the ss_cond_* progress table.
 * The final norm is written to *result by rank 0 only.
 */
void plasma_pdlange(plasma_context_t *plasma)
{
    PLASMA_enum norm;
    PLASMA_desc A;
    double *work;              /* shared scratch: one (or two, Frobenius) doubles per worker */
    double *result;
    PLASMA_sequence *sequence;
    PLASMA_request *request;
    int m, n;                  /* current tile coordinates */
    int next_m;
    int next_n;
    int ldam;
    int step, lrank;           /* binary-tree reduction state */
    int X, X1, X2, Y, Y1, Y2;  /* effective tile extents, trimmed at submatrix borders */
    double* lwork;
    double normtmp, normtmp2;
    double *scale, *sumsq;     /* this worker's scaled sum-of-squares slots (Frobenius) */
    plasma_unpack_args_6(norm, A, work, result, sequence, request);
    *result = 0.0;
    /* Rank 0 zeroes the shared scratch before use; Frobenius needs two
       doubles per worker (scale, sumsq), the other norms one. */
    if (PLASMA_RANK == 0) {
        if ( norm == PlasmaFrobeniusNorm ) {
            memset(work, 0, 2*PLASMA_SIZE*sizeof(double));
        } else {
            memset(work, 0, PLASMA_SIZE*sizeof(double));
        }
    }
    ss_init(PLASMA_SIZE, 1, 0);
    switch (norm) {
    /*
     * PlasmaMaxNorm
     */
    case PlasmaMaxNorm:
        /* Tiles are dealt to workers round-robin in column-major order:
           start at linear tile index PLASMA_RANK, advance by PLASMA_SIZE. */
        n = 0;
        m = PLASMA_RANK;
        while (m >= A.mt && n < A.nt) {
            n++;
            m = m-A.mt;
        }
        while (n < A.nt) {
            next_m = m;
            next_n = n;
            next_m += PLASMA_SIZE;
            while (next_m >= A.mt && next_n < A.nt) {
                next_n++;
                next_m = next_m-A.mt;
            }
            /* First/last tile rows and columns may be partial (submatrix
               offsets A.i/A.j); compute the valid sub-tile extents. */
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            Y1 = n == 0 ? A.j %A.nb : 0;
            Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
            Y = Y2 - Y1;
            ldam = BLKLDD(A, m);
            CORE_dlange(PlasmaMaxNorm, X, Y, A(m, n, X1, Y1, ldam), ldam, NULL, &normtmp);
            /* keep the running max of this worker's tiles */
            if (normtmp > work[PLASMA_RANK])
                work[PLASMA_RANK] = normtmp;
            m = next_m;
            n = next_n;
        }
        ss_cond_set(PLASMA_RANK, 0, 1);
        break;
    /*
     * PlasmaOneNorm
     */
    case PlasmaOneNorm:
        /* One norm = max column sum: each worker owns whole tile columns
           (n = RANK, RANK+SIZE, ...) and accumulates column sums in lwork. */
        n = PLASMA_RANK;
        normtmp2 = 0.0;
        lwork = (double*)plasma_private_alloc(plasma, A.nb, PlasmaRealDouble);
        while (n < A.nt) {
            Y1 = n == 0 ? A.j %A.nb : 0;
            Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
            Y = Y2 - Y1;
            memset(lwork, 0, A.nb*sizeof(double));
            for (m = 0; m < A.mt; m++) {
                X1 = m == 0 ? A.i %A.mb : 0;
                X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
                X = X2 - X1;
                ldam = BLKLDD(A, m);
                CORE_dasum(
                    PlasmaColumnwise, PlasmaUpperLower,
                    X, Y,
                    A(m, n, X1, Y1, ldam), ldam,
                    lwork);
            }
            /* max over the accumulated column sums of this tile column */
            CORE_dlange(PlasmaMaxNorm, Y, 1, lwork, 1, NULL, &normtmp);
            if (normtmp > normtmp2)
                normtmp2 = normtmp;
            n += PLASMA_SIZE;
        }
        work[PLASMA_RANK] = normtmp2;
        ss_cond_set(PLASMA_RANK, 0, 1);
        plasma_private_free(plasma, lwork);
        break;
    /*
     * PlasmaInfNorm
     */
    case PlasmaInfNorm:
        /* Infinity norm = max row sum: symmetric to the one-norm case,
           with workers owning whole tile rows. */
        m = PLASMA_RANK;
        normtmp2 = 0.0;
        lwork = (double*)plasma_private_alloc(plasma, A.mb, PlasmaRealDouble);
        while (m < A.mt) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            memset(lwork, 0, A.mb*sizeof(double));
            for (n = 0; n < A.nt; n++) {
                Y1 = n == 0 ? A.j %A.nb : 0;
                Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                Y = Y2 - Y1;
                CORE_dasum(
                    PlasmaRowwise, PlasmaUpperLower,
                    X, Y,
                    A(m, n, X1, Y1, ldam), ldam,
                    lwork);
            }
            CORE_dlange(PlasmaMaxNorm, X, 1, lwork, 1, NULL, &normtmp);
            if (normtmp > normtmp2)
                normtmp2 = normtmp;
            m += PLASMA_SIZE;
        }
        work[PLASMA_RANK] = normtmp2;
        ss_cond_set(PLASMA_RANK, 0, 1);
        plasma_private_free(plasma, lwork);
        break;
    /*
     * PlasmaFrobeniusNorm
     */
    case PlasmaFrobeniusNorm:
        /* Frobenius norm via LAPACK-style (scale, sumsq) pairs to avoid
           overflow/underflow; same round-robin tile deal as the max norm. */
        n = 0;
        m = PLASMA_RANK;
        scale = work + 2 * PLASMA_RANK;
        sumsq = work + 2 * PLASMA_RANK + 1;
        *scale = 0.;
        *sumsq = 1.;
        while (m >= A.mt && n < A.nt) {
            n++;
            m = m-A.mt;
        }
        while (n < A.nt) {
            next_m = m;
            next_n = n;
            next_m += PLASMA_SIZE;
            while (next_m >= A.mt && next_n < A.nt) {
                next_n++;
                next_m = next_m-A.mt;
            }
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            Y1 = n == 0 ? A.j %A.nb : 0;
            Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
            Y = Y2 - Y1;
            ldam = BLKLDD(A, m);
            CORE_dgessq( X, Y, A(m, n, X1, Y1, ldam), ldam, scale, sumsq );
            m = next_m;
            n = next_n;
        }
        ss_cond_set(PLASMA_RANK, 0, 1);
        break;
    default:;
    }
    /* Binary-tree reduction of the per-worker partials. A worker with an
       even local rank waits for its partner at distance `step`, merges the
       partner's value, and moves one level up (step doubles); odd-ranked
       workers publish their progress and drop out. */
    if (norm != PlasmaFrobeniusNorm) {
        step = 1;
        lrank = PLASMA_RANK;
        while ( (lrank%2 == 0) && (PLASMA_RANK+step < PLASMA_SIZE) ) {
            ss_cond_wait(PLASMA_RANK+step, 0, step);
            work[PLASMA_RANK] = max(work[PLASMA_RANK], work[PLASMA_RANK+step]);
            lrank = lrank >> 1;
            step = step << 1;
            ss_cond_set(PLASMA_RANK, 0, step);
        }
        if (PLASMA_RANK > 0) {
            while( lrank != 0 ) {
                if (lrank%2 == 1) {
                    ss_cond_set(PLASMA_RANK, 0, step);
                    lrank = 0;
                } else {
                    lrank = lrank >> 1;
                    step = step << 1;
                    ss_cond_set(PLASMA_RANK, 0, step);
                }
            }
        }
        if (PLASMA_RANK == 0)
            *result = work[0];
    }
    else {
        step = 1;
        lrank = PLASMA_RANK;
        while ( (lrank%2 == 0) && (PLASMA_RANK+step < PLASMA_SIZE) ) {
            double scale1, scale2;
            double sumsq1, sumsq2;
            ss_cond_wait(PLASMA_RANK+step, 0, step);
            scale1 = work[ 2 * PLASMA_RANK ];
            sumsq1 = work[ 2 * PLASMA_RANK + 1 ];
            scale2 = work[ 2 * (PLASMA_RANK+step) ];
            sumsq2 = work[ 2 * (PLASMA_RANK+step) + 1 ];
            /* dlassq-style combine of two (scale, sumsq) pairs; a partner
               with scale2 == 0 (no tiles processed) contributes nothing. */
            if ( scale2 != 0. ){
                if( scale1 < scale2 ) {
                    work[2 * PLASMA_RANK+1] = sumsq2 + (sumsq1 * (( scale1 / scale2 ) * ( scale1 / scale2 )));
                    work[2 * PLASMA_RANK ] = scale2;
                } else {
                    work[2 * PLASMA_RANK+1] = sumsq1 + (sumsq2 * (( scale2 / scale1 ) * ( scale2 / scale1 )));
                }
            }
            lrank = lrank >> 1;
            step = step << 1;
            ss_cond_set(PLASMA_RANK, 0, step);
        }
        if (PLASMA_RANK > 0) {
            while( lrank != 0 ) {
                if (lrank%2 == 1) {
                    ss_cond_set(PLASMA_RANK, 0, step);
                    lrank = 0;
                } else {
                    lrank = lrank >> 1;
                    step = step << 1;
                    ss_cond_set(PLASMA_RANK, 0, step);
                }
            }
        }
        /* ||A||_F = scale * sqrt(sumsq) */
        if (PLASMA_RANK == 0)
            *result = work[0] * sqrt( work[1] );
    }
    ss_finalize();
}
/***************************************************************************//**
*
**/
/*
 * QUARK (dynamic-scheduler) implementation of the tile LAPACK dlange:
 * computes the norm of the tiled matrix A selected by `norm` and stores the
 * scalar in *result.  One task is submitted per tile (RT_CORE_*), followed
 * by a reduction task; the scheduler resolves dependencies through the
 * shared `lwork` scratch buffer.
 *
 * norm     - PlasmaMaxNorm / PlasmaOneNorm / PlasmaInfNorm /
 *            PlasmaFrobeniusNorm, or the experimental PlasmaSecondNorm
 *            (a power-iteration style estimate; see NOTE below).
 * A        - tiled matrix descriptor (possibly a submatrix: A.i/A.j offsets).
 * work     - caller workspace (unused here; scratch is allocated internally).
 * result   - output scalar; initialized to 0.0 before dispatch.
 * sequence/request - standard PLASMA asynchronous-call handles.
 */
void plasma_pdlange_quark(PLASMA_enum norm, PLASMA_desc A, double *work, double *result,
                          PLASMA_sequence *sequence, PLASMA_request *request)
{
    plasma_context_t *plasma;
    Quark_Task_Flags task_flags = Quark_Task_Flags_Initializer;
    double* lwork;
    int X, X1, X2, Y, Y1, Y2;
    int ldam;
    int m, n, k;
    int szeW;
    int nbworker = 1;
    //int PLASMA_SIZE2 = omp_get_num_threads();
    /* NOTE(review): several of the locals below (nnz, IONE, ISEED, res,
     * beta, zbeta, zone, ldak..ldcm, temp*) are unused leftovers from
     * related kernels and only e0/tol/normx/normSx/cnt/maxitr serve the
     * PlasmaSecondNorm branch. */
    double e0 = 0; double tol = 0.30; int nnz; int IONE=1; int ISEED[4] = {0,0,0,1}; int i, j; double normx; double normSx; int cnt = 0;int maxitr = 100;
    double *res; double beta; double zbeta;
    double zone = (double)1.0;
    int ldak, ldbn, ldbk, ldcm;
    int tempmm, tempnn, tempkn, tempkm;
    plasma = plasma_context_self();
    if (sequence->status != PLASMA_SUCCESS)
        return;
    QUARK_Task_Flag_Set(&task_flags, TASK_SEQUENCE, (intptr_t)sequence->quark_sequence);
    *result = 0.0;
    switch ( norm ) {
    /*
     * PlasmaMaxNorm: per-tile max into lwork[mt*n+m], then one max-reduction.
     */
    case PlasmaMaxNorm:
        szeW = A.mt*A.nt;
        lwork = (double*)plasma_shared_alloc(plasma, szeW, PlasmaRealDouble);
#pragma omp register ([szeW]lwork)
        memset(lwork, 0, szeW*sizeof(double));
        for(m = 0; m < A.mt; m++) {
            /* X1/X2 (resp. Y1/Y2) trim the first/last tile to the
             * submatrix extent given by A.i/A.m (resp. A.j/A.n). */
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            for(n = 0; n < A.nt; n++) {
                Y1 = n == 0 ? A.j %A.nb : 0;
                Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                Y = Y2 - Y1;
                RT_CORE_dlange_f1(
                    plasma->quark, &task_flags,
                    PlasmaMaxNorm, X, Y,
                    A(m, n, X1, Y1, ldam), ldam, ldam*Y,
                    0, &(lwork[A.mt*n+m]),
                    lwork, szeW);
            }
        }
        /* Reduce the mt x nt per-tile maxima into *result. */
        RT_CORE_dlange(
            plasma->quark, &task_flags,
            PlasmaMaxNorm, A.mt, A.nt,
            lwork, A.mt, szeW,
            0, result);
        RT_CORE_free(plasma->quark, &task_flags, lwork, szeW*sizeof(double));
        break;
    /*
     * PlasmaOneNorm: per-tile column sums accumulated into lwork[1..A.n],
     * then the max of the column sums is the one-norm.
     */
    case PlasmaOneNorm:
        lwork = (double*)plasma_shared_alloc(plasma, (A.n+1), PlasmaRealDouble);
#pragma omp register ([A.n+1]lwork)
        memset(lwork, 0, (A.n+1)*sizeof(double));
        for(m = 0; m < A.mt; m++) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            for(n = 0; n < A.nt; n++) {
                Y1 = n == 0 ? A.j %A.nb : 0;
                Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                Y = Y2 - Y1;
                /* Column sums land at offset +1; slot 0 stays 0 so the
                 * reduction below can safely span all A.n+1 entries. */
                RT_CORE_dasum_f1(
                    plasma->quark, &task_flags,
                    PlasmaColumnwise, PlasmaUpperLower, X, Y,
                    A(m, n, X1, Y1, ldam), ldam, ldam*Y,
                    &(lwork[n*A.nb+1]), A.nb,
                    lwork, A.n);
            }
        }
        RT_CORE_dlange(
            plasma->quark, &task_flags,
            PlasmaMaxNorm, A.n+1, 1,
            lwork, 1, A.n+1,
            0, result);
        RT_CORE_free(plasma->quark, &task_flags, lwork, (A.n+1)*sizeof(double));
        break;
    /*
     * PlasmaSecondNorm
     * NOTE(review): experimental 2-norm estimator (normest-style power
     * iteration seeded with the column sums).  The convergence loop is
     * currently hard-wired to two iterations (`cnt < 2`); the tolerance
     * test is commented out -- confirm before relying on accuracy.
     */
    case PlasmaSecondNorm:
        lwork = (double*)plasma_shared_alloc(plasma, (A.n+1), PlasmaRealDouble);
#pragma omp register ([A.n+1]lwork)
        memset(lwork, 0, (A.n+1)*sizeof(double));
        double *Sx = (double*)plasma_shared_alloc(plasma, (A.n+1), PlasmaRealDouble);
#pragma omp register ([A.n+1]Sx)
        PLASMA_desc *descx2 ; PLASMA_Desc_Create( &descx2 , lwork, PlasmaRealDouble, A.nb, 1, A.nb, A.n, 1, 0, 0, A.n, 1);
        PLASMA_desc descx = plasma_desc_submatrix(*descx2, 0, 0, A.n, 1);
        PLASMA_desc *descSx2; PLASMA_Desc_Create( &descSx2, Sx , PlasmaRealDouble, A.nb, 1, A.nb, A.n, 1, 0, 0, A.n, 1);
        PLASMA_desc descSx = plasma_desc_submatrix(*descSx2, 0, 0, A.n, 1);
        for(m = 0; m < A.mt; m++) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            for(n = 0; n < A.nt; n++) {
                Y1 = n == 0 ? A.j %A.nb : 0;
                Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                Y = Y2 - Y1;
                RT_CORE_dasum_f1(
                    plasma->quark, &task_flags,
                    PlasmaColumnwise, PlasmaUpperLower, X, Y,
                    A(m, n, X1, Y1, ldam), ldam, ldam*Y,
                    &(lwork[n*A.nb+1]), A.nb,
                    lwork, A.n);
            }
        }
        /* Seed: ||x||_F of the column-sum vector, then normalize x by it.
         * (Unlike the One/Inf cases, the whole lwork is used as the
         * iteration vector here.) */
        RT_CORE_dlange(
            plasma->quark, &task_flags,
            PlasmaFrobeniusNorm, A.n+1, 1,
            lwork, 1, A.n+1,
            0, result);
        RT_CORE_dscal( plasma->quark, &task_flags, A.n, result, lwork, 1);
        beta = 0.0; double conv = result[0]; double tolconv = tol*result[0];
        RT_CORE_tolconv(plasma->quark, &task_flags, &tol, result, &tolconv, &conv);
        //while ( conv > tolconv ){
        while ( cnt < 2 )
        //#pragma omp task in(&conv)
        {
            //if(conv < tolconv){return;}
            /* Sx = A*x; estimate ||A||_2 ~= ||Sx|| / ||x||, renormalize. */
            RT_CORE_dgemm_2norm( plasma->quark, &task_flags, PlasmaNoTrans, PlasmaNoTrans, 1.0, A, lwork, A.n, 0.0, Sx, A.n);
            RT_CORE_dlange(
                plasma->quark, &task_flags,
                PlasmaFrobeniusNorm, A.n, 1,
                lwork, 1, A.n,
                0, &normx);
            RT_CORE_dlange(
                plasma->quark, &task_flags,
                PlasmaFrobeniusNorm, A.n, 1,
                Sx, 1, A.n,
                0, &normSx);
            RT_CORE_dscal(plasma->quark, &task_flags, A.n, &normx, lwork, 1);
            //RT_CORE_dconv( &normx, &normSx, result, &e0);
            RT_CORE_dconv(plasma->quark, &task_flags, &normx, &normSx, result, &e0);
            RT_CORE_conv(plasma->quark, &task_flags, result, &e0, &conv);
            cnt = cnt + 1;
            if ( cnt > maxitr){
                printf("\n normest:notconverge \n");
                return;
            }
            //#pragma omp atomic
            //RT_CORE_conv(plasma->quark, &task_flags, result, &e0, &conv);
        }
        RT_CORE_free(plasma->quark, &task_flags, lwork, (A.n+1)*sizeof(double));
        RT_CORE_free(plasma->quark, &task_flags, Sx, (A.n+1)*sizeof(double));
        RT_dynamic_sync();
        break;
    /*
     * PlasmaInfNorm: per-tile row sums accumulated into lwork[1..A.m],
     * then the max of the row sums is the infinity norm.
     */
    case PlasmaInfNorm:
        lwork = (double*)plasma_shared_alloc(plasma, (A.m+1), PlasmaRealDouble);
#pragma omp register ([A.m+1]lwork)
        memset(lwork, 0, (A.m+1)*sizeof(double));
        for(m = 0; m < A.mt; m++) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            for(n = 0; n < A.nt; n++) {
                Y1 = n == 0 ? A.j %A.nb : 0;
                Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                Y = Y2 - Y1;
                RT_CORE_dasum_f1(
                    plasma->quark, &task_flags,
                    PlasmaRowwise, PlasmaUpperLower, X, Y,
                    A(m, n, X1, Y1, ldam), ldam, ldam*Y,
                    &(lwork[m*A.mb+1]), A.mb,
                    lwork, A.m);
            }
        }
        RT_CORE_dlange(
            plasma->quark, &task_flags,
            PlasmaMaxNorm, A.m+1, 1,
            lwork, 1, A.m+1,
            0, result);
        RT_CORE_free(plasma->quark, &task_flags, lwork, (A.m+1)*sizeof(double));
        break;
    /*
     * PlasmaFrobeniusNorm: per-worker (scale, sumsq) pairs, reduced by
     * dplssq into result = scale * sqrt(sumsq).
     */
    case PlasmaFrobeniusNorm:
        szeW = 2*(PLASMA_SIZE+1);
        printf("\n PLASMA_SIZE %d \n", PLASMA_SIZE);
        lwork = (double*)plasma_shared_alloc(plasma, szeW, PlasmaRealDouble);
#pragma omp register ([szeW]lwork)
        /* Each slot pair starts as (scale, sumsq) = (0, 1). */
        for(m = 0; m < PLASMA_SIZE+1; m++) {
            lwork[2*m  ] = 0.;
            lwork[2*m+1] = 1.;
        }
        k = 0;
        for(m = 0; m < A.mt; m++) {
            X1 = m == 0 ? A.i %A.mb : 0;
            X2 = m == A.mt-1 ? (A.i+A.m-1)%A.mb+1 : A.mb;
            X = X2 - X1;
            ldam = BLKLDD(A, m);
            for(n = 0; n < A.nt; n++) {
                Y1 = n == 0 ? A.j %A.nb : 0;
                Y2 = n == A.nt-1 ? (A.j+A.n-1)%A.nb+1 : A.nb;
                Y = Y2 - Y1;
                /* Round-robin tiles over the PLASMA_SIZE slot pairs. */
                k++; nbworker++;
                RT_CORE_dgessq_f1(
                    plasma->quark, &task_flags,
                    X, Y,
                    A(m, n, X1, Y1, ldam), ldam,
                    lwork + 2*k,
                    lwork + 2*k + 1,
                    lwork, szeW, OUTPUT | GATHERV );
                k = k % PLASMA_SIZE;
            }
        }
        RT_CORE_dplssq(
            plasma->quark, &task_flags,
            min(nbworker, PLASMA_SIZE+1), lwork, result );
        RT_CORE_free(plasma->quark, &task_flags, lwork, szeW*sizeof(double));
        break;
    default:;
    }
}
|
ompfor-static.c | /*
* Static schedule
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Print each iteration of a statically scheduled (chunk size 3) worksharing
 * loop together with the id of the executing thread.
 *
 * lower/upper/stride define the iteration set {lower, lower+stride, ...} < upper.
 * Returns 0 on completion.  (The original fell off the end of a non-void
 * function, which is undefined behavior if the caller uses the value, and
 * called omp_get_thread_num() unconditionally although <omp.h> is only
 * included when _OPENMP is defined.)
 *
 * Call from inside a parallel region so the `omp for` distributes work.
 */
int foo(int lower, int upper, int stride)
{
    int i;
#pragma omp for schedule(static,3)
    for (i = lower; i < upper; i += stride)
    {
        /* Guard the runtime call so the code also builds without OpenMP. */
#ifdef _OPENMP
        int tid = omp_get_thread_num();
#else
        int tid = 0;
#endif
        printf("Iteration %2d is carried out by thread %2d\n", i, tid);
    }
    return 0;
}
/*
 * Driver: open a parallel region, report the team size once (single),
 * then run the statically scheduled loop in foo().
 * Fix: omp_get_num_threads() was called unconditionally although <omp.h>
 * is only included under _OPENMP, breaking non-OpenMP builds.
 */
int main(void)
{
#pragma omp parallel
    {
#ifdef _OPENMP
#pragma omp single
        printf ("Using %d threads.\n",omp_get_num_threads());
#else
        printf ("Using %d threads.\n", 1);  /* serial fallback */
#endif
        foo(0,10,2);
    }
    return 0;
}
|
par_grid_hash.c | #include "par_grid_hash.h"
/*
 * 3D cellular-automaton driver (OpenMP): live cells are kept both in a 2D
 * grid of z-sorted lists (graph) and in a hashtable for O(1) membership.
 * Each generation: snapshot the live set, notify neighbours (lock per
 * (x,y) cell list), then apply the life/death rules in parallel.
 */
int main(int argc, char* argv[]){
    char* input_name;      /**< Input data file name */
    int generations = 0;   /**< Number of generations to process */
    int cube_size = 0;     /**< Size of the 3D space */
    GraphNode*** graph;    /**< Graph representation - 2D array of lists */
    Hashtable* hashtable;  /**< Contains the information of nodes that are alive */
    /* Iterator variables (g_it is unused -- leftover) */
    int g, i, j;
    GraphNode* g_it = NULL;
    Node* it = NULL;
    /* Lock variables: one lock per (x,y) column list of the graph */
    omp_lock_t** graph_lock;
    parseArgs(argc, argv, &input_name, &generations);
    int initial_alive = getAlive(input_name);
    /* Create the hashtable, sized from the initial live-cell count */
    hashtable = createHashtable(HASH_RATIO * initial_alive);
    graph = parseFile(input_name, hashtable, &cube_size);
    debug_print("Hashtable: Occupation %.1f, Average %.2f elements per bucket", (hashtable->occupied*1.0) / hashtable->size, (hashtable->elements*1.0) / hashtable->occupied);
    /* Initialize lock variables */
    graph_lock = (omp_lock_t**)malloc(cube_size * sizeof(omp_lock_t*));
    for(i = 0; i < cube_size; i++){
        graph_lock[i] = (omp_lock_t*) malloc(cube_size * sizeof(omp_lock_t));
        for(j = 0; j < cube_size; j++){
            omp_init_lock(&(graph_lock[i][j]));
        }
    }
    double start = omp_get_wtime(); // Start Timer
    /* Generations */
    for(g = 1; g <= generations; g++){
        /* Phase 1 (serial): convert hashtable to an array of currently
         * alive nodes so the following phases can be flat parallel loops */
        int num_alive = hashtable->elements;
        Node** vector = (Node**) malloc(sizeof(Node*) * num_alive);
        i = 0;
        for(j = 0; j < hashtable->size; j++){
            for (it = hashtable->table[j]; it != NULL; it = it->next){
                vector[i++] = it;
            }
        }
        /* Phase 2: create the num_alive * 6 matrix that will store the
         * (newly discovered) neighbours of each alive node */
        Node*** neighbour_vector = (Node***)malloc(sizeof(Node**) * num_alive);
        #pragma omp parallel for private(i, j)
        for(i = 0; i < num_alive; i++){
            neighbour_vector[i] = (Node**)malloc(sizeof(Node*) * 6);
            for(j = 0; j < 6; j++){
                neighbour_vector[i][j] = NULL;
            }
        }
        #pragma omp parallel for private(i, j)
        /* Phase 3: notify each of the neighbours, inserting them in the
         * graph if needed (per-(x,y) locks guard the list mutations) */
        for (i = 0; i < num_alive; i++){
            /* Get the coordinates from the alive node */
            coordinate x = vector[i]->x;
            coordinate y = vector[i]->y;
            coordinate z = vector[i]->z;
            /* Calculate the coordinates of the 6 neighbours (torus wrap) */
            coordinate x1, x2, y1, y2, z1, z2;
            x1 = (x+1) % cube_size; x2 = (x-1) < 0 ? (cube_size-1) : (x-1);
            y1 = (y+1) % cube_size; y2 = (y-1) < 0 ? (cube_size-1) : (y-1);
            z1 = (z+1) % cube_size; z2 = (z-1) < 0 ? (cube_size-1) : (z-1);
            /* Auxiliary matrix of neighbour coordinates for simpler code */
            coordinate c[6][3] = {{x1,y,z}, {x2,y,z}, {x,y1,z}, {x,y2,z}, {x,y,z1}, {x,y,z2}};
            GraphNode* ptr; // Auxiliary GraphNode pointer to a newly inserted node
            /* When a node is inserted in the graph, a pointer to it is stored in the hashtable */
            for(j = 0; j < 6; j++){
                if(graphNodeAddNeighbour( &(graph [c[j][X]] [c[j][Y]]), c[j][Z], &ptr, &(graph_lock [c[j][X]] [c[j][Y]]))){
                    neighbour_vector[i][j] = nodeInsert(NULL, c[j][X], c[j][Y], c[j][Z], ptr);
                }else{
                    neighbour_vector[i][j] = NULL;
                }
            }
        }
        #pragma omp parallel for private(it, i, j)
        /* Phase 4: determine the next state of each of the cells */
        for(i = 0; i < num_alive; i++){
            /* Process alive node in vector: read-and-reset its neighbour count */
            it = vector[i];
            unsigned char live_neighbours = it->ptr->neighbours;
            it->ptr->neighbours = 0;
            if(it->ptr->state == ALIVE){
                /* Dies by isolation (<2) or overcrowding (>4) */
                if(live_neighbours < 2 || live_neighbours > 4){
                    it->ptr->state = DEAD;
                    graphNodeRemove(&(graph[it->x][it->y]), it->z, &(graph_lock[it->x][it->y]));
                    hashtableRemove(hashtable, it->x, it->y, it->z);
                }
            }
            /* Process its neighbours (only the ones newly added this round) */
            for(j = 0; j < 6; j++){
                it = neighbour_vector[i][j];
                if(it != NULL){
                    unsigned char live_neighbours = it->ptr->neighbours;
                    it->ptr->neighbours = 0;
                    if(it->ptr->state == DEAD){
                        /* Birth rule: exactly 2 or 3 live neighbours */
                        if(live_neighbours == 2 || live_neighbours == 3){
                            it->ptr->state = ALIVE;
                            hashtableWrite(hashtable, it->x, it->y, it->z, it->ptr);
                        }
                        else{
                            graphNodeRemove(&(graph[it->x][it->y]), it->z, &(graph_lock[it->x][it->y]));
                        }
                    }
                }
            }
        }
        /* Cleanup matrix and vector*/
        for(i = 0; i < num_alive; i++){
            for(j = 0; j < 6; j++){
                free(neighbour_vector[i][j]);
            }
            free(neighbour_vector[i]);
        }
        free(neighbour_vector);
        free(vector);
    }
    double end = omp_get_wtime(); // Stop Timer
    /* Print the final set of live cells */
    printAndSortActive(graph, cube_size);
    time_print(" %f\n", end - start);
    /* Free resources.
     * NOTE(review): the graph_lock row arrays and graph_lock itself are
     * never free()d -- minor leak at exit; confirm if intentional. */
    freeGraph(graph, cube_size);
    hashtableFree(hashtable);
    for(i = 0; i < cube_size; i++){
        for(j=0; j<cube_size; j++){
            omp_destroy_lock(&(graph_lock[i][j]));
        }
    }
    free(input_name);
    return 0;
}
/* Graph related functions */
/*
 * Allocate an empty size x size grid of GraphNode adjacency lists.
 * Every cell starts as a NULL (empty) list.  Aborts the program on
 * allocation failure (the original dereferenced a NULL pointer on OOM).
 * The caller releases the grid with freeGraph().
 */
GraphNode*** initGraph(int size){
    int i,j;
    GraphNode*** graph = (GraphNode***) malloc(sizeof(GraphNode**) * size);
    if (graph == NULL){
        fprintf(stderr, "initGraph: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < size; i++){
        graph[i] = (GraphNode**) malloc(sizeof(GraphNode*) * size);
        if (graph[i] == NULL){
            fprintf(stderr, "initGraph: out of memory\n");
            exit(EXIT_FAILURE);
        }
        for (j = 0; j < size; j++){
            graph[i][j] = NULL;  /* empty z-list */
        }
    }
    return graph;
}
/*
 * Release a grid produced by initGraph(): delete every per-cell list,
 * then each row array, and finally the grid itself.
 * A NULL grid is ignored.
 */
void freeGraph(GraphNode*** graph, int size){
    int row, col;
    if (graph == NULL)
        return;
    for (row = 0; row < size; row++){
        for (col = 0; col < size; col++)
            graphNodeDelete(graph[row][col]);
        free(graph[row]);
    }
    free(graph);
}
/*
 * Emit every ALIVE cell as "x y z" lines, with each (x,y) column's list
 * sorted by ascending z first so output order is deterministic.
 */
void printAndSortActive(GraphNode*** graph, int cube_size){
    int x,y;
    GraphNode* node;
    for (x = 0; x < cube_size; ++x){
        for (y = 0; y < cube_size; ++y){
            /* Sort the list by ascending coordinate z */
            graphNodeSort(&(graph[x][y]));
            node = graph[x][y];
            while (node != NULL){
                if (node->state == ALIVE)
                    out_print("%d %d %d\n", x, y, node->z);
                node = node->next;
            }
        }
    }
}
/*
 * Write every graph node (regardless of state) as "x y z" lines to the
 * generated output file "<base>.<generations>.<OUT_EXT>", each (x,y)
 * list sorted by ascending z.  Exits if the file cannot be opened.
 */
void printSortedGraphToFile(GraphNode*** graph, int cube_size, char* input_name, int generations){
    int x,y;
    GraphNode* node;
    char* output_name = generateOuputFilename(input_name, generations);
    FILE* output = fopen(output_name, "w");
    if (output == NULL){
        err_print("Could not open output file");
        exit(EXIT_FAILURE);
    }
    for (x = 0; x < cube_size; ++x){
        for (y = 0; y < cube_size; ++y){
            /* Sort the list by ascending coordinate z */
            graphNodeSort(&(graph[x][y]));
            for (node = graph[x][y]; node != NULL; node = node->next)
                fprintf(output, "%d %d %d\n", x, y, node->z);
        }
    }
    free(output_name);
    fclose(output);
}
/*
 * Build the output file name "<base>.<generations>.<OUT_EXT>", where <base>
 * is input_name with its extension (text from the last '.') stripped.
 * Fix: findLastDot() returns NULL when the name contains no usable dot;
 * the original passed that NULL to strlen() (undefined behavior).  Now a
 * dot-less name keeps the full input_name as the base.
 * Returns a heap-allocated string the caller must free().
 */
char* generateOuputFilename(char* input_name, int generations){
    char generation[GEN_BUFFER_SIZE];
    sprintf(generation, "%d", generations);
    char* in_ext = findLastDot(input_name);
    /* No extension found -> strip nothing. */
    int aux_length = strlen(input_name) - (in_ext != NULL ? strlen(in_ext) : 0);
    char* aux = malloc(sizeof(char) * (aux_length + 1));
    strncpy(aux, input_name, aux_length);
    aux[aux_length] = '\0';
    /* base + '.' + generation + '.' + OUT_EXT + '\0'  ->  3 extra chars */
    char* output_name = malloc(sizeof(char) * (aux_length + strlen(generation) + strlen(OUT_EXT)) + 3);
    sprintf(output_name, "%s.%s.%s", aux, generation, OUT_EXT);
    free(aux);
    return output_name;
}
/*
 * Return a pointer to the last '.' in str, or NULL when there is none.
 * Deliberately reproduces the original behavior of never matching a dot
 * at position 0 (so dot-files like ".in" yield NULL).
 */
char* findLastDot(char* str){
    char* last = NULL;
    char* p = str;
    while (*p != '\0'){
        ++p;                 /* index 0 is intentionally skipped */
        if (*p == '.')
            last = p;
    }
    return last;
}
/* File parsing functions */
/*
 * Parse the command line "prog <data_file.in> <number_generations>".
 * On success stores a heap-allocated copy of the file name in *file
 * (caller frees) and the positive generation count in *generations.
 * Prints usage and exits on any invalid input or allocation failure.
 * Fix: the original strcpy()'d through the malloc() result and only
 * checked it for NULL afterwards -- the check now precedes the write.
 */
void parseArgs(int argc, char* argv[], char** file, int* generations){
    if (argc == 3){
        char* file_name = malloc(sizeof(char) * (strlen(argv[1]) + 1));
        if (file_name != NULL){
            strcpy(file_name, argv[1]);
            *file = file_name;
            *generations = atoi(argv[2]);
            if (*generations > 0)
                return;
        }
    }
    printf("Usage: %s [data_file.in] [number_generations]", argv[0]);
    exit(EXIT_FAILURE);
}
/*
 * Count the live cells declared in the input file: every line after the
 * first (which holds the cube size) describes one live cell, so the
 * result is (number of newlines) - 1.
 * Returns EXIT_FAILURE if the file cannot be opened (kept for backward
 * compatibility with existing callers).
 * Fix: replaced the `while(!feof(fp))` anti-pattern with a read loop that
 * stops when fgetc() itself reports EOF; the count is unchanged (the old
 * loop's extra iteration read EOF, which never matched '\n').
 */
int getAlive(char* file){
    FILE* fp = fopen(file, "r");
    int alive_num = 0;
    int c;
    if (fp == NULL){
        return EXIT_FAILURE;
    }
    while((c = fgetc(fp)) != EOF){
        if(c == '\n'){
            alive_num++;
        }
    }
    fclose(fp);
    return alive_num - 1;  /* discount the size-header line */
}
/*
 * Read the input file: the first parseable line gives the cube size and
 * every following "x y z" line marks a live cell, which is inserted into
 * a freshly allocated graph and recorded in the hashtable.
 * Exits if the file cannot be opened or contains no size header (the
 * original returned an uninitialized pointer in that case -- UB).
 */
GraphNode*** parseFile(char* input_name, Hashtable* hashtable, int* cube_size){
    int first = 0;
    char line[BUFFER_SIZE];
    int x, y, z;
    FILE* fp = fopen(input_name, "r");
    if(fp == NULL){
        err_print("Please input a valid file name");
        exit(EXIT_FAILURE);
    }
    GraphNode*** graph = NULL;  /* stays NULL until the header is parsed */
    while(fgets(line, sizeof(line), fp)){
        if(!first){
            if(sscanf(line, "%d\n", cube_size) == 1){
                first = 1;
                graph = initGraph(*cube_size);
            }
        }else{
            if(sscanf(line, "%d %d %d\n", &x, &y, &z) == 3){
                /* Insert live nodes in the graph and the update set */
                graph[x][y] = graphNodeInsert(graph[x][y], z, ALIVE);
                hashtableWrite(hashtable, x, y, z, (GraphNode*)(graph[x][y]));
            }
        }
    }
    fclose(fp);
    if(graph == NULL){
        err_print("Input file is missing the cube size header");
        exit(EXIT_FAILURE);
    }
    return graph;
}
|
multiply.h | #pragma once
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <omp.h>
#include "_cuda.h"
using std::vector;
using std::unordered_map;
using std::max;
// Scale x[0..N) by v, in place.
template <class T>
void multiply(T *x, int N, T v) {
  for (T *p = x, *end = x + N; p != end; ++p)
    *p *= v;
}
// Vector overload: scales every element of x in place.
template <class T>
void multiply(vector<T>& x, T v) {
  multiply(x.data(), int(x.size()), v);
}
// Map overload: scales every mapped value in place.
template <class K, class T>
void multiply(unordered_map<K, T>& x, T v) {
  for (auto& kv : x)
    kv.second *= v;
}
// Element-wise product: a[i] = x[i] * y[i] for every i in [0, N).
template <class T>
void multiply(T *a, T *x, T *y, int N) {
  for (int idx = 0; idx < N; ++idx)
    a[idx] = x[idx] * y[idx];
}
// Vector overload (a, x, y must share a.size() usable elements).
template <class T>
void multiply(vector<T>& a, vector<T>& x, vector<T>& y) {
  multiply(a.data(), x.data(), y.data(), int(a.size()));
}
// Map overload: for each key of x, a[key] = x[key] * y[key].
// Note: operator[] may default-insert missing keys into y (as original).
template <class K, class T>
void multiply(unordered_map<K, T>& a, unordered_map<K, T>& x, unordered_map<K, T>& y) {
  for (auto& kv : x) {
    const K& key = kv.first;
    a[key] = x[key] * y[key];
  }
}
// Scale only the entries of x whose indices appear in `is`, in place.
template <class T, class C>
void multiplyAt(T *x, C&& is , T v) {
  for (int idx : is)
    x[idx] *= v;
}
// Vector overload of multiplyAt.
template <class T, class C>
void multiplyAt(vector<T>& x, C&& is, T v) {
  multiplyAt(x.data(), is, v);
}
// Map overload: scale the values at the given keys (missing keys are
// default-inserted by operator[], matching the original behavior).
template <class K, class T, class C>
void multiplyAt(unordered_map<K, T>& x, C&& ks, T v) {
  for (auto&& key : ks)
    x[key] *= v;
}
// OpenMP-parallel in-place scaling of x[0..N) by v.
template <class T>
void multiplyOmp(T *x, int N, T v) {
  #pragma omp parallel for
  for (int idx = 0; idx < N; idx++)
    x[idx] *= v;
}
// Vector overload of multiplyOmp.
template <class T>
void multiplyOmp(vector<T>& x, T v) {
  multiplyOmp(x.data(), int(x.size()), v);
}
// Device loop: starting at element i, scale every DI-th element of a[0..N)
// by v (each thread covers a strided slice of the array).
template <class T>
__device__ void multiplyKernelLoop(T *a, int N, T v, int i, int DI) {
  for (; i<N; i+=DI)
    a[i] *= v;
}
// Kernel entry point.  DEFINE presumably binds t/b/B/G to the thread index,
// block index, block dim and grid dim (see _cuda.h -- confirm), giving each
// thread start B*b+t and stride G*B over the array.
template <class T>
__global__ void multiplyKernel(T *a, int N, T v) {
  DEFINE(t, b, B, G);
  multiplyKernelLoop(a, N, v, B*b+t, G*B);
}
// Host wrapper: copy a to the device, scale it by v in place on the GPU,
// and copy the result back into a.  TRY aborts/reports on CUDA errors.
template <class T>
void multiplyCuda(T *a, int N, T v) {
  int threads = _THREADS;
  int blocks = min(ceilDiv(N, threads), _BLOCKS);
  size_t A1 = N * sizeof(T);
  T *aD;
  TRY( cudaMalloc(&aD, A1) );
  TRY( cudaMemcpy(aD, a, A1, cudaMemcpyHostToDevice) );
  multiplyKernel<<<blocks, threads>>>(aD, N, v);
  TRY( cudaMemcpy(a, aD, A1, cudaMemcpyDeviceToHost) );
  TRY( cudaFree(aD) );
}
// Vector overload of the host wrapper.
template <class T>
void multiplyCuda(vector<T>& x, T v) {
  multiplyCuda(x.data(), x.size(), v);
}
// Device loop: element-wise product a[i] = x[i] * y[i] over a strided slice.
template <class T>
__device__ void multiplyKernelLoop(T *a, T *x, T *y, int N, int i, int DI) {
  for (; i<N; i+=DI)
    a[i] = x[i] * y[i];
}
// Kernel entry point for the element-wise product (same DEFINE convention
// as the scalar kernel above).
template <class T>
__global__ void multiplyKernel(T *a, T *x, T* y, int N) {
  DEFINE(t, b, B, G);
  multiplyKernelLoop(a, x, y, N, B*b+t, G*B);
}
// Host wrapper: a = x * y element-wise on the GPU.
// NOTE(review): parameter v is unused, and no separate device buffer is
// allocated for a -- the kernel writes the product into xD in place and
// xD is then copied back into host a (host x itself is left unchanged).
template <class T>
void multiplyCuda(T *a, T *x, T *y, int N, T v) {
  int threads = _THREADS;
  int blocks = min(ceilDiv(N, threads), _BLOCKS);
  size_t A1 = N * sizeof(T);
  T *xD, *yD;
  TRY( cudaMalloc(&xD, A1) );
  TRY( cudaMalloc(&yD, A1) );
  TRY( cudaMemcpy(xD, x, A1, cudaMemcpyHostToDevice) );
  TRY( cudaMemcpy(yD, y, A1, cudaMemcpyHostToDevice) );
  multiplyKernel<<<blocks, threads>>>(xD, xD, yD, N);
  TRY( cudaMemcpy(a, xD, A1, cudaMemcpyDeviceToHost) );
  TRY( cudaFree(xD) );
  TRY( cudaFree(yD) );
}
// Vector overload.  NOTE(review): passes a.size() as N and relies on the
// unused trailing parameter's default-less slot -- call site supplies v
// implicitly via template deduction at the pointer overload... confirm
// callers; here a.size() must not exceed x/y sizes.
template <class T>
void multiplyCuda(vector<T>& a, vector<T>& x, vector<T> &y) {
  multiplyCuda(a.data(), x.data(), y.data(), a.size());
}
|
bitmap.h | /*!
* Copyright 2014 by Contributors
* \file bitmap.h
* \brief a simple implement of bitmap
* NOTE: bitmap is only threadsafe per word access, remember this when using bitmap
* \author Tianqi Chen
*/
#ifndef XGBOOST_COMMON_BITMAP_H_
#define XGBOOST_COMMON_BITMAP_H_
#include <dmlc/omp.h>
#include <vector>
namespace xgboost {
namespace common {
/*! \brief bit map that contains set of bit indicators */
/*! \brief bit map that contains set of bit indicators */
struct BitMap {
  /*! \brief internal data structure: one uint32_t word per 32 bits */
  std::vector<uint32_t> data;
  /*!
   * \brief resize the bitmap to be certain size
   * \param size the size of bitmap
   */
  inline void Resize(size_t size) {
    data.resize((size + 31U) >> 5, 0);
  }
  /*!
   * \brief query the i-th position of bitmap
   * \param i the position in
   */
  inline bool Get(size_t i) const {
    return (data[i >> 5] >> (i & 31U)) & 1U;
  }
  /*!
   * \brief set i-th position to true
   * \param i position index
   */
  inline void SetTrue(size_t i) {
    // 1U: left-shifting signed 1 by 31 is undefined behavior
    data[i >> 5] |= (1U << (i & 31U));
  }
  /*! \brief initialize the value of bit map from vector of bool*/
  inline void InitFromBool(const std::vector<int>& vec) {
    this->Resize(vec.size());
    // parallel over the words that are fully covered by the input
    auto nsize = static_cast<bst_omp_uint>(vec.size() / 32);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint i = 0; i < nsize; ++i) {
      uint32_t res = 0;
      for (int k = 0; k < 32; ++k) {
        uint32_t bit = vec[(i << 5) | k];
        res |= (bit << k);
      }
      data[i] = res;
    }
    // Handle the trailing partial word, if any.  The original compared
    // nsize (a word count) against vec.size() (a bit count) and restarted
    // the serial tail at bit index nsize: that cleared an already computed
    // word (losing bit 0 whenever vec.size() was an exact nonzero multiple
    // of 32) and redid up to 31 bits' work serially.
    const size_t full_bits = static_cast<size_t>(nsize) * 32;
    if (full_bits != vec.size()) {
      data.back() = 0;  // clear stale bits left over from a previous use
      for (size_t i = full_bits; i < vec.size(); ++i) {
        if (vec[i]) this->SetTrue(i);
      }
    }
  }
  /*! \brief clear the bitmap, set all places to false */
  inline void Clear() {
    std::fill(data.begin(), data.end(), 0U);
  }
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_BITMAP_H_
|
GB_subassign_15.c | //------------------------------------------------------------------------------
// GB_subassign_15: C(I,J)<!M> += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 15: C(I,J)<!M> += scalar ; using S
// M: present
// Mask_comp: true
// C_replace: false
// accum: present
// A: scalar
// S: constructed
// C: not bitmap, but can be full since no zombies are inserted in that case
// M: not bitmap
#include "GB_subassign_methods.h"
/* Method 15 kernel: C(I,J)<!M> += scalar, using the extracted pattern S.
 * Phase 1 updates existing entries / revives zombies and counts the
 * pending tuples each task will insert; phase 2 inserts those tuples.
 * Both phases walk the full IxJ space, so each IxJ task merges its slice
 * of S and M per vector j.  The GB_* macros (from GB_subassign_methods.h)
 * declare the task list, matrix accessors, and the final return value. */
GrB_Info GB_subassign_15
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_BinaryOp accum,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M
    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------
    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------
    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_GET_C ;      // C must not be bitmap
    const int64_t Cnvec = C->nvec ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    GB_GET_MASK ;
    GB_GET_S ;
    GB_GET_ACCUM_SCALAR ;
    //--------------------------------------------------------------------------
    // Method 15: C(I,J)<!M> += scalar ; using S
    //--------------------------------------------------------------------------
    // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
    // required.  The sparsity of !M cannot be exploited.
    // Methods 13, 15, 17, and 19 are very similar.
    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------
    GB_SUBASSIGN_IXJ_SLICE ;
    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------
        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------
        for (int64_t j = kfirst ; j <= klast ; j++)
        {
            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------
            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------
            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M> += scalar
            //------------------------------------------------------------------
            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------
                // INT64_MAX is the "exhausted list" sentinel, so comparisons
                // below still work once pS/pM run off the end.
                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------
                int64_t i = iA ;
                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------
                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }
                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;
                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------
                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // both S (i,j) and A (i,j) present
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_C_S_LOOKUP ;
                            GB_withaccum_C_A_1_scalar ;
                        }
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // deferred to phase 2; only counted here
                            task_pending++ ;
                        }
                    }
                }
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }
    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------
    GB_PENDING_CUMSUM ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------
        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------
        for (int64_t j = kfirst ; j <= klast ; j++)
        {
            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------
            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------
            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M> += scalar
            //------------------------------------------------------------------
            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------
                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------
                int64_t i = iA ;
                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------
                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }
                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;
                //--------------------------------------------------------------
                // accumulate the entry
                --------------------------------------------------------------
                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // entry already handled in phase 1; just advance S
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }
    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------
    // GB_SUBASSIGN_WRAPUP supplies the function's return statement
    GB_SUBASSIGN_WRAPUP ;
}
|
master.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// GCC generates code that does not call the runtime for the master construct
// XFAIL: gcc
#define USE_PRIVATE_TOOL 1
#include "callback.h"
#include <omp.h>
/* Test driver: a two-thread parallel region in which only the master
 * thread increments x inside a master construct; the printed addresses
 * are matched by the FileCheck patterns at the bottom of the file. */
int main() {
  int x = 0;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      /* print_fuzzy_address / print_current_address (from callback.h)
       * emit the codeptr_ra values the CHECK lines compare against. */
      print_fuzzy_address(1);
      x++;
    }
    print_current_address(2);
  }
  printf("%" PRIu64 ": x=%d\n", ompt_get_thread_data()->value, x);
  return 0;
}
/* OMPT master-construct callback: logs the begin/end events with the
 * thread id and the runtime-supplied code pointer (checked by FileCheck).
 * ompt_scope_beginend is invalid for this construct and aborts the test. */
static void on_ompt_callback_master(ompt_scope_endpoint_t endpoint,
                                    ompt_data_t *parallel_data,
                                    ompt_data_t *task_data,
                                    const void *codeptr_ra) {
  if (endpoint == ompt_scope_begin) {
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_master_begin: codeptr_ra=%p\n",
           ompt_get_thread_data()->value, codeptr_ra);
  } else if (endpoint == ompt_scope_end) {
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_master_end: codeptr_ra=%p\n",
           ompt_get_thread_data()->value, codeptr_ra);
  } else if (endpoint == ompt_scope_beginend) {
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
}
/* OMPT thread-begin callback: assigns each new thread a unique id in its
 * thread_data and logs the thread type and id. */
static void on_ompt_callback_thread_begin(ompt_thread_t thread_type,
                                          ompt_data_t *thread_data) {
  if (thread_data->ptr != NULL)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  uint64_t self = ompt_get_thread_data()->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         self, ompt_thread_t_values[thread_type], thread_type,
         thread_data->value);
}
/* OMPT tool initializer: resolve the runtime entry points this test needs
 * and register only the master-construct callback (the thread_begin
 * callback above is defined but intentionally not registered here). */
int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                    ompt_data_t *tool_data) {
  ompt_set_callback = (ompt_set_callback_t)lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t)lookup("ompt_get_unique_id");
  ompt_get_thread_data = (ompt_get_thread_data_t)lookup("ompt_get_thread_data");
  register_callback(ompt_callback_master);
  /* First line FileCheck anchors on. */
  printf("0: NULL_POINTER=%p\n", (void *)NULL);
  return 1; // success
}
/* OMPT finalizer: nothing to clean up for this tool. */
void ompt_finalize(ompt_data_t *tool_data) {}
/* OMPT entry point called by the OpenMP runtime at startup; returning a
 * non-NULL result activates the tool with the initialize/finalize pair. */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,
                                                            &ompt_finalize, 0};
  return &ompt_start_tool_result;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_master'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_master_begin:
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_master_end:
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS_END:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS_END]]
|
AlloySparseMatrix.h | /*
* Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef ALLOYSPARSEMATRIX_H_
#define ALLOYSPARSEMATRIX_H_
#include "AlloyVector.h"
#include "cereal/types/vector.hpp"
#include "cereal/types/list.hpp"
#include "cereal/types/tuple.hpp"
#include "cereal/types/map.hpp"
#include "cereal/types/memory.hpp"
#include <vector>
#include <list>
#include <map>
namespace aly {
// Sparse row-major matrix of C-channel vector entries.  Each row is an
// ordered map from column index to value; absent entries read as zero.
// Fixes vs. original: the "i < 0"/"j < 0" halves of the bounds checks were
// always false (size_t is unsigned) and have been removed; transpose() now
// iterates by const auto& (the original bound map entries to
// const std::pair<size_t, vec<T,C>>&, which copied every entry because the
// map's value_type has a const key).
template<class T, int C> struct SparseMatrix {
private:
	std::vector<std::map<size_t, vec<T, C>>>storage;
public:
	size_t rows, cols;
	// Empty 0x0 matrix.
	SparseMatrix() :rows(0), cols(0)
	{
	}
	// Cereal serialization of the dimensions and row storage.
	template<class Archive> void serialize(Archive & archive)
	{
		archive(CEREAL_NVP(rows), CEREAL_NVP(cols), cereal::make_nvp(MakeString() << "matrix" << C, storage));
	}
	// Mutable access to row i; throws on out-of-range row.
	std::map<size_t, vec<T, C>>& operator[](size_t i)
	{
		if (i >= rows)throw std::runtime_error(MakeString() << "Index (" << i << ",*) exceeds matrix bounds [" << rows << "," << cols << "]");
		return storage[i];
	}
	// Read-only access to row i; throws on out-of-range row.
	const std::map<size_t, vec<T, C>>& operator[](size_t i) const
	{
		if (i >= rows)throw std::runtime_error(MakeString() << "Index (" << i << ",*) exceeds matrix bounds [" << rows << "," << cols << "]");
		return storage[i];
	}
	// rows x cols matrix with all entries implicitly zero.
	SparseMatrix(size_t rows, size_t cols) :storage(rows), rows(rows), cols(cols)
	{
	}
	// Resize; existing entries in retained rows are kept.
	void resize(size_t rows, size_t cols){
		this->rows=rows;
		this->cols=cols;
		storage.resize(rows);
	}
	// Store a vector value at (i, j); throws on out-of-range indices.
	void set(size_t i, size_t j, const vec<T, C>& value)
	{
		if (i >= rows || j >= cols)throw std::runtime_error(MakeString() << "Index (" << i << "," << j << ") exceeds matrix bounds [" << rows << "," << cols << "]");
		storage[i][j] = value;
	}
	// Store a scalar, broadcast across the C channels.
	void set(size_t i, size_t j, const T& value)
	{
		if (i >= rows || j >= cols)throw std::runtime_error(MakeString() << "Index (" << i << "," << j << ") exceeds matrix bounds [" << rows << "," << cols << "]");
		storage[i][j] = vec<T, C>(value);
	}
	// Mutable entry access; creates a zero entry if (i, j) is absent.
	vec<T, C>& operator()(size_t i, size_t j)
	{
		if (i >= rows || j >= cols)throw std::runtime_error(MakeString() << "Index (" << i << "," << j << ") exceeds matrix bounds [" << rows << "," << cols << "]");
		return storage[i][j];
	}
	// Read entry (i, j); absent entries read as zero (storage untouched).
	vec<T, C> get(size_t i, size_t j) const
	{
		if (i >= rows || j >= cols)throw std::runtime_error(MakeString() << "Index (" << i << "," << j << ") exceeds matrix bounds [" << rows << "," << cols << "]");
		if (storage[i].find(j) == storage[i].end())
		{
			return vec<T, C>(T(0));
		}
		else
		{
			return storage[i].at(j);
		}
	}
	// Read-only call operator; delegates to get().
	vec<T, C> operator()(size_t i, size_t j) const
	{
		return get(i, j);
	}
	// Return the transpose as a new cols x rows matrix.
	SparseMatrix<T, C> transpose() const
	{
		SparseMatrix<T, C> M(cols, rows);
		for (int i = 0;i < (int)storage.size();i++)
		{
			for (const auto& iv : storage[i])
			{
				M.set(iv.first, i, iv.second);
			}
		}
		return M;
	}
	// M x N identity: ones (per channel) on the leading diagonal.
	static SparseMatrix<T, C> identity(size_t M,size_t N)
	{
		SparseMatrix<T, C> A(M,N);
		int K=(int)aly::min(M,N);
		// Safe to parallelize: each k touches a distinct row map.
#pragma omp parallel for
		for (int k=0;k<K;k++)
		{
			A[k][k]=vec<T,C>(T(1));
		}
		return A;
	}
	// Square diagonal matrix built from the entries of v.
	static SparseMatrix<T, C> diagonal(const Vector<T,C>& v)
	{
		SparseMatrix<T, C> A(v.size(),v.size());
#pragma omp parallel for
		for (int k = 0;k<(int)v.size();k++)
		{
			A[k][k] = v[k];
		}
		return A;
	}
};
// Stream each row as "M[i,*]=<col:value> <col:value> ...", one row per line.
template<class A, class B, class T, int C> std::basic_ostream<A, B> & operator <<(
		std::basic_ostream<A, B> & ss, const SparseMatrix<T, C>& M) {
	for (int r = 0; r < (int) M.rows; r++) {
		ss << "M[" << r << ",*]=";
		for (const auto& entry : M[r]) {
			ss << "<" << entry.first << ":" << entry.second << "> ";
		}
		ss << std::endl;
	}
	return ss;
}
// Multiply a scalar-valued (C==1) sparse matrix by a C-channel vector:
// each stored matrix entry scales all channels of the corresponding
// input element. Accumulates in double precision, then casts back to T.
template<class T, int C> Vector<T, C> operator*(const SparseMatrix<T, 1>& A,
		const Vector<T, C>& v) {
	Vector<T, C> out(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, 1>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * (double) pr.second.x;
		}
		out[i] = vec<T, C>(sum);
	}
	return out;
}
// In-place entrywise scale: every stored entry of A is multiplied by v.
template<class T, int C> SparseMatrix<T, C>& operator*=(
		SparseMatrix<T, C>& A, const vec<T, C>& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		// auto& binds to the map's pair<const size_t, vec<T,C>>; the original
		// non-const pair<size_t, ...>& could not bind to it and failed to
		// compile when instantiated. Updating pr.second in place also avoids
		// the redundant A[i][pr.first] map lookup.
		for (auto& pr : A[i]) {
			pr.second = pr.second * v;
		}
	}
	return A;
}
// In-place entrywise divide of every stored entry of A by v.
// The parameter is now non-const: the original declared A const yet wrote
// through it and returned it as a non-const reference, which cannot compile
// when instantiated (the const operator[] yields a const map).
template<class T, int C> SparseMatrix<T, C>& operator/=(
		SparseMatrix<T, C>& A, const vec<T, C>& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		// auto& binds to the map's pair<const size_t, ...> element type.
		for (auto& pr : A[i]) {
			pr.second = pr.second / v;
		}
	}
	return A;
}
// In-place add of v to every stored entry of A. Entries not stored remain
// implicit zeros, matching the original behavior.
template<class T, int C> SparseMatrix<T, C>& operator+=(
		SparseMatrix<T, C>& A, const vec<T, C>& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		// auto& binds to pair<const size_t, vec<T,C>>; the original non-const
		// pair<size_t, ...>& could not bind and failed to compile when used.
		for (auto& pr : A[i]) {
			pr.second = pr.second + v;
		}
	}
	return A;
}
// In-place subtract of v from every stored entry of A.
template<class T, int C> SparseMatrix<T, C>& operator-=(
		SparseMatrix<T, C>& A, const vec<T, C>& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		for (auto& pr : A[i]) {
			pr.second = pr.second - v;
		}
	}
	return A;
}
// In-place scale of every stored entry by the scalar v.
template<class T, int C> SparseMatrix<T, C>& operator*=(
		SparseMatrix<T, C>& A, const T& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		// auto& binds to the map's pair<const size_t, ...>; the original
		// non-const pair<size_t, ...>& could not bind and failed to compile.
		for (auto& pr : A[i]) {
			pr.second = pr.second * v;
		}
	}
	return A;
}
// In-place divide of every stored entry by the scalar v.
template<class T, int C> SparseMatrix<T, C>& operator/=(
		SparseMatrix<T, C>& A, const T& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		for (auto& pr : A[i]) {
			pr.second = pr.second / v;
		}
	}
	return A;
}
// In-place add of the scalar v to every stored entry (implicit zeros unchanged).
template<class T, int C> SparseMatrix<T, C>& operator+=(
		SparseMatrix<T, C>& A, const T& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		for (auto& pr : A[i]) {
			pr.second = pr.second + v;
		}
	}
	return A;
}
// In-place subtract of the scalar v from every stored entry.
template<class T, int C> SparseMatrix<T, C>& operator-=(
		SparseMatrix<T, C>& A, const T& v) {
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		for (auto& pr : A[i]) {
			pr.second = pr.second - v;
		}
	}
	return A;
}
// Sparse matrix-matrix product out = A*B; throws if inner dimensions differ.
// Parallel over output rows: each thread writes only out[i], so the per-row
// maps are never shared across threads.
template<class T, int C> SparseMatrix<T, C> operator*(
		const SparseMatrix<T, C>& A, const SparseMatrix<T, C>& B) {
	if (A.cols != B.rows)
		throw std::runtime_error(
				MakeString()
						<< "Cannot multiply matrices. Inner dimensions do not match. "
						<< "[" << A.rows << "," << A.cols << "] * [" << B.rows
						<< "," << B.cols << "]");
	SparseMatrix<T, C> out(A.rows, B.cols);
#pragma omp parallel for
	for (int i = 0; i < (int) out.rows; i++) { //a[i,*]
		for (std::pair<size_t, vec<T, C>> pr1 : A[i]) { //a[i,k]
			int k = (int) pr1.first;
			for (std::pair<size_t, vec<T, C>> pr2 : B[k]) { //b[k,j]
				int j = (int) pr2.first;
				// map operator[] default-constructs absent entries before +=.
				out[i][j] += pr1.second * pr2.second;
			}
		}
	}
	return out;
}
// out = v * A (applied to each stored entry; implicit zeros unchanged).
template<class T, int C> SparseMatrix<T, C> operator*(const T& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = v * entry.second;
		}
	}
	return out;
}
// out = v / A (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator/(const T& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = v / entry.second;
		}
	}
	return out;
}
// out = v + A (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator+(const T& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = vec<T, C>(v) + entry.second;
		}
	}
	return out;
}
// out = v - A (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator-(const T& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = vec<T, C>(v) - entry.second;
		}
	}
	return out;
}
// out = v * A with a per-channel vector on the left (stored entries only).
template<class T, int C> SparseMatrix<T, C> operator*(const vec<T, C>& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = v * entry.second;
		}
	}
	return out;
}
// out = v / A (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator/(const vec<T, C>& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = v / entry.second;
		}
	}
	return out;
}
// out = v + A (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator+(const vec<T, C>& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = v + entry.second;
		}
	}
	return out;
}
// out = v - A (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator-(const vec<T, C>& v,
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = v - entry.second;
		}
	}
	return out;
}
// out = A - v (applied to each stored entry; implicit zeros unchanged).
template<class T, int C> SparseMatrix<T, C> operator-(
		const SparseMatrix<T, C>& A, const vec<T, C>& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second - v;
		}
	}
	return out;
}
// out = A + v (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator+(
		const SparseMatrix<T, C>& A, const vec<T, C>& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second + v;
		}
	}
	return out;
}
// out = A * v (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator*(
		const SparseMatrix<T, C>& A, const vec<T, C>& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second * v;
		}
	}
	return out;
}
// out = A / v (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator/(
		const SparseMatrix<T, C>& A, const vec<T, C>& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second / v;
		}
	}
	return out;
}
// out = A - v for scalar v (applied to each stored entry).
template<class T, int C> SparseMatrix<T, C> operator-(
		const SparseMatrix<T, C>& A, const T& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second - vec<T, C>(v);
		}
	}
	return out;
}
// out = A + v for scalar v (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator+(
		const SparseMatrix<T, C>& A, const T& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second + vec<T, C>(v);
		}
	}
	return out;
}
// out = A * v for scalar v (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator*(
		const SparseMatrix<T, C>& A, const T& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second * v;
		}
	}
	return out;
}
// out = A / v for scalar v (entrywise over stored entries).
template<class T, int C> SparseMatrix<T, C> operator/(
		const SparseMatrix<T, C>& A, const T& v) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = entry.second / v;
		}
	}
	return out;
}
// Entrywise negation of the stored entries of A.
template<class T, int C> SparseMatrix<T, C> operator-(
		const SparseMatrix<T, C>& A) {
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : A[r]) {
			out[r][entry.first] = -entry.second;
		}
	}
	return out;
}
// out = A + B over the union of sparsity patterns; throws on dimension mismatch.
template<class T, int C> SparseMatrix<T, C> operator+(
		const SparseMatrix<T, C>& A, const SparseMatrix<T, C>& B) {
	if (A.rows != B.rows || A.cols != B.cols)
		throw std::runtime_error(
				MakeString() << "Cannot add matrices. Dimensions do not match. "
						<< "[" << A.rows << "," << A.cols << "] * [" << B.rows
						<< "," << B.cols << "]");
	// Start from a copy of A, then fold B's entries in row by row.
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : B[r]) {
			out[r][entry.first] += entry.second;
		}
	}
	return out;
}
// out = A - B over the union of sparsity patterns; throws on dimension mismatch.
template<class T, int C> SparseMatrix<T, C> operator-(
		const SparseMatrix<T, C>& A, const SparseMatrix<T, C>& B) {
	if (A.rows != B.rows || A.cols != B.cols)
		throw std::runtime_error(
				MakeString()
						<< "Cannot subtract matrices. Dimensions do not match. "
						<< "[" << A.rows << "," << A.cols << "] * [" << B.rows
						<< "," << B.cols << "]");
	SparseMatrix<T, C> out = A;
#pragma omp parallel for
	for (int r = 0; r < (int) out.rows; r++) {
		for (const auto& entry : B[r]) {
			out[r][entry.first] -= entry.second;
		}
	}
	return out;
}
// A += B over the union of sparsity patterns; throws on dimension mismatch.
template<class T, int C> SparseMatrix<T, C>& operator+=(
		SparseMatrix<T, C>& A, const SparseMatrix<T, C>& B) {
	if (A.rows != B.rows || A.cols != B.cols)
		throw std::runtime_error(
				MakeString() << "Cannot add matrices. Dimensions do not match. "
						<< "[" << A.rows << "," << A.cols << "] * [" << B.rows
						<< "," << B.cols << "]");
#pragma omp parallel for
	for (int r = 0; r < (int) A.rows; r++) {
		for (const auto& entry : B[r]) {
			A[r][entry.first] += entry.second;
		}
	}
	return A;
}
// A -= B over the union of sparsity patterns; throws on dimension mismatch.
template<class T, int C> SparseMatrix<T, C>& operator-=(
		SparseMatrix<T, C>& A, const SparseMatrix<T, C>& B) {
	if (A.rows != B.rows || A.cols != B.cols)
		throw std::runtime_error(
				MakeString()
						<< "Cannot subtract matrices. Dimensions do not match. "
						<< "[" << A.rows << "," << A.cols << "] * [" << B.rows
						<< "," << B.cols << "]");
#pragma omp parallel for
	for (int r = 0; r < (int) A.rows; r++) {
		for (const auto& entry : B[r]) {
			A[r][entry.first] -= entry.second;
		}
	}
	return A;
}
// out = A*v for a scalar (C==1) matrix A; resizes out to A.rows and
// accumulates each row dot product in double precision.
template<class T, int C> void Multiply(Vector<T, C>& out,
		const SparseMatrix<T, 1>& A, const Vector<T, C>& v) {
	out.resize(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, 1>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * (double) pr.second.x;
		}
		out[i] = vec<T, C>(sum);
	}
}
// out = b + A*v for a scalar (C==1) matrix A.
template<class T, int C> void AddMultiply(Vector<T, C>& out,
		const Vector<T, C>& b, const SparseMatrix<T, 1>& A,
		const Vector<T, C>& v) {
	out.resize(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, 1>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * (double) pr.second.x;
		}
		out[i] = b[i] + vec<T, C>(sum);
	}
}
// out = b - A*v for a scalar (C==1) matrix A.
template<class T, int C> void SubtractMultiply(Vector<T, C>& out,
		const Vector<T, C>& b, const SparseMatrix<T, 1>& A,
		const Vector<T, C>& v) {
	out.resize(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, 1>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * (double) pr.second.x;
		}
		out[i] = b[i] - vec<T, C>(sum);
	}
}
// out = A*v with per-channel (componentwise) products; double accumulation.
template<class T, int C> Vector<T, C> operator*(const SparseMatrix<T, C>& A,
		const Vector<T, C>& v) {
	Vector<T, C> out(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, C>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * vec<double, C>(pr.second);
		}
		out[i] = vec<T, C>(sum);
	}
	return out;
}
// out = A*v (componentwise); resizes out to A.rows.
template<class T, int C> void MultiplyVec(Vector<T, C>& out,
		const SparseMatrix<T, C>& A, const Vector<T, C>& v) {
	out.resize(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, C>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * vec<double, C>(pr.second);
		}
		out[i] = vec<T, C>(sum);
	}
}
// out = b + A*v (componentwise). Fix: cast A.rows to int in the loop bound
// like every sibling kernel; the original compared the signed index against
// the unsigned rows member directly (sign-compare).
template<class T, int C> void AddMultiplyVec(Vector<T, C>& out,
		const Vector<T, C>& b, const SparseMatrix<T, C>& A,
		const Vector<T, C>& v) {
	out.resize(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, C>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * vec<double, C>(pr.second);
		}
		out[i] = b[i] + vec<T, C>(sum);
	}
}
// out = b - A*v (componentwise).
template<class T, int C> void SubtractMultiplyVec(Vector<T, C>& out,
		const Vector<T, C>& b, const SparseMatrix<T, C>& A,
		const Vector<T, C>& v) {
	out.resize(A.rows);
#pragma omp parallel for
	for (int i = 0; i < (int) A.rows; i++) {
		vec<double, C> sum(0.0);
		for (const std::pair<size_t, vec<T, C>>& pr : A[i]) {
			sum += vec<double, C>(v[pr.first]) * vec<double, C>(pr.second);
		}
		out[i] = b[i] - vec<T, C>(sum);
	}
}
// Convenience aliases: SparseMatrix{channels}{f|d} for float/double elements.
typedef SparseMatrix<float, 4> SparseMatrix4f;
typedef SparseMatrix<float, 3> SparseMatrix3f;
typedef SparseMatrix<float, 2> SparseMatrix2f;
typedef SparseMatrix<float, 1> SparseMatrix1f;
typedef SparseMatrix<double, 4> SparseMatrix4d;
typedef SparseMatrix<double, 3> SparseMatrix3d;
typedef SparseMatrix<double, 2> SparseMatrix2d;
typedef SparseMatrix<double, 1> SparseMatrix1d;
}
#endif
|
GB_unaryop__abs_int32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_fp32
// op(A') function: GB_tran__abs_int32_fp32
// C type: int32_t
// A type: float
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = GB_IABS (aij)
// A (input) entry type.
#define GB_ATYPE \
    float

// C (output) entry type.
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

// output accessor: Cx [p]
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij) \
    int32_t z ; GB_CAST_SIGNED(z,aij,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */  \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = GB_IABS (cast (Ax [p])) for all anz entries.
// Aliasing Cx == Ax is safe: each entry is read then written at position p.
GrB_Info GB_unop__abs_int32_fp32
(
    int32_t *Cx,        // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Static schedule: entries are split evenly across nthreads.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose kernel body comes from including
// GB_unaryop_transpose.c with the type/op macros defined above.
GrB_Info GB_tran__abs_int32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/animate.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels)
{
  ssize_t
    n;

  /*
    Release each per-thread pixel row, then the table itself.
  */
  assert(pixels != (MagickPixelPacket **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixels[n] == (MagickPixelPacket *) NULL)
      continue;
    pixels[n]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[n]);
  }
  pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/*
  Allocate one MagickPixelPacket row (image->columns wide) per worker thread
  and initialize each packet from the image; returns NULL on allocation
  failure after releasing any partially built rows.
  NOTE(review): the number_images parameter is unused in this allocator.
*/
static MagickPixelPacket **AcquirePixelThreadSet(const Image *image,
  const size_t number_images)
{
  MagickPixelPacket
    **pixels;

  register ssize_t
    i,
    j;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (MagickPixelPacket **) NULL)
    return((MagickPixelPacket **) NULL);
  /* Zero the table so a partial-failure cleanup can tell what was allocated. */
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(image->columns,
      sizeof(**pixels));
    if (pixels[i] == (MagickPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
    for (j=0; j < (ssize_t) image->columns; j++)
      GetMagickPixelPacket(image,&pixels[i][j]);
  }
  return(pixels);
}
/*
  Return the larger of x and y (ties yield y, matching the original x > y test).
*/
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator ordering MagickPixelPacket values by decreasing
  intensity.
  NOTE(review): each intensity is truncated to int before subtracting;
  extreme (e.g. HDRI) intensities could overflow int -- confirm the
  expected intensity range before relying on this ordering.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const MagickPixelPacket
    *color_1,
    *color_2;

  int
    intensity;

  color_1=(const MagickPixelPacket *) x;
  color_2=(const MagickPixelPacket *) y;
  intensity=(int) MagickPixelIntensity(color_2)-(int)
    MagickPixelIntensity(color_1);
  return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Apply one evaluate operator to a single quantum using the caller-supplied
  constant 'value'.  Noise operators draw from 'random_info'.  The result is
  not clamped here; result stays 0.0 for operators that take no action.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
  const Quantum pixel,const MagickEvaluateOperator op,
  const MagickRealType value)
{
  MagickRealType
    result;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(MagickRealType) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a
        positive result.  It differs from % or fmod() which returns a
        'truncated modulus' result, where floor() is replaced by trunc()
        and could return a negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* a zero divisor acts as 1.0, avoiding division by zero */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
        pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        ImpulseNoise,value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5));
      break;
    }
    case LogEvaluateOperator:
    {
      /* near-zero pixels keep result=0.0, avoiding a log() blowup */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
          pixel+1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(MagickRealType) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* accumulates pixel+value; presumably averaged by the caller -- confirm */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(MagickRealType) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(MagickRealType) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        PoissonNoise,value);
      break;
    }
    case PowEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
        (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5));
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* accumulates squares; presumably the caller applies the final sqrt -- confirm */
      result=(MagickRealType) (pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(MagickRealType) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
        QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
        pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        UniformNoise,value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  Choose a canvas for an image sequence: clone the frame carrying the most
  channels (3 for RGB, +1 for matte, +1 for a CMYK index), sized to the
  maximum columns/rows found anywhere in the list.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *p,
    *q;

  size_t
    columns,
    number_channels,
    rows;

  q=images;
  columns=images->columns;
  rows=images->rows;
  number_channels=0;
  for (p=images; p != (Image *) NULL; p=p->next)
  {
    size_t
      channels;

    channels=3;
    if (p->matte != MagickFalse)
      channels+=1;
    if (p->colorspace == CMYKColorspace)
      channels+=1;
    if (channels > number_channels)
      {
        number_channels=channels;
        q=p;
      }
    if (p->columns > columns)
      columns=p->columns;
    if (p->rows > rows)
      rows=p->rows;
  }
  return(CloneImage(q,columns,rows,MagickTrue,exception));
}
/*
  EvaluateImage() applies the operator/value to all composite channels by
  delegating to EvaluateImageChannel().
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,CompositeChannels,op,value,exception));
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**magick_restrict evaluate_pixels,
zero;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images,number_images);
if (evaluate_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict evaluate_indexes;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_images; i++)
evaluate_pixel[i]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),op,evaluate_pixel[i].red);
evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),op,evaluate_pixel[i].green);
evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),op,evaluate_pixel[i].blue);
evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
*indexes,op,evaluate_pixel[i].index);
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+i,ClampToQuantum(
evaluate_pixel[i/2].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict evaluate_indexes;
register ssize_t
i,
x;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
evaluate_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].red);
evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].green);
evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].blue);
evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].index);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (op == MeanEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red/=number_images;
evaluate_pixel[x].green/=number_images;
evaluate_pixel[x].blue/=number_images;
evaluate_pixel[x].opacity/=number_images;
evaluate_pixel[x].index/=number_images;
}
if (op == RootMeanSquareEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=sqrt((double) evaluate_pixel[x].red/
number_images);
evaluate_pixel[x].green=sqrt((double) evaluate_pixel[x].green/
number_images);
evaluate_pixel[x].blue=sqrt((double) evaluate_pixel[x].blue/
number_images);
evaluate_pixel[x].opacity=sqrt((double) evaluate_pixel[x].opacity/
number_images);
evaluate_pixel[x].index=sqrt((double) evaluate_pixel[x].index/
number_images);
}
if (op == MultiplyEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
{
evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
  EvaluateImageChannel() applies the arithmetic, relational, or logical
  operator `op' with the constant operand `value' to each selected channel
  of every pixel in the image, clamping each result to quantum range.  The
  image is promoted to DirectClass first.  Returns MagickFalse if the
  promotion fails or any pixel row cannot be read or synced.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
  const ChannelType channel,const MagickEvaluateOperator op,const double value,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;  /* one generator per OpenMP thread */

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): rows run in parallel only when key == ~0UL; presumably a
    seeded random generator forces serial execution so random-operator
    results stay reproducible -- confirm against magick_number_threads().
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* A previous row failed; skip the remaining work for this row. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        result;

      if ((channel & RedChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value);
          /* Mean is the average of the pixel and the constant operand. */
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelRed(q,ClampToQuantum(result));
        }
      if ((channel & GreenChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelGreen(q,ClampToQuantum(result));
        }
      if ((channel & BlueChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelBlue(q,ClampToQuantum(result));
        }
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Without a matte channel, operate on the raw opacity value;
            otherwise operate on the alpha value.
          */
          if (image->matte == MagickFalse)
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelOpacity(q,ClampToQuantum(result));
            }
          else
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelAlpha(q,ClampToQuantum(result));
            }
        }
      /* Index (e.g. CMYK black) channel, only when an index queue exists. */
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x),
            op,value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelIndex(indexes+x,ClampToQuantum(result));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the progress callback across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EvaluateImageChannel)
#endif
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies an arithmetic, relational, or logical operator to
% an image, parameterized by one or more values.  Use these operations to
% lighten or darken an image, to increase or decrease contrast in an image,
% or to produce the "negative" of an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() maps a single quantum through the requested function,
  using up to four parameters (missing parameters take documented
  defaults), and returns the result clamped to quantum range.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    value;

  register ssize_t
    i;

  (void) exception;
  value=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: Horner evaluation of the constants, highest order
        first (e.g. c0*x^3 + c1*x^2 + c2*x + c3), rescaled to quantum
        range.
      */
      value=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        value=value*QuantumScale*pixel + parameters[i];
      value*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /*
        Sinusoid: parameters are frequency, phase (degrees), amplitude,
        and bias.
      */
      double
        amplitude,
        bias,
        frequency,
        phase;

      frequency=1.0;
      phase=0.0;
      amplitude=0.5;
      bias=0.5;
      if (number_parameters >= 1)
        frequency=parameters[0];
      if (number_parameters >= 2)
        phase=parameters[1];
      if (number_parameters >= 3)
        amplitude=parameters[2];
      if (number_parameters >= 4)
        bias=parameters[3];
      value=(MagickRealType) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel + phase/360.0) )) + bias ) );
      break;
    }
    case ArcsinFunction:
    {
      /*
        Arcsin (pegged at the range limits for out-of-domain inputs):
        parameters are width, center, range, and bias.
      */
      double
        bias,
        center,
        range,
        width;

      width=1.0;
      center=0.5;
      range=1.0;
      bias=0.5;
      if (number_parameters >= 1)
        width=parameters[0];
      if (number_parameters >= 2)
        center=parameters[1];
      if (number_parameters >= 3)
        range=parameters[2];
      if (number_parameters >= 4)
        bias=parameters[3];
      value=2.0/width*(QuantumScale*pixel - center);
      if (value <= -1.0)
        value=bias - range/2.0;
      else
        if (value >= 1.0)
          value=bias + range/2.0;
        else
          value=(MagickRealType) (range/MagickPI*asin((double) value)+bias);
      value*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /*
        Arctan: parameters are slope, center, range, and bias.
      */
      double
        bias,
        center,
        range,
        slope;

      slope=1.0;
      center=0.5;
      range=1.0;
      bias=0.5;
      if (number_parameters >= 1)
        slope=parameters[0];
      if (number_parameters >= 2)
        center=parameters[1];
      if (number_parameters >= 3)
        range=parameters[2];
      if (number_parameters >= 4)
        bias=parameters[3];
      value=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      value=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        value) + bias ) );
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(value));
}
/*
  FunctionImage() is a convenience wrapper that applies the function to
  all composite channels of the image.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception));
}
/*
  FunctionImageChannel() maps each selected channel of every pixel through
  ApplyFunction() with the supplied function and parameters.  The image is
  promoted to DirectClass first; an OpenCL fast path is tried before the
  CPU loop when available.  Returns MagickFalse if the promotion fails or
  any pixel row cannot be read or synced.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the accelerated (OpenCL) path; fall back to the CPU loop below. */
  status=AccelerateFunctionImage(image,channel,function,number_parameters,
    parameters,exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* A previous row failed; skip the remaining work for this row. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Without a matte channel, operate on the raw opacity value;
            otherwise operate on the alpha value.
          */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
              number_parameters,parameters,exception));
        }
      /* Index (e.g. CMYK black) channel, only when an index queue exists. */
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
          number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the progress callback across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FunctionImageChannel)
#endif
        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageChannelEntropy method is:
%
% MagickBooleanType GetImageChannelEntropy(const Image *image,
% const ChannelType channel,double *entropy,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageEntropy() is a convenience wrapper that averages the entropy
  over all composite channels.
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  return(GetImageChannelEntropy(image,CompositeChannels,entropy,exception));
}
/*
  GetImageChannelEntropy() returns, in *entropy, the per-channel entropies
  from GetImageChannelStatistics() averaged over the channels selected by
  `channel' (opacity only when the image has a matte; the index/black
  channel only for CMYK images).  Returns MagickFalse only if the
  statistics cannot be computed.
*/
MagickExport MagickBooleanType GetImageChannelEntropy(const Image *image,
  const ChannelType channel,double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].entropy=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[RedChannel].entropy;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[GreenChannel].entropy;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlueChannel].entropy;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[OpacityChannel].entropy;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlackChannel].entropy;
      channels++;
    }
  /*
    Fix: guard against an empty channel selection; the original divided by
    zero when no channel matched (e.g. OpacityChannel without a matte).
  */
  if (channels != 0)
    channel_statistics[CompositeChannels].entropy/=channels;
  *entropy=channel_statistics[CompositeChannels].entropy;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() is a convenience wrapper that computes the extrema
  over all composite channels.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
    exception));
}
/*
  GetImageChannelExtrema() rounds the floating-point channel range from
  GetImageChannelRange() to integral minimum and maximum values.
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    lower,
    upper;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageChannelRange(image,channel,&lower,&upper,exception);
  /* Round the endpoints to the nearest integers. */
  *minima=(size_t) ceil(lower-0.5);
  *maxima=(size_t) floor(upper+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() is a convenience wrapper that computes kurtosis and
  skewness over all composite channels.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  return(GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception));
}
/*
  GetImageChannelKurtosis() computes the excess kurtosis and skewness of
  the selected channels by accumulating the raw first through fourth
  moments over every selected channel sample, then combining them with the
  standard moment formulas.  Returns MagickFalse if any pixel row cannot
  be read.
*/
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Fix: accumulate alpha consistently.  The original mixed
            GetPixelOpacity() into the square and cube sums while the mean
            and fourth-power sums used GetPixelAlpha(), corrupting the
            moments for images with transparency.
          */
          mean+=GetPixelAlpha(p);
          sum_squares+=(double) GetPixelAlpha(p)*GetPixelAlpha(p);
          sum_cubes+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p);
          sum_fourth_power+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p)*GetPixelAlpha(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          double
            index;

          index=(double) GetPixelIndex(indexes+x);
          mean+=index;
          sum_squares+=index*index;
          sum_cubes+=index*index*index;
          sum_fourth_power+=index*index*index*index;
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      /* Convert raw sums to per-sample moments. */
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  standard_deviation=sqrt(sum_squares-(mean*mean));
  if (standard_deviation != 0.0)
    {
      /* Excess kurtosis: E[(X-mu)^4]/sigma^4 - 3. */
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      /* Skewness: E[(X-mu)^3]/sigma^3. */
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() is a convenience wrapper that computes the mean and
  standard deviation over all composite channels.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  return(GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception));
}
/*
  GetImageChannelMean() returns, in *mean and *standard_deviation, the
  per-channel mean and standard deviation from GetImageChannelStatistics()
  averaged over the channels selected by `channel' (opacity is reported as
  alpha, i.e. QuantumRange minus the opacity mean, and only when the image
  has a matte; the index/black channel only for CMYK images).  Returns
  MagickFalse only if the statistics cannot be computed.
*/
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].standard_deviation;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].standard_deviation;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].standard_deviation;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      /* Report the opacity channel as alpha (QuantumRange-opacity). */
      channel_statistics[CompositeChannels].mean+=
        (QuantumRange-channel_statistics[OpacityChannel].mean);
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].standard_deviation;
      channels++;
    }
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      /*
        Fix: the original added the composite accumulator to itself
        (doubling it) instead of adding the black channel's standard
        deviation.
      */
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].standard_deviation;
      channels++;
    }
  /*
    Fix: guard against an empty channel selection; the original divided by
    zero when no channel matched.
  */
  if (channels != 0)
    {
      channel_statistics[CompositeChannels].mean/=channels;
      channel_statistics[CompositeChannels].standard_deviation/=channels;
    }
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageChannelMoments method is:
%
% ChannelMoments *GetImageChannelMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelMoments *GetImageChannelMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8

  ChannelMoments
    *channel_moments;

  /*
    Raw/central image moments Mpq per channel (index CompositeChannels holds
    the all-channel average).  Orders used: 00,01,02,03,10,11,12,20,21,22,30.
  */
  double
    M00[CompositeChannels+1],
    M01[CompositeChannels+1],
    M02[CompositeChannels+1],
    M03[CompositeChannels+1],
    M10[CompositeChannels+1],
    M11[CompositeChannels+1],
    M12[CompositeChannels+1],
    M20[CompositeChannels+1],
    M21[CompositeChannels+1],
    M22[CompositeChannels+1],
    M30[CompositeChannels+1];

  MagickPixelPacket
    pixel;

  PointInfo
    centroid[CompositeChannels+1];

  ssize_t
    channel,
    channels,
    y;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=CompositeChannels+1UL;
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(length,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);  /* allocation failed: NULL */
  (void) ResetMagickMemory(channel_moments,0,length*sizeof(*channel_moments));
  (void) ResetMagickMemory(centroid,0,sizeof(centroid));
  (void) ResetMagickMemory(M00,0,sizeof(M00));
  (void) ResetMagickMemory(M01,0,sizeof(M01));
  (void) ResetMagickMemory(M02,0,sizeof(M02));
  (void) ResetMagickMemory(M03,0,sizeof(M03));
  (void) ResetMagickMemory(M10,0,sizeof(M10));
  (void) ResetMagickMemory(M11,0,sizeof(M11));
  (void) ResetMagickMemory(M12,0,sizeof(M12));
  (void) ResetMagickMemory(M20,0,sizeof(M20));
  (void) ResetMagickMemory(M21,0,sizeof(M21));
  (void) ResetMagickMemory(M22,0,sizeof(M22));
  (void) ResetMagickMemory(M30,0,sizeof(M30));
  GetMagickPixelPacket(image,&pixel);
  /*
    Pass 1: accumulate the raw moments M00, M10, M01 per channel; these give
    the total mass and the center of mass below.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute center of mass (centroid).
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M00[RedChannel]+=QuantumScale*pixel.red;
      M10[RedChannel]+=x*QuantumScale*pixel.red;
      M01[RedChannel]+=y*QuantumScale*pixel.red;
      M00[GreenChannel]+=QuantumScale*pixel.green;
      M10[GreenChannel]+=x*QuantumScale*pixel.green;
      M01[GreenChannel]+=y*QuantumScale*pixel.green;
      M00[BlueChannel]+=QuantumScale*pixel.blue;
      M10[BlueChannel]+=x*QuantumScale*pixel.blue;
      M01[BlueChannel]+=y*QuantumScale*pixel.blue;
      if (image->matte != MagickFalse)
        {
          M00[OpacityChannel]+=QuantumScale*pixel.opacity;
          M10[OpacityChannel]+=x*QuantumScale*pixel.opacity;
          M01[OpacityChannel]+=y*QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M00[IndexChannel]+=QuantumScale*pixel.index;
          M10[IndexChannel]+=x*QuantumScale*pixel.index;
          M01[IndexChannel]+=y*QuantumScale*pixel.index;
        }
      p++;
    }
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute center of mass (centroid).  A near-zero mass channel gets the
      geometric image center; MagickEpsilon guards the divisions below.
    */
    if (M00[channel] < MagickEpsilon)
      {
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  /*
    Pass 2: accumulate the central moments (relative to each channel's
    centroid) up to third order, plus M22.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute the image moments.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M11[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M20[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*QuantumScale*pixel.red;
      M02[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M21[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M12[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M22[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M30[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale*
        pixel.red;
      M03[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*QuantumScale*pixel.green;
      M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale*
        pixel.green;
      M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*QuantumScale*pixel.blue;
      M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale*
        pixel.blue;
      M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      if (image->matte != MagickFalse)
        {
          M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*QuantumScale*pixel.opacity;
          M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)*
            QuantumScale*pixel.opacity;
          M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*QuantumScale*pixel.index;
          M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)*
            QuantumScale*pixel.index;
          M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
        }
      p++;
    }
  }
  /*
    Fold the per-channel moments into the composite (average) channel; the
    divisor counts RGB plus optional opacity and CMYK index channels.
  */
  channels=3;
  M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]);
  M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]);
  M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]);
  M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]);
  M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]);
  M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]);
  M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]);
  M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]);
  M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]);
  M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]);
  M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]);
  if (image->matte != MagickFalse)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[OpacityChannel];
      M01[CompositeChannels]+=M01[OpacityChannel];
      M02[CompositeChannels]+=M02[OpacityChannel];
      M03[CompositeChannels]+=M03[OpacityChannel];
      M10[CompositeChannels]+=M10[OpacityChannel];
      M11[CompositeChannels]+=M11[OpacityChannel];
      M12[CompositeChannels]+=M12[OpacityChannel];
      M20[CompositeChannels]+=M20[OpacityChannel];
      M21[CompositeChannels]+=M21[OpacityChannel];
      M22[CompositeChannels]+=M22[OpacityChannel];
      M30[CompositeChannels]+=M30[OpacityChannel];
    }
  if (image->colorspace == CMYKColorspace)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[IndexChannel];
      M01[CompositeChannels]+=M01[IndexChannel];
      M02[CompositeChannels]+=M02[IndexChannel];
      M03[CompositeChannels]+=M03[IndexChannel];
      M10[CompositeChannels]+=M10[IndexChannel];
      M11[CompositeChannels]+=M11[IndexChannel];
      M12[CompositeChannels]+=M12[IndexChannel];
      M20[CompositeChannels]+=M20[IndexChannel];
      M21[CompositeChannels]+=M21[IndexChannel];
      M22[CompositeChannels]+=M22[IndexChannel];
      M30[CompositeChannels]+=M30[IndexChannel];
    }
  M00[CompositeChannels]/=(double) channels;
  M01[CompositeChannels]/=(double) channels;
  M02[CompositeChannels]/=(double) channels;
  M03[CompositeChannels]/=(double) channels;
  M10[CompositeChannels]/=(double) channels;
  M11[CompositeChannels]/=(double) channels;
  M12[CompositeChannels]/=(double) channels;
  M20[CompositeChannels]/=(double) channels;
  M21[CompositeChannels]/=(double) channels;
  M22[CompositeChannels]/=(double) channels;
  M30[CompositeChannels]/=(double) channels;
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, & intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
      M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
    /*
      atan() only resolves the angle to a half-quadrant; correct it into the
      proper quadrant using the signs of M11 and (M20-M02).
    */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
          channel_moments[channel].ellipse_angle+=0.0;
        else
          if ((M20[channel]-M02[channel]) < 0.0)
            channel_moments[channel].ellipse_angle+=90.0;
          else
            channel_moments[channel].ellipse_angle+=0.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=180.0;
        }
      else
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=0.0;
        }
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y/
      (channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Normalize image moments: eta(p,q) = mu(p,q)/mu(0,0)^(1+(p+q)/2), which
      makes the Hu invariants below scale invariant.
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].I[0]=M20[channel]+M02[channel];
    channel_moments[channel].I[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].I[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /*
    If the second pixel pass aborted early (GetVirtualPixels failed), free
    the result; RelinquishMagickMemory() returns NULL, so NULL is returned.
  */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelPerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImageChannelPerceptualHash method is:
%
% ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
  const Image *image,ExceptionInfo *exception)
{
  ChannelMoments
    *moments;

  ChannelPerceptualHash
    *perceptual_hash;

  Image
    *hash_image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    channel;

  /*
    Blur then transform to sRGB colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    return((ChannelPerceptualHash *) NULL);
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,sRGBColorspace);
  if (status == MagickFalse)
    {
      /*
        Fix: destroy the blurred clone before bailing out; it was leaked on
        this error path.
      */
      hash_image=DestroyImage(hash_image);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    return((ChannelPerceptualHash *) NULL);
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    CompositeChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    {
      /*
        Fix: release the moments buffer on allocation failure.
      */
      moments=(ChannelMoments *) RelinquishMagickMemory(moments);
      return((ChannelPerceptualHash *) NULL);
    }
  /*
    P[] holds the sRGB hash: negated log10 of each Hu invariant moment.
  */
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  /*
    Blur then transform to HCLp colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,HCLpColorspace);
  if (status == MagickFalse)
    {
      /*
        Fix: destroy the blurred clone before bailing out; it was leaked on
        this error path.
      */
      hash_image=DestroyImage(hash_image);
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  /*
    Q[] holds the HCLp hash, computed the same way.
  */
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: compute the range over all channels combined.
  */
  MagickBooleanType
    status;

  status=GetImageChannelRange(image,CompositeChannels,minima,maxima,exception);
  return(status);
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  double
    lower,
    upper;

  MagickPixelPacket
    pixel;

  ssize_t
    y;

  /*
    Scan every pixel and track the smallest/largest sample among the selected
    channels.  Opacity is measured as alpha (QuantumRange-opacity) and only
    when the image has a matte channel; the index channel participates only
    for CMYK images.  Returns MagickFalse if a pixel row could not be read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  lower=MagickMaximumValue;
  upper=(-MagickMaximumValue);
  *minima=lower;
  *maxima=upper;
  GetMagickPixelPacket(image,&pixel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    q=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        {
          if (pixel.red < lower)
            lower=(double) pixel.red;
          if (pixel.red > upper)
            upper=(double) pixel.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if (pixel.green < lower)
            lower=(double) pixel.green;
          if (pixel.green > upper)
            upper=(double) pixel.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if (pixel.blue < lower)
            lower=(double) pixel.blue;
          if (pixel.blue > upper)
            upper=(double) pixel.blue;
        }
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        {
          if ((QuantumRange-pixel.opacity) < lower)
            lower=(double) (QuantumRange-pixel.opacity);
          if ((QuantumRange-pixel.opacity) > upper)
            upper=(double) (QuantumRange-pixel.opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) pixel.index < lower)
            lower=(double) pixel.index;
          if ((double) pixel.index > upper)
            upper=(double) pixel.index;
        }
      q++;
    }
  }
  *minima=lower;
  *maxima=upper;
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
standard_deviation;
MagickPixelPacket
number_bins,
*histogram;
QuantumAny
range;
register ssize_t
i;
size_t
channels,
depth,
length;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
sizeof(*channel_statistics));
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1U,
sizeof(*histogram));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (MagickPixelPacket *) NULL))
{
if (histogram != (MagickPixelPacket *) NULL)
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) ResetMagickMemory(channel_statistics,0,length*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) ResetMagickMemory(histogram,0,(MaxMap+1U)*sizeof(*histogram));
(void) ResetMagickMemory(&number_bins,0,sizeof(number_bins));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; )
{
if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[RedChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
{
channel_statistics[RedChannel].depth++;
continue;
}
}
if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[GreenChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
{
channel_statistics[GreenChannel].depth++;
continue;
}
}
if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlueChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
{
channel_statistics[BlueChannel].depth++;
continue;
}
}
if (image->matte != MagickFalse)
{
if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[OpacityChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelAlpha(p),range) == MagickFalse)
{
channel_statistics[OpacityChannel].depth++;
continue;
}
}
}
if (image->colorspace == CMYKColorspace)
{
if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlackChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
{
channel_statistics[BlackChannel].depth++;
continue;
}
}
}
if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
channel_statistics[RedChannel].sum+=GetPixelRed(p);
channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
GetPixelRed(p);
channel_statistics[RedChannel].sum_cubed+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
channel_statistics[RedChannel].sum_fourth_power+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
GetPixelGreen(p);
channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
channel_statistics[GreenChannel].sum_fourth_power+=(double)
GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
GetPixelBlue(p);
channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
channel_statistics[BlueChannel].sum_fourth_power+=(double)
GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if (image->matte != MagickFalse)
{
if ((double) GetPixelAlpha(p) < channel_statistics[OpacityChannel].minima)
channel_statistics[OpacityChannel].minima=(double) GetPixelAlpha(p);
if ((double) GetPixelAlpha(p) > channel_statistics[OpacityChannel].maxima)
channel_statistics[OpacityChannel].maxima=(double) GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum+=GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_squared+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_cubed+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_fourth_power+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
histogram[ScaleQuantumToMap(GetPixelAlpha(p))].opacity++;
}
if (image->colorspace == CMYKColorspace)
{
if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
channel_statistics[BlackChannel].minima=(double)
GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
channel_statistics[BlackChannel].maxima=(double)
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_squared+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_cubed+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_fourth_power+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
}
x++;
p++;
}
}
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
double
area,
mean,
standard_deviation;
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal((double) image->columns*image->rows);
mean=channel_statistics[i].sum*area;
channel_statistics[i].sum=mean;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=mean;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-(mean*mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
if (histogram[i].red > 0.0)
number_bins.red++;
if (histogram[i].green > 0.0)
number_bins.green++;
if (histogram[i].blue > 0.0)
number_bins.blue++;
if ((image->matte != MagickFalse) && (histogram[i].opacity > 0.0))
number_bins.opacity++;
if ((image->colorspace == CMYKColorspace) && (histogram[i].index > 0.0))
number_bins.index++;
}
area=PerceptibleReciprocal((double) image->columns*image->rows);
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
/*
Compute pixel entropy.
*/
histogram[i].red*=area;
if (number_bins.red > MagickEpsilon)
channel_statistics[RedChannel].entropy+=-histogram[i].red*
MagickLog10(histogram[i].red)/MagickLog10((double) number_bins.red);
histogram[i].green*=area;
if (number_bins.green > MagickEpsilon)
channel_statistics[GreenChannel].entropy+=-histogram[i].green*
MagickLog10(histogram[i].green)/MagickLog10((double) number_bins.green);
histogram[i].blue*=area;
if (number_bins.blue > MagickEpsilon)
channel_statistics[BlueChannel].entropy+=-histogram[i].blue*
MagickLog10(histogram[i].blue)/MagickLog10((double) number_bins.blue);
if (image->matte != MagickFalse)
{
histogram[i].opacity*=area;
if (number_bins.opacity > MagickEpsilon)
channel_statistics[OpacityChannel].entropy+=-histogram[i].opacity*
MagickLog10(histogram[i].opacity)/MagickLog10((double)
number_bins.opacity);
}
if (image->colorspace == CMYKColorspace)
{
histogram[i].index*=area;
if (number_bins.index > MagickEpsilon)
channel_statistics[IndexChannel].entropy+=-histogram[i].index*
MagickLog10(histogram[i].index)/MagickLog10((double)
number_bins.index);
}
}
/*
Compute overall statistics.
*/
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
channel_statistics[CompositeChannels].depth,(double)
channel_statistics[i].depth);
channel_statistics[CompositeChannels].minima=MagickMin(
channel_statistics[CompositeChannels].minima,
channel_statistics[i].minima);
channel_statistics[CompositeChannels].maxima=EvaluateMax(
channel_statistics[CompositeChannels].maxima,
channel_statistics[i].maxima);
channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
channel_statistics[CompositeChannels].sum_squared+=
channel_statistics[i].sum_squared;
channel_statistics[CompositeChannels].sum_cubed+=
channel_statistics[i].sum_cubed;
channel_statistics[CompositeChannels].sum_fourth_power+=
channel_statistics[i].sum_fourth_power;
channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
channel_statistics[CompositeChannels].variance+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[CompositeChannels].standard_deviation=standard_deviation;
channel_statistics[CompositeChannels].entropy+=
channel_statistics[i].entropy;
}
channels=3;
if (image->matte != MagickFalse)
channels++;
if (image->colorspace == CMYKColorspace)
channels++;
channel_statistics[CompositeChannels].sum/=channels;
channel_statistics[CompositeChannels].sum_squared/=channels;
channel_statistics[CompositeChannels].sum_cubed/=channels;
channel_statistics[CompositeChannels].sum_fourth_power/=channels;
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].kurtosis/=channels;
channel_statistics[CompositeChannels].skewness/=channels;
channel_statistics[CompositeChannels].entropy/=channels;
i=CompositeChannels;
area=PerceptibleReciprocal((double) channels*image->columns*image->rows);
channel_statistics[i].variance=channel_statistics[i].sum_squared;
channel_statistics[i].mean=channel_statistics[i].sum;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal((double) channels*
image->columns*image->rows-1.0)*channels*image->columns*image->rows*
standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
channel_statistics[CompositeChannels].mean=0.0;
channel_statistics[CompositeChannels].standard_deviation=0.0;
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[i].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[i].standard_deviation;
}
channel_statistics[CompositeChannels].mean/=(double) channels;
channel_statistics[CompositeChannels].standard_deviation/=(double) channels;
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const size_t number_terms,const ChannelType channel,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: evaluate the polynomial over the default channels.
  */
  return(PolynomialImageChannel(images,DefaultChannels,number_terms,terms,
    exception));
}
MagickExport Image *PolynomialImageChannel(const Image *images,
  const ChannelType channel,const size_t number_terms,const double *terms,
  ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    **magick_restrict polynomial_pixels,
    zero;

  size_t
    number_images;

  ssize_t
    y;

  /*
    Validate arguments and acquire the canvas that receives the result.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  /*
    One per-thread scanline of MagickPixelPacket accumulators.
  */
  number_images=GetImageListLength(images);
  polynomial_pixels=AcquirePixelThreadSet(images,number_images);
  if (polynomial_pixels == (MagickPixelPacket **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels: one parallel iteration per output row.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(images,&zero);
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict polynomial_indexes;

    register MagickPixelPacket
      *polynomial_pixel;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
    /*
      Zero this thread's accumulator row.
    */
    polynomial_pixel=polynomial_pixels[id];
    for (x=0; x < (ssize_t) image->columns; x++)
      polynomial_pixel[x]=zero;
    next=images;
    for (i=0; i < (ssize_t) number_images; i++)
    {
      register const IndexPacket
        *indexes;

      register const PixelPacket
        *p;

      /*
        Each image in the sequence consumes one (coefficient,degree) pair;
        images beyond the term list contribute nothing.
      */
      if (i >= (ssize_t) number_terms)
        break;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          coefficient,
          degree;

        /*
          Accumulate coefficient*(QuantumScale*sample)^degree per selected
          channel; opacity is accumulated as alpha (QuantumRange-opacity).
        */
        coefficient=terms[i << 1];
        degree=terms[(i << 1)+1];
        if ((channel & RedChannel) != 0)
          polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
        if ((channel & GreenChannel) != 0)
          polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
            degree);
        if ((channel & BlueChannel) != 0)
          polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,
            degree);
        if ((channel & OpacityChannel) != 0)
          polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
            (QuantumRange-p->opacity),degree);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
            degree);
        p++;
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /*
      Scale the accumulated sums back to quantum range, clamp, and store.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
      SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
      SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
      if (image->matte == MagickFalse)
        SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      else
        SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
          polynomial_pixel[x].index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PolynomialImages)
#endif
        proceed=SetImageProgress(images,PolynomialImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Number of skip-lists per pixel list: red, green, blue, opacity, index. */
#define ListChannels 5

/*
  One slot per 16-bit color value (plus a root sentinel at 65536): next[]
  links the node into the skip-list at up to 9 levels, count is the number
  of neighborhood samples with this color, and signature marks the node as
  live for the current pixel-list generation.
*/
typedef struct _ListNode
{
  size_t
    next[9],
    count,
    signature;
} ListNode;

/* A skip-list over 16-bit colors: current top level plus the node pool. */
typedef struct _SkipList
{
  ssize_t
    level;

  ListNode
    *nodes;
} SkipList;

/*
  Per-thread neighborhood accumulator: length is the neighborhood pixel
  count, seed drives the pseudo-random skip-list level generator, and
  signature is the current generation stamp.
*/
typedef struct _PixelList
{
  size_t
    length,
    seed,
    signature;

  SkipList
    lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  /*
    Release each channel's node pool, then the list itself.  NULL-safe;
    always returns NULL.
  */
  register ssize_t
    channel;

  if (pixel_list != (PixelList *) NULL)
    {
      for (channel=0; channel < ListChannels; channel++)
        if (pixel_list->lists[channel].nodes != (ListNode *) NULL)
          pixel_list->lists[channel].nodes=(ListNode *)
            RelinquishAlignedMemory(pixel_list->lists[channel].nodes);
      pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
    }
  return((PixelList *) NULL);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    n;

  /*
    Tear down one PixelList per worker thread, then the pointer array.
    DestroyPixelList() is NULL-safe, so no per-slot guard is needed.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  register ssize_t
    channel;

  /*
    Allocate a zeroed pixel list whose per-channel skip-lists each hold
    65537 nodes: one per 16-bit color plus the root sentinel at 65536.
    Returns NULL on any allocation failure (partial state is released).
  */
  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (channel=0; channel < ListChannels; channel++)
  {
    pixel_list->lists[channel].nodes=(ListNode *) AcquireAlignedMemory(
      65537UL,sizeof(*pixel_list->lists[channel].nodes));
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) ResetMagickMemory(pixel_list->lists[channel].nodes,0,65537UL*
      sizeof(*pixel_list->lists[channel].nodes));
  }
  pixel_list->signature=MagickCoreSignature;
  return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  register ssize_t
    n;

  size_t
    number_threads;

  /*
    One PixelList per worker thread.  On any failure every list acquired
    so far (and the array) is released and NULL is returned.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_list[n]=AcquirePixelList(width,height);
    if (pixel_list[n] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
  const size_t color)
{
  register SkipList
    *list;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node: mark it live for the current generation and give
    it its first sample.
  */
  list=pixel_list->lists+channel;
  list->nodes[color].signature=pixel_list->signature;
  list->nodes[color].count=1;
  /*
    Determine where it belongs in the list: descend from the top level,
    recording at each level the last node whose color precedes ours
    (65536 is the root sentinel).
  */
  search=65536UL;
  for (level=list->level; level >= 0; level--)
  {
    while (list->nodes[search].next[level] < color)
      search=list->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node (geometrically distributed
    via a linear congruential generator; loop exits when two chosen bits
    are not both set).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (list->level+2))
    level=list->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > list->level)
  {
    list->level++;
    update[list->level]=65536UL;
  }
  /*
    Link the node into the skip-list at every level up to its own.
  */
  do
  {
    list->nodes[color].next[level]=list->nodes[update[level]].next[level];
    list->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register ssize_t
    channel;

  unsigned short
    extrema[ListChannels];

  /*
    For each channel, walk the level-0 chain of the skip-list until every
    sample is accounted for, tracking the largest color encountered.
  */
  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list = pixel_list->lists+channel;

    size_t
      color = 65536UL,
      peak;

    ssize_t
      tally = 0;

    peak=list->nodes[color].next[0];
    do
    {
      color=list->nodes[color].next[0];
      peak=(color > peak) ? color : peak;
      tally+=(ssize_t) list->nodes[color].count;
    } while (tally < (ssize_t) pixel_list->length);
    extrema[channel]=(unsigned short) peak;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(extrema[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(extrema[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(extrema[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(extrema[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(extrema[4]);
}
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register ssize_t
    channel;

  unsigned short
    means[ListChannels];

  /*
    Average each channel: weight every color on the level-0 chain by its
    sample count, then divide by the neighborhood size.
  */
  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list = pixel_list->lists+channel;

    MagickRealType
      total = 0.0;

    size_t
      color = 65536UL;

    ssize_t
      tally = 0;

    do
    {
      color=list->nodes[color].next[0];
      total+=(MagickRealType) list->nodes[color].count*color;
      tally+=(ssize_t) list->nodes[color].count;
    } while (tally < (ssize_t) pixel_list->length);
    total/=pixel_list->length;
    means[channel]=(unsigned short) total;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(means[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(means[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(means[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(means[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(means[4]);
}
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register ssize_t
    channel;

  unsigned short
    medians[ListChannels];

  /*
    Advance along each channel's sorted chain until more than half of the
    samples have been passed; that color is the channel's median.
  */
  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list = pixel_list->lists+channel;

    size_t
      color = 65536UL;

    ssize_t
      tally = 0;

    do
    {
      color=list->nodes[color].next[0];
      tally+=(ssize_t) list->nodes[color].count;
    } while (tally <= (ssize_t) (pixel_list->length >> 1));
    medians[channel]=(unsigned short) color;
  }
  /*
    Reset the result pixel before filling it in (this accessor, unlike its
    siblings, initializes the packet here).
  */
  GetMagickPixelPacket((const Image *) NULL,pixel);
  pixel->red=(MagickRealType) ScaleShortToQuantum(medians[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(medians[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(medians[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(medians[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(medians[4]);
}
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register ssize_t
    channel;

  unsigned short
    extrema[ListChannels];

  /*
    For each channel, walk the level-0 chain of the skip-list until every
    sample is accounted for, tracking the smallest color encountered.
  */
  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list = pixel_list->lists+channel;

    size_t
      color = 65536UL,
      low;

    ssize_t
      tally = 0;

    low=list->nodes[color].next[0];
    do
    {
      color=list->nodes[color].next[0];
      low=(color < low) ? color : low;
      tally+=(ssize_t) list->nodes[color].count;
    } while (tally < (ssize_t) pixel_list->length);
    extrema[channel]=(unsigned short) low;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(extrema[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(extrema[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(extrema[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(extrema[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(extrema[4]);
}
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register ssize_t
    channel;

  unsigned short
    modes[5];

  /*
    Make each pixel the 'predominant color' of the specified neighborhood:
    the chain color with the highest sample count wins (first such color
    on strict inequality).
  */
  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list = pixel_list->lists+channel;

    size_t
      best_count,
      color = 65536UL,
      winner = 65536UL;

    ssize_t
      tally = 0;

    best_count=list->nodes[winner].count;
    do
    {
      color=list->nodes[color].next[0];
      if (list->nodes[color].count > best_count)
        {
          winner=color;
          best_count=list->nodes[winner].count;
        }
      tally+=(ssize_t) list->nodes[color].count;
    } while (tally < (ssize_t) pixel_list->length);
    modes[channel]=(unsigned short) winner;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(modes[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(modes[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(modes[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(modes[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(modes[4]);
}
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Finds the non peak value for each of the colors: walk the sorted chain
    to the median, then if the median sits at either end of the chain
    (its neighbor is the 65536 root sentinel), step one node inward so an
    extreme value is not selected.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    next=list->nodes[color].next[0];
    count=0;
    do
    {
      /* previous/color/next straddle the running position on the chain. */
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous;
    channels[channel]=(unsigned short) color;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  register ssize_t
    channel;

  unsigned short
    roots[ListChannels];

  /*
    Root mean square per channel: accumulate count*color^2 along the chain,
    divide by the neighborhood size, and take the square root.
  */
  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list = pixel_list->lists+channel;

    MagickRealType
      total = 0.0;

    size_t
      color = 65536UL;

    ssize_t
      tally = 0;

    do
    {
      color=list->nodes[color].next[0];
      total+=(MagickRealType) (list->nodes[color].count*color*color);
      tally+=(ssize_t) list->nodes[color].count;
    } while (tally < (ssize_t) pixel_list->length);
    total/=pixel_list->length;
    roots[channel]=(unsigned short) sqrt(total);
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(roots[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(roots[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(roots[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(roots[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(roots[4]);
}
static void GetStandardDeviationPixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum,
    sum_squared;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard-deviation value for each of the color channels:
    accumulate the sum and sum-of-squares of all samples along the level-0
    chain, then apply sigma = sqrt(E[x^2]-E[x]^2).
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /*
        A node holds `count` identical samples: weight color^2 by the count
        instead of looping once per sample as the previous implementation
        did (O(count) additions of the same term).
      */
      sum_squared+=(MagickRealType) list->nodes[color].count*
        ((MagickRealType) color)*((MagickRealType) color);
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  register ssize_t
    channel;

  unsigned short
    value[5];

  /*
    Insert one pixel into each of the five per-channel skip-lists.  A node
    whose signature matches the list's current generation is already live:
    just bump its count; otherwise (re)link it via AddNodePixelList().
    When the image is not CMYK, channel 4 re-inserts the opacity value so
    its list is never empty (non-CMYK callers ignore the index result).
  */
  value[0]=ScaleQuantumToShort(GetPixelRed(pixel));
  value[1]=ScaleQuantumToShort(GetPixelGreen(pixel));
  value[2]=ScaleQuantumToShort(GetPixelBlue(pixel));
  value[3]=ScaleQuantumToShort(GetPixelOpacity(pixel));
  value[4]=value[3];
  if (image->colorspace == CMYKColorspace)
    value[4]=ScaleQuantumToShort(GetPixelIndex(indexes));
  for (channel=0; channel < 5; channel++)
  {
    if (pixel_list->lists[channel].nodes[value[channel]].signature ==
        pixel_list->signature)
      pixel_list->lists[channel].nodes[value[channel]].count++;
    else
      AddNodePixelList(pixel_list,channel,value[channel]);
  }
}
static void ResetPixelList(PixelList *pixel_list)
{
  register ssize_t
    channel,
    level;

  /*
    Begin a new generation: point every root sentinel back at itself on
    all levels, drop each list to level 0, and bump the signature so stale
    nodes from the previous neighborhood are ignored.
  */
  for (channel=0; channel < 5; channel++)
  {
    register ListNode
      *root = pixel_list->lists[channel].nodes+65536UL;

    pixel_list->lists[channel].level=0;
    for (level=0; level < 9; level++)
      root->next[level]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the statistic over the default channels.
  */
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;

  size_t
    neighbor_height,
    neighbor_width;

  ssize_t
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /*
    A zero width or height requests an automatically sized kernel.
  */
  neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
    width;
  neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
    height;
  pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict statistic_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the source row plus a half-kernel border on every side
      (off-canvas samples come from the virtual-pixel method).
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
      (ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
      neighbor_height,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,
      statistic_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const IndexPacket
        *magick_restrict s;

      register const PixelPacket
        *magick_restrict r;

      register ssize_t
        u,
        v;

      /*
        Load the neighborhood centered at (x,y) into this thread's
        per-channel skip-lists.
      */
      r=p;
      s=indexes+x;
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) neighbor_height; v++)
      {
        for (u=0; u < (ssize_t) neighbor_width; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        r+=image->columns+neighbor_width;
        s+=image->columns+neighbor_width;
      }
      /*
        Seed the result with the center pixel, then overwrite the selected
        channels with the requested neighborhood statistic.
      */
      GetMagickPixelPacket(image,&pixel);
      SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
        neighbor_width*neighbor_height/2,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          MagickPixelPacket
            maximum,
            minimum;

          /* Gradient: per-channel range (max-min) of the neighborhood. */
          GetMinimumPixelList(pixel_list[id],&pixel);
          minimum=pixel;
          GetMaximumPixelList(pixel_list[id],&pixel);
          maximum=pixel;
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          GetMaximumPixelList(pixel_list[id],&pixel);
          break;
        }
        case MeanStatistic:
        {
          GetMeanPixelList(pixel_list[id],&pixel);
          break;
        }
        case MedianStatistic:
        default:
        {
          GetMedianPixelList(pixel_list[id],&pixel);
          break;
        }
        case MinimumStatistic:
        {
          GetMinimumPixelList(pixel_list[id],&pixel);
          break;
        }
        case ModeStatistic:
        {
          GetModePixelList(pixel_list[id],&pixel);
          break;
        }
        case NonpeakStatistic:
        {
          GetNonpeakPixelList(pixel_list[id],&pixel);
          break;
        }
        case RootMeanSquareStatistic:
        {
          GetRootMeanSquarePixelList(pixel_list[id],&pixel);
          break;
        }
        case StandardDeviationStatistic:
        {
          GetStandardDeviationPixelList(pixel_list[id],&pixel);
          break;
        }
      }
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_StatisticImage)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
Example_task_dep.12.c | /*
* @@name: task_dep.12c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
int main (int argc, char *argv[])
{
  int x = 0;
  #pragma omp parallel
  #pragma omp single
  {
    /* first explicit task: deferred, satisfies the depend(out: x) node */
    #pragma omp task shared(x) depend(out: x)
    x = 1;
    /* second explicit task: if(0) makes it undeferred, so the encountering
       thread suspends until the depend(inout: x) dependence on the first
       task is satisfied and then executes the task body itself before
       continuing past the construct */
    #pragma omp task shared(x) depend(inout: x) if(0)
    x = 2;
    /* statement executed by parent implicit task
       prints: x = 2 */
    printf("x = %d\n", x);
  }
  return 0;
}
|
burgers1d_b.c | /* Generated by TAPENADE (INRIA, Ecuador team)
Tapenade 3.14 (r7079) - 5 Oct 2018 09:55
*/
#include <adBuffer.h>
/*
Differentiation of fmax in reverse (adjoint) mode:
gradient of useful results: fmax b
with respect to varying inputs: b
*/
/*
  Adjoint of fmax(a,b) with respect to b: when b is selected by the primal
  (a <= b, matching fmax_nodiff's tie-breaking), the incoming adjoint
  fmaxb is accumulated into *bb; otherwise b receives no sensitivity.
  The unused local `double fmax;` was removed: it shadowed the C library
  function of the same name and served no purpose.
*/
void fmax_b(double a, double b, double *bb, double fmaxb) {
  if (a <= b)
    *bb = *bb + fmaxb;
}
/*
  Primal counterpart of fmax_b: returns the larger of a and b, with b
  winning ties (consistent with the a <= b test in the adjoint).
*/
double fmax_nodiff(double a, double b) {
  return (a > b) ? a : b;
}
/*
Differentiation of fmin in reverse (adjoint) mode:
gradient of useful results: fmin b
with respect to varying inputs: b
*/
/*
  Adjoint of fmin(a,b) with respect to b: when b is selected by the primal
  (a >= b, matching fmin_nodiff's tie-breaking), the incoming adjoint
  fminb is accumulated into *bb; otherwise b receives no sensitivity.
  The unused local `double fmin;` was removed: it shadowed the C library
  function of the same name and served no purpose.
*/
void fmin_b(double a, double b, double *bb, double fminb) {
  if (a >= b)
    *bb = *bb + fminb;
}
/*
  Primal counterpart of fmin_b: returns the smaller of a and b, with b
  winning ties (consistent with the a >= b test in the adjoint).
*/
double fmin_nodiff(double a, double b) {
  return (a < b) ? a : b;
}
/*
Differentiation of burgers1d in reverse (adjoint) mode:
gradient of useful results: *u *u_1
with respect to varying inputs: *u *u_1
RW status of diff variables: *u:in-out *u_1:incr
Plus diff mem management of: u:in u_1:in
*/
void burgers1d_b(double *u, double *ub, double *u_1, double *u_1b, double D,
                 double C, int n) {
  int i;
  double result1;
  double result1b;
  double result2;
  double result2b;
  double tempb;
  double tempb0;
  /*
    Forward sweep: record the flux-limiter min/max intermediates on the
    Tapenade tape so the reverse sweep can restore them.
  */
  //#pragma omp parallel for private(i)
  for (i = 1; i < n-1; ++i) {
    pushReal8(result1);
    result1 = fmin_nodiff(0, u_1[i]);
    pushReal8(result2);
    result2 = fmax_nodiff(0, u_1[i]);
  }
  /*
    Reverse sweep: propagate the adjoints ub into u_1b in reversed
    iteration order; the atomics guard the u_1b[i-1]/u_1b[i]/u_1b[i+1]
    updates that neighboring iterations share.
    NOTE(review): result1/result2 are read below but are neither listed in
    the private() clause nor restored from the tape before use, and the
    popReal8 tape ordering is not thread-safe under this parallel for —
    confirm against the serial Tapenade output.
  */
  #pragma omp parallel for private(i, tempb, tempb0, result1b, result2b)
  for (i = n-2; i > 0; --i) {
    tempb = -(C*ub[i]);
    tempb0 = D*ub[i];
    #pragma omp atomic
    u_1b[i + 1] = u_1b[i + 1] + tempb0 + result1*tempb;
    #pragma omp atomic
    u_1b[i] = u_1b[i] + ub[i] - 2.0*tempb0 + (result2-result1)*tempb;
    result1b = (u_1[i+1]-u_1[i])*tempb;
    #pragma omp atomic
    u_1b[i - 1] = u_1b[i - 1] + tempb0 - result2*tempb;
    result2b = (u_1[i]-u_1[i-1])*tempb;
    popReal8(&result2);
    /* NOTE(review): tempb still holds -(C*ub[i]) here, so fmax_b/fmin_b
       accumulate their adjoint into a non-zero value that is then added
       to u_1b[i] — verify this matches the intended adjoint. */
    fmax_b(0, u_1[i], &(tempb), result2b);
    #pragma omp atomic
    u_1b[i] += tempb;
    popReal8(&result1);
    fmin_b(0, u_1[i], &(tempb), result1b);
    #pragma omp atomic
    u_1b[i] += tempb;
  }
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/feature.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Per-pixel state for the Canny edge detector, stored in the matrix cache. */
typedef struct _CannyInfo
{
  double
    magnitude,    /* gradient magnitude hypot(dx,dy) at the pixel */
    intensity;    /* magnitude surviving non-maxima suppression; 0 if suppressed */

  int
    orientation;  /* quantized gradient sector 0..3, selects the neighbor
                     pair compared during suppression (see CannyEdgeImage) */

  ssize_t
    x,
    y;            /* pixel coordinates; used when an element doubles as a
                     worklist entry in TraceEdges */
} CannyInfo;
/*
  Return MagickTrue when (x,y) lies inside the image bounds, otherwise
  MagickFalse.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x >= 0) && (x < (ssize_t) image->columns) &&
      (y >= 0) && (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Hysteresis edge tracing: mark the seed pixel (x,y) as an edge (white), then
  flood-fill to every 8-connected, still-black neighbor whose suppressed
  gradient intensity is at least lower_threshold.  Returns MagickFalse on any
  cache or matrix access failure.

  NOTE(review): the pending-pixel worklist is kept inside canny_cache itself,
  in the elements (i,0) -- this overwrites gradient entries for image row 0.
  Mirrors the original code; confirm intended before modifying.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register PixelPacket
    *q;

  register ssize_t
    i;

  /* Mark the seed pixel white (edge). */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  q->red=QuantumRange;
  q->green=QuantumRange;
  q->blue=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Seed the worklist with (x,y). */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  /* i is the worklist depth; loop until the stack drains. */
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    /* Scan the 8-neighborhood of the popped pixel. */
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        /* Unvisited (still black) and strong enough: mark it white and
           push it on the worklist. */
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            q->red=QuantumRange;
            q->green=QuantumRange;
            q->blue=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            /* NOTE(review): edge.x/edge.y are advanced in place, so the
               remaining u/v neighbors in this pass are taken relative to
               the newly pushed pixel -- matches upstream; confirm. */
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
/* Canny edge detector pipeline: Gaussian smoothing, 2x2 gradient,
   non-maxima suppression, then double-threshold hysteresis tracing.
   Returns a new grayscale edge image or NULL on failure. */
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MaxTextExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise.
  */
  /* Gaussian smoothing via two 1-D blur kernels (0 and 90 degrees). */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,DeactivateAlphaChannel);
  /*
    Find the intensity gradient of the image.
  */
  /* One CannyInfo element per pixel, held out-of-band in a matrix cache. */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 2-row window so the 2x2 kernels can look one pixel right
       and one row down. */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const PixelPacket
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /* 2x2 finite-difference kernels for the horizontal (Gx) and
         vertical (Gy) gradient components. */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        /* Step down one row inside the fetched window. */
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      pixel.orientation=0;
      /* Quantize gradient direction into 4 sectors; the slope cutoffs are
         tan(22.5 deg) ~= 0.4142 and tan(67.5 deg) ~= 2.4142. */
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p++;
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  /* Seed min/max from element (0,0); intensity is 0 there until this pass
     computes it (the struct was zeroed in the gradient pass). */
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      /* Pick the two neighbors along the quantized gradient direction. */
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /* Keep the magnitude only where it is a local maximum along the
         gradient; otherwise suppress it to zero. */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        /* Track the global intensity range; used below to derive the
           hysteresis thresholds. */
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /* Start the edge image all black; TraceEdges paints edges white. */
      q->red=0;
      q->green=0;
      q->blue=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  /* Serial pass: each strong pixel seeds a flood fill via TraceEdges. */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const PixelPacket
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference
% variance, difference entropy, information measures of correlation 1,
% information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageChannelFeatures(image,1,exception);
% contrast=channel_features[RedChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageChannelFeatures method is:
%
% ChannelFeatures *GetImageChannelFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
DoublePixelPacket
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
LongPixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i;
size_t
length;
ssize_t
y;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=CompositeChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (LongPixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].opacity=(~0U);
grays[i].index=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(p))].red=
ScaleQuantumToMap(GetPixelRed(p));
grays[ScaleQuantumToMap(GetPixelGreen(p))].green=
ScaleQuantumToMap(GetPixelGreen(p));
grays[ScaleQuantumToMap(GetPixelBlue(p))].blue=
ScaleQuantumToMap(GetPixelBlue(p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index=
ScaleQuantumToMap(GetPixelIndex(indexes+x));
if (image->matte != MagickFalse)
grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity=
ScaleQuantumToMap(GetPixelOpacity(p));
p++;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) memset(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[(ssize_t) gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[(ssize_t) gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[(ssize_t) gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].index != ~0U)
grays[(ssize_t) gray.index++].index=grays[i].index;
if (image->matte != MagickFalse)
if (grays[i].opacity != ~0U)
grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.index > number_grays)
number_grays=gray.index;
if (image->matte != MagickFalse)
if (gray.opacity > number_grays)
number_grays=gray.opacity;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) memset(&correlation,0,sizeof(correlation));
(void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) memset(&mean,0,sizeof(mean));
(void) memset(sum,0,number_grays*sizeof(*sum));
(void) memset(&sum_squares,0,sizeof(sum_squares));
(void) memset(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) memset(&entropy_x,0,sizeof(entropy_x));
(void) memset(&entropy_xy,0,sizeof(entropy_xy));
(void) memset(&entropy_xy1,0,sizeof(entropy_xy1));
(void) memset(&entropy_xy2,0,sizeof(entropy_xy2));
(void) memset(&entropy_y,0,sizeof(entropy_y));
(void) memset(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) memset(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) memset(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
ssize_t
i,
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+
2*distance,distance+2,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
p+=distance;
indexes+=distance;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset)))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset)))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p)))
u++;
while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x)))
u++;
while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset)))
v++;
cooccurrence[u][v].direction[i].index++;
cooccurrence[v][u].direction[i].index++;
}
if (image->matte != MagickFalse)
{
u=0;
v=0;
while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p)))
u++;
while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity))
v++;
cooccurrence[u][v].direction[i].opacity++;
cooccurrence[v][u].direction[i].opacity++;
}
}
p++;
}
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].index*=normalize;
if (image->matte != MagickFalse)
cooccurrence[x][y].direction[i].opacity*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BlueChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].index*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].opacity*
cooccurrence[x][y].direction[i].opacity;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].index+=x*y*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
correlation.direction[i].opacity+=x*y*
cooccurrence[x][y].direction[i].opacity;
/*
Inverse Difference Moment.
*/
channel_features[RedChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BlueChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[y+x+2].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Entropy.
*/
channel_features[RedChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BlueChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].entropy[i]-=
cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].entropy[i]-=
cooccurrence[x][y].direction[i].opacity*
MagickLog10(cooccurrence[x][y].direction[i].opacity);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_x[x].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_y[y].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].index+=y*sum[y].direction[i].index;
sum_squares.direction[i].index+=y*y*sum[y].direction[i].index;
}
if (image->matte != MagickFalse)
{
mean.direction[i].opacity+=y*sum[y].direction[i].opacity;
sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BlueChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].correlation[i]=
(correlation.direction[i].index-mean.direction[i].index*
mean.direction[i].index)/(sqrt(sum_squares.direction[i].index-
(mean.direction[i].index*mean.direction[i].index))*sqrt(
sum_squares.direction[i].index-(mean.direction[i].index*
mean.direction[i].index)));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].correlation[i]=
(correlation.direction[i].opacity-mean.direction[i].opacity*
mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity-
(mean.direction[i].opacity*mean.direction[i].opacity))*sqrt(
sum_squares.direction[i].opacity-(mean.direction[i].opacity*
mean.direction[i].opacity)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_average[i]+=
x*density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_average[i]+=
x*density_xy[x].direction[i].opacity;
/*
Sum entropy.
*/
channel_features[RedChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Sum variance.
*/
channel_features[RedChannel].sum_variance[i]+=
(x-channel_features[RedChannel].sum_entropy[i])*
(x-channel_features[RedChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_variance[i]+=
(x-channel_features[GreenChannel].sum_entropy[i])*
(x-channel_features[GreenChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_variance[i]+=
(x-channel_features[BlueChannel].sum_entropy[i])*
(x-channel_features[BlueChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_variance[i]+=
(x-channel_features[IndexChannel].sum_entropy[i])*
(x-channel_features[IndexChannel].sum_entropy[i])*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_variance[i]+=
(x-channel_features[OpacityChannel].sum_entropy[i])*
(x-channel_features[OpacityChannel].sum_entropy[i])*
density_xy[x].direction[i].opacity;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=(y-mean.direction[i].index+1)*
(y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)*
(y-mean.direction[i].opacity+1)*
cooccurrence[x][y].direction[i].opacity;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
entropy_xy.direction[i].opacity-=
cooccurrence[x][y].direction[i].opacity*MagickLog10(
cooccurrence[x][y].direction[i].opacity);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].index-=(
cooccurrence[x][y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy1.direction[i].opacity-=(
cooccurrence[x][y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(
density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(
density_x[x].direction[i].green*density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(
density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].index-=(density_x[x].direction[i].index*
density_y[y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
}
}
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BlueChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].index;
if (image->matte != MagickFalse)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].opacity;
}
/*
Compute more texture features.
*/
(void) memset(&variance,0,sizeof(variance));
(void) memset(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=density_xy[x].direction[i].opacity;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].index+=density_xy[x].direction[i].index*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity*
density_xy[x].direction[i].opacity;
/*
Difference entropy.
*/
channel_features[RedChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].index-=(density_x[x].direction[i].index*
MagickLog10(density_x[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity*
MagickLog10(density_x[x].direction[i].opacity));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].index-=(density_y[x].direction[i].index*
MagickLog10(density_y[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity*
MagickLog10(density_y[x].direction[i].opacity));
}
/*
Difference variance.
*/
channel_features[RedChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BlueChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].opacity)-
(variance.direction[i].opacity*variance.direction[i].opacity))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].index)-
(variance.direction[i].index*variance.direction[i].index))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BlueChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/
(entropy_x.direction[i].index > entropy_y.direction[i].index ?
entropy_x.direction[i].index : entropy_y.direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/
(entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ?
entropy_x.direction[i].opacity : entropy_y.direction[i].opacity);
channel_features[RedChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BlueChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index-
entropy_xy.direction[i].index)))));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity-
entropy_xy.direction[i].opacity)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) memset(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
pixel.direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
*/
if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].blue) > MagickEpsilon))
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/
density_x[z].direction[i].blue/density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
if ((fabs(density_x[z].direction[i].index) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].index) > MagickEpsilon))
Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index*
cooccurrence[y][x].direction[i].index/
density_x[z].direction[i].index/density_y[x].direction[i].index;
if (image->matte != MagickFalse)
if ((fabs(density_x[z].direction[i].opacity) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].opacity) > MagickEpsilon))
Q[z][y].direction[i].opacity+=
cooccurrence[z][x].direction[i].opacity*
cooccurrence[y][x].direction[i].opacity/
density_x[z].direction[i].opacity/
density_y[x].direction[i].opacity;
}
}
channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red;
channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green;
channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].contrast[i]+=z*z*
pixel.direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].contrast[i]+=z*z*
pixel.direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BlueChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator
% matrix of angle vs distance. The size of the accumulator is
% 180x(diagonal/2). Next it searches this space for peaks in counts and
% converts the locations
% of the peaks to slope and intercept in the normal x,y input image space. Use
% the slope/intercepts to find the endpoints clipped to the bounds of the
% image. The lines are then drawn. The counts are a measure of the length of
% the lines.
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  RenderHoughLines() renders the MVG "line" primitives that HoughLineImage()
  wrote to a temporary file (named in image_info->filename) onto a fresh
  canvas of the requested columns x rows, and returns the resulting image or
  NULL on failure.  The caller owns the returned image.
*/
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /*
    Scale the canvas by the image resolution relative to the default density
    so the drawn coordinates and stroke widths render at the expected size
    (a zero resolution means "unset" and maps to a unit scale).
  */
  draw_info->affine.sx=image->x_resolution == 0.0 ? 1.0 : image->x_resolution/
    DefaultResolution;
  draw_info->affine.sy=image->y_resolution == 0.0 ? 1.0 : image->y_resolution/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      /*
        The blob is already memory-resident: copy it and NUL-terminate so it
        can serve as the drawing primitive string.
      */
      draw_info->primitive=(char *) AcquireQuantumMemory(1,(size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  /*
    NOTE(review): if primitive acquisition failed above, DrawImage() is still
    invoked with a NULL primitive — presumably it rejects that input; confirm
    against DrawImage()'s contract.
  */
  (void) DrawImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MaxTextExtent],
    path[MaxTextExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: 180 columns (one per degree of orientation) by
    2*hough_height rows (one per pixel of signed distance from the image
    center, biased so negative distances index valid rows), zero-filled.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: every pixel brighter than mid-gray votes once
    for each of the 180 candidate line orientations through it.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          register ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            /*
              Signed distance from the image center to the line through
              (x,y) whose normal is at angle i; hough_height re-biases it
              into a non-negative accumulator row index.
            */
            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): this row loop carries no omp-parallel pragma, so the
          atomic below is defensive only.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HoughLineImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write an MVG drawing program to
    a unique temporary file, one "line" primitive per accumulator peak.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MaxTextExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,"viewbox 0 0 %.20g %.20g\n",
    (double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,
    "# x1,y1 x2,y2 # count angle distance\n");
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  /*
    Default vote threshold is a quarter of the longest image dimension
    unless the caller supplied a non-zero threshold.
  */
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima within the width x height neighborhood?
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            /*
              u survives the inner loop: a value short of the loop bound
              means a larger neighbor was found, so stop scanning early.
            */
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          if ((x >= 45) && (x <= 135))
            {
              /*
                Mostly-horizontal line (sin(t) well away from zero):
                y = (r-x cos(t))/sin(t); clip at the left/right edges.
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                Mostly-vertical line (cos(t) well away from zero):
                x = (r-y sin(t))/cos(t); clip at the top/bottom edges.
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MaxTextExtent,
            "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
            maxima,(double) x,(double) y);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MaxTextExtent,"%s",path);
  /*
    Forward drawing-related artifacts so callers can control line appearance.
  */
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  /*
    Optionally append a visualization of the raw accumulator when the
    "hough-lines:accumulator" artifact is set to a true value.
  */
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsMagickTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations 100
#define MeanShiftImageTag "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /* Standard MagickCore argument sanity checks. */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result starts as a full clone of the input; every pixel is then
    rewritten below with its converged mean-shift value.
  */
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&mean_image->exception);
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  /*
    Two read-only views of the source: image_view walks each row
    sequentially; pixel_view is used for random access inside the search
    window.  mean_view is the writable view of the output image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Another thread already failed: skip remaining rows cheaply. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      MagickPixelPacket
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /* Seed the iteration with the pixel itself (location and color). */
      GetMagickPixelPacket(image,&mean_pixel);
      SetMagickPixelPacket(image,p,indexes+x,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        MagickPixelPacket
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetMagickPixelPacket(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /* Scan a width-by-height window centered on the current mean. */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            /* Restrict the window to an elliptical neighborhood. */
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelPacket
                  pixel;

                /*
                  NOTE(review): this assignment overwrites the shared
                  `status` flag from inside the parallel region; a success
                  here can mask a failure recorded by another thread —
                  TODO confirm against upstream intent.
                */
                status=GetOneCacheViewVirtualPixel(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                /* Squared color distance to the current mean color. */
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    /* Pixel is close enough in color: fold it into the mean. */
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.opacity+=pixel.opacity;
                    count++;
                  }
              }
          }
        }
        /* Safe reciprocal: yields a usable factor even when count is 0. */
        gamma=PerceptibleReciprocal(count);
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.opacity=gamma*sum_pixel.opacity;
        /*
          Convergence test: squared positional shift plus squared color
          shift (channels scaled toward a 0..255 range via QuantumScale).
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /* Write the converged mean color to the output pixel. */
      q->red=ClampToQuantum(mean_pixel.red);
      q->green=ClampToQuantum(mean_pixel.green);
      q->blue=ClampToQuantum(mean_pixel.blue);
      q->opacity=ClampToQuantum(mean_pixel.opacity);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  /*
    NOTE(review): mean_image is returned even when status is MagickFalse;
    errors are reported only through `exception` — TODO confirm callers
    check the exception.
  */
  return(mean_image);
}
|
core_sgemm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgemm.c, normal z -> s, Fri Sep 28 17:38:18 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_gemm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^T, \f]
*
* alpha and beta are scalars, and A, B and C are matrices, with op( A )
* an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] transb
* - PlasmaNoTrans: B is not transposed,
* - PlasmaTrans: B is transposed,
* - PlasmaConjTrans: B is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrix op( A ) and of the matrix C.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix op( B ) and of the matrix C.
* n >= 0.
*
* @param[in] k
* The number of columns of the matrix op( A ) and the number of rows
* of the matrix op( B ). k >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans,
* and is m otherwise.
*
* @param[in] lda
* The leading dimension of the array A.
* When transa = PlasmaNoTrans, lda >= max(1,m),
* otherwise, lda >= max(1,k).
*
* @param[in] B
* An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans,
* and is k otherwise.
*
* @param[in] ldb
* The leading dimension of the array B.
* When transb = PlasmaNoTrans, ldb >= max(1,k),
* otherwise, ldb >= max(1,n).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n
* matrix ( alpha*op( A )*op( B ) + beta*C ).
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_sgemm(plasma_enum_t transa, plasma_enum_t transb,
                       int m, int n, int k,
                       float alpha, const float *A, int lda,
                                    const float *B, int ldb,
                       float beta,  float *C, int ldc)
{
    // Thin shim over CBLAS: C = alpha*op(A)*op(B) + beta*C, column-major.
    // PLASMA's transpose enums are cast directly to CBLAS_TRANSPOSE
    // (value compatibility is assumed, as in the rest of PLASMA).
    const CBLAS_TRANSPOSE ta = (CBLAS_TRANSPOSE)transa;
    const CBLAS_TRANSPOSE tb = (CBLAS_TRANSPOSE)transb;
    cblas_sgemm(CblasColMajor, ta, tb,
                m, n, k,
                alpha, A, lda,
                       B, ldb,
                beta,  C, ldc);
}
/******************************************************************************/
void plasma_core_omp_sgemm(
    plasma_enum_t transa, plasma_enum_t transb,
    int m, int n, int k,
    float alpha, const float *A, int lda,
                 const float *B, int ldb,
    float beta,  float *C, int ldc,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Second-dimension extents of A and B as actually stored:
    // A is lda-by-ak (ak = k when not transposed, m otherwise),
    // B is ldb-by-bk (bk = n when not transposed, k otherwise).
    int ak = (transa == PlasmaNoTrans) ? k : m;
    int bk = (transb == PlasmaNoTrans) ? n : k;

    // Enqueue the GEMM as an OpenMP task; the depend clauses order it
    // against other tasks touching the same tiles.
    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(in:B[0:ldb*bk]) \
                     depend(inout:C[0:ldc*n])
    {
        // Do nothing if an earlier task in this sequence already failed.
        if (sequence->status == PlasmaSuccess)
            plasma_core_sgemm(transa, transb,
                              m, n, k,
                              alpha, A, lda,
                                     B, ldb,
                              beta,  C, ldc);
    }
}
|
GB_binop__lor_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint16)
// A*D function (colscale): GB (_AxD__lor_uint16)
// D*A function (rowscale): GB (_DxB__lor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint16)
// C=scalar+B GB (_bind1st__lor_uint16)
// C=scalar+B' GB (_bind1st_tran__lor_uint16)
// C=A+scalar GB (_bind2nd__lor_uint16)
// C=A'+scalar GB (_bind2nd_tran__lor_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT16 || GxB_NO_LOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled specialization: the dense C += A+B kernel is only generated when
// the op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LOR is not in that set, so this body is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.
// The body comes from a shared template, specialized by the GB_* macros
// defined at the top of this file (LOR on uint16_t).
GrB_Info GB (_Cdense_ewise3_noaccum__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back
    // to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C,
// sliced into B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// Fix over the generated code: the generator emitted a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block (whose last statement
// already returns); the dead statement has been removed.  Behavior is
// unchanged.
GrB_Info GB (_Cdense_accumb__lor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template fills C's value array in place, typed as uint16_t
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template fills C's value array in place, typed as uint16_t
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with the LOR operator on uint16_t.
GrB_Info GB (_AaddB__lor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (method 01).
GrB_Info GB (_AemultB_01__lor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for this file: LOR is commutative.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_03__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B, where C is bitmap.
GrB_Info GB (_AemultB_bitmap__lor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]): apply the LOR operator with the scalar bound
// to the first argument.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__lor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // process only entries present per the B bitmap (see GBB)
        if (GBB (Bb, p))
        {
            uint16_t bij = GBX (Bx, p, false) ;
            Cx [p] = ((x != 0) || (bij != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y): apply the LOR operator with the scalar bound
// to the second argument.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__lor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // process only entries present per the A bitmap (see GBB)
        if (GBB (Ab, p))
        {
            uint16_t aij = GBX (Ax, p, false) ;
            Cx [p] = ((aij != 0) || (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name);
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) || (aij != 0)) ; \
}

// C = op (x, A'): transpose A and apply the operator with the scalar
// bound to the first argument.
GrB_Info GB (_bind1st_tran__lor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use of the macro in this file
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name);
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) || (y != 0)) ; \
}

// C = op (A', y): transpose A and apply the operator with the scalar
// bound to the second argument.
GrB_Info GB (_bind2nd_tran__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
slow.c | #include <stdio.h>
#include <omp.h>
#define N 1024*1024*64
int main() {
    // Deliberate pathology demo: every iteration of the "parallel" loop
    // serializes through one critical section, so threads mostly wait on
    // each other.  (A fast version would use reduction(+:sum) instead.)
    int sum = 0;
    double t0 = omp_get_wtime();
#pragma omp parallel for
    for (int i = 0; i < N; ++i) {
#pragma omp critical
        sum++;
    }
    double t1 = omp_get_wtime();
    printf("sum: %d, time: %2.2f seconds\n", sum, t1 - t0);
    return 0;
}
|
core_ztrtri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trtri
*
* Computes the inverse of an upper or lower
* triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* = PlasmaNonUnit: A is non-unit triangular;
* = PlasmaUnit: A is unit triangular.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @retval PlasmaSuccess on successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, A(i,i) is exactly zero. The triangular
* matrix is singular and its inverse can not be computed.
*
******************************************************************************/
int core_ztrtri(plasma_enum_t uplo, plasma_enum_t diag,
                int n,
                plasma_complex64_t *A, int lda)
{
    // Delegate to LAPACKE's workspace variant of the in-place triangular
    // inverse; the LAPACKE return value is passed through unchanged
    // (0 = success, <0 = bad argument, >0 = singular diagonal).
    int info = LAPACKE_ztrtri_work(LAPACK_COL_MAJOR,
                                   lapack_const(uplo), lapack_const(diag),
                                   n, A, lda);
    return info;
}
/******************************************************************************/
void core_omp_ztrtri(plasma_enum_t uplo, plasma_enum_t diag,
                     int n,
                     plasma_complex64_t *A, int lda,
                     int iinfo,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    // A is read and overwritten in place by the inverse, hence inout.
    #pragma omp task depend(inout:A[0:lda*n])
    {
        // Run only if no earlier task in this sequence has failed.
        if (sequence->status == PlasmaSuccess) {
            int info = core_ztrtri(uplo, diag, n, A, lda);
            if (info != 0) {
                // Offset the local error code by the caller-provided base
                // iinfo before recording the failure.
                plasma_request_fail(sequence, request, iinfo + info);
            }
        }
    }
}
|
lstm_bwd.c | #include <libxsmm.h>
#include <libxsmm_intrinsics_x86.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include "lstm_bwd.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
#define CHKERR_LIBXSMM_DNN(A) if ( A != LIBXSMM_DNN_SUCCESS ) fprintf(stderr, "%s\n", libxsmm_dnn_get_error(A) );
#if 0
# define PRINT_LAYOUT2(DESC, LAYOUT) print_layout2(DESC, LAYOUT)
#else
# define PRINT_LAYOUT2(DESC, LAYOUT)
#endif
/* Debug helper: dump a tensor datalayout as
   "<desc>: F:<format> TT:<tensor_type> [DIM:size, ...]", dimensions
   printed from the outermost (num_dims-1) down to 0. */
void print_layout2(char *desc, libxsmm_dnn_tensor_datalayout *layout) {
  char *dim_name[] = {"N", "H", "W", "C", "K", "R", "S", "X", "RLM", "RLK", "RLN"};
  printf("%s: F:%d TT: %d [", desc, layout->format, layout->tensor_type);
  for (int d = layout->num_dims - 1; d >= 0; d--) {
    const char *sep = (d == 0) ? "" : ", ";
    printf("%s:%d%s", dim_name[layout->dim_type[d]], layout->dim_size[d], sep);
  }
  printf("]\n");
}
/* Zero-fill a float buffer of `size` elements in parallel.
   Fix: the original used `int i` with `(int)size`, silently truncating
   the element count for buffers larger than INT_MAX elements.  The loop
   index is now size_t, matching the parameter (requires OpenMP >= 3.0
   for an unsigned loop variable).  The parallel loop is kept rather than
   memset — presumably for NUMA first-touch placement; confirm before
   simplifying. */
void zero_buf(float* buf, size_t size) {
  size_t i;
#if defined(_OPENMP)
# pragma omp parallel for private(i)
#endif
  for (i = 0; i < size; ++i) {
    buf[i] = 0.0f;
  }
}
void* lstm_bwd_create( int N, /* minibatch size */
int C, /* input size */
int K, /* output size */
int t, /* timesteps = 1 */
int nThreads, /* number of threads */
const int w_in_kcck,
const int w_in_trans,
const float *xt,
const float *csp,
const float *hp,
const float *ht,
const float *w,
const float *r,
const float *cst,
const float *it,
const float *ft,
const float *ot,
const float *cit,
const float *cot,
const float *dcs,
const float *dht,
float *dxt,
float *dcspt,
float *dhpt,
float *dw,
float *dr,
float *db )
{
libxsmm_dnn_rnncell_desc lstmcell_desc;
libxsmm_dnn_rnncell* libxsmm_handle;
libxsmm_dnn_tensor* libxsmm_input;
libxsmm_dnn_tensor* libxsmm_cs_prev;
libxsmm_dnn_tensor* libxsmm_hidden_state_prev;
libxsmm_dnn_tensor* libxsmm_hidden_state = NULL;
libxsmm_dnn_tensor* libxsmm_weight;
libxsmm_dnn_tensor* libxsmm_recur_weight;
libxsmm_dnn_tensor* libxsmm_cs;
libxsmm_dnn_tensor* libxsmm_i;
libxsmm_dnn_tensor* libxsmm_f;
libxsmm_dnn_tensor* libxsmm_o;
libxsmm_dnn_tensor* libxsmm_ci;
libxsmm_dnn_tensor* libxsmm_co;
libxsmm_dnn_tensor* libxsmm_dinput;
libxsmm_dnn_tensor* libxsmm_dcs_prev;
libxsmm_dnn_tensor* libxsmm_dhidden_state_prev;
libxsmm_dnn_tensor* libxsmm_dweight;
libxsmm_dnn_tensor* libxsmm_drecur_weight;
libxsmm_dnn_tensor* libxsmm_dbias;
libxsmm_dnn_tensor* libxsmm_dcs;
libxsmm_dnn_tensor* libxsmm_dhidden_state;
libxsmm_dnn_tensor_datalayout* libxsmm_layout;
libxsmm_dnn_err_t status;
if (N <= 0) {
printf("N: %d should be > 0\n", N);
}
if (C <= 0) {
printf("C: %d should be > 0\n", C);
}
if (K <= 0) {
printf("K: %d should be > 0\n", K);
}
if (xt == 0 || csp == 0 || hp == 0 || w == 0 || r == 0 || (t > 1 && ht == 0) ||
cst == 0 || it == 0 || ft == 0 || ot == 0 || cit == 0 || cot == 0 ||
dxt == 0 || dcspt== 0|| dhpt== 0|| dw == 0 || dr == 0 || db == 0 || dht == 0 || dcs == 0) {
printf("None of the pointers should be NULL::\n");
printf("x:%p\n", xt);
printf("csp:%p\n", csp);
printf("h_prev:%p\n", hp);
printf("ht:%p\n", ht);
printf("w:%p\n", w);
printf("r:%p\n", r);
printf("cs:%p\n", cst);
printf("i:%p\n", it);
printf("f:%p\n", ft);
printf("o:%p\n", ot);
printf("ci:%p\n", cit);
printf("co:%p\n", cot);
printf("dcs:%p\n", dcs);
printf("dxt:%p\n", dxt);
printf("dcspt:%p\n", dcspt);
printf("dhpt:%p\n", dhpt);
printf("dw:%p\n", dw);
printf("dr:%p\n", dr);
printf("db:%p\n", db);
printf("dht:%p\n", dht);
}
/* setup LIBXSMM handle */
lstmcell_desc.threads = nThreads;
lstmcell_desc.N = N;
lstmcell_desc.C = C;
lstmcell_desc.K = K;
lstmcell_desc.max_T = t;
lstmcell_desc.bn = 24;
if(N % 24 == 0) lstmcell_desc.bn = 24;
else if(N % 16 == 0) lstmcell_desc.bn = 16;
else if(N % 12 == 0) lstmcell_desc.bn = 12;
else if(N % 8 == 0) lstmcell_desc.bn = 8;
else if(N % 6 == 0) lstmcell_desc.bn = 6;
lstmcell_desc.bc = 64;
lstmcell_desc.bk = 64;
lstmcell_desc.cell_type = LIBXSMM_DNN_RNNCELL_LSTM;
lstmcell_desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32;
lstmcell_desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32;
lstmcell_desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NC;
lstmcell_desc.filter_format = (w_in_kcck ? LIBXSMM_DNN_TENSOR_FORMAT_CKPACKED : LIBXSMM_DNN_TENSOR_FORMAT_CK);
libxsmm_handle = libxsmm_dnn_create_rnncell( lstmcell_desc, &status );
CHKERR_LIBXSMM_DNN( status );
/* setup LIBXSMM buffers and filter */
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Xt", libxsmm_layout);
libxsmm_input = libxsmm_dnn_link_tensor( libxsmm_layout, xt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CSP", libxsmm_layout);
libxsmm_cs_prev = libxsmm_dnn_link_tensor( libxsmm_layout, csp, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("HP", libxsmm_layout);
libxsmm_hidden_state_prev = libxsmm_dnn_link_tensor( libxsmm_layout, hp, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
if(t > 1) {
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("HT", libxsmm_layout);
libxsmm_hidden_state = libxsmm_dnn_link_tensor( libxsmm_layout, ht, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
}
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, w_in_trans ? LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS : LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("W", libxsmm_layout);
libxsmm_weight = libxsmm_dnn_link_tensor( libxsmm_layout, w, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, w_in_trans ? LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS : LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("R", libxsmm_layout);
libxsmm_recur_weight = libxsmm_dnn_link_tensor( libxsmm_layout, r, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CSt", libxsmm_layout);
libxsmm_cs = libxsmm_dnn_link_tensor( libxsmm_layout, cst, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("It", libxsmm_layout);
libxsmm_i = libxsmm_dnn_link_tensor( libxsmm_layout, it, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Ft", libxsmm_layout);
libxsmm_f = libxsmm_dnn_link_tensor( libxsmm_layout, ft, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Ot", libxsmm_layout);
libxsmm_o = libxsmm_dnn_link_tensor( libxsmm_layout, ot, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CIt", libxsmm_layout);
libxsmm_ci = libxsmm_dnn_link_tensor( libxsmm_layout, cit, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("COt", libxsmm_layout);
libxsmm_co = libxsmm_dnn_link_tensor( libxsmm_layout, cot, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dXt", libxsmm_layout);
libxsmm_dinput = libxsmm_dnn_link_tensor( libxsmm_layout, dxt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dCSPt", libxsmm_layout);
libxsmm_dcs_prev = libxsmm_dnn_link_tensor( libxsmm_layout, dcspt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dHPt", libxsmm_layout);
libxsmm_dhidden_state_prev = libxsmm_dnn_link_tensor( libxsmm_layout, dhpt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dW", libxsmm_layout);
libxsmm_dweight = libxsmm_dnn_link_tensor( libxsmm_layout, dw, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dR", libxsmm_layout);
libxsmm_drecur_weight = libxsmm_dnn_link_tensor( libxsmm_layout, dr, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dB", libxsmm_layout);
libxsmm_dbias = libxsmm_dnn_link_tensor( libxsmm_layout, db, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dCS", libxsmm_layout);
libxsmm_dcs = libxsmm_dnn_link_tensor( libxsmm_layout, dcs, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dHt", libxsmm_layout);
libxsmm_dhidden_state = libxsmm_dnn_link_tensor( libxsmm_layout, dht, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
/* bind buffers and filter to handle */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_input, LIBXSMM_DNN_RNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_cs_prev, LIBXSMM_DNN_RNN_REGULAR_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_hidden_state_prev, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV ) );
if(t > 1) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_hidden_state, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE ) );
}
if(w_in_trans) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_weight, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_recur_weight, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS ) );
} else {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_weight, LIBXSMM_DNN_RNN_REGULAR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_recur_weight, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_cs, LIBXSMM_DNN_RNN_REGULAR_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_i, LIBXSMM_DNN_RNN_INTERNAL_I ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_f, LIBXSMM_DNN_RNN_INTERNAL_F ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_o, LIBXSMM_DNN_RNN_INTERNAL_O ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_ci, LIBXSMM_DNN_RNN_INTERNAL_CI ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_co, LIBXSMM_DNN_RNN_INTERNAL_CO ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dinput, LIBXSMM_DNN_RNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dcs_prev, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dhidden_state_prev, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dweight, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_drecur_weight, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dbias, LIBXSMM_DNN_RNN_GRADIENT_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dcs, LIBXSMM_DNN_RNN_GRADIENT_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dhidden_state, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE ) );
size_t scratch_size = libxsmm_dnn_rnncell_get_scratch_size( libxsmm_handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, &status );
CHKERR_LIBXSMM_DNN( status );
if (scratch_size > 0) {
void* scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_scratch( libxsmm_handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, scratch ) );
zero_buf( (float*)scratch, scratch_size/4 );
}
return (void*)libxsmm_handle;
}
/* Rebinds the caller-supplied buffers to an already-created LSTM BWD/UPD
 * handle (passed opaquely as libxsmm_handle_) and sets the sequence length.
 * - w_in_trans: nonzero means w/r point at pre-transposed weights and are
 *   bound to the *_TRANS tensor slots instead of the regular ones.
 * - t: sequence length for this invocation.
 * - ht may be NULL: the regular hidden-state tensor is only rebound when a
 *   pointer is supplied (mirrors the conditional bind at handle creation).
 * All other pointers are required; if any is NULL the function prints a
 * diagnostic and returns without touching the handle. */
void lstm_bwd_set_ptr( void* libxsmm_handle_, int w_in_trans,
                       const int t,
                       const float *xt,
                       const float *csp,
                       const float *hp,
                       const float *ht,
                       const float *w,
                       const float *r,
                       const float *cst,
                       const float *it,
                       const float *ft,
                       const float *ot,
                       const float *cit,
                       const float *cot,
                       const float *dcs,
                       const float *dht,
                       float *dxt,
                       float *dcspt,
                       float *dhpt,
                       float *dw,
                       float *dr,
                       float *db )
{
  libxsmm_dnn_err_t status = LIBXSMM_DNN_SUCCESS;
  libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
  /* ht is intentionally absent from this check -- it is the one optional
   * argument (see the conditional rebind below). */
  if (xt == 0 || csp == 0 || hp == 0 || w == 0 || r == 0 ||
      cst == 0 || it == 0 || ft == 0 || ot == 0 || cit == 0 || cot == 0 ||
      dxt == 0 || dcspt == 0 || dhpt == 0 || dw == 0 || dr == 0 || db == 0 ||
      dht == 0 || dcs == 0) {
    printf("None of the pointers should be NULL::\n");
    /* cast to a void pointer so the %p conversion is well-defined */
    printf("x:%p\n", (const void*)xt);
    printf("csp:%p\n", (const void*)csp);   /* bug fix: was mislabeled "cst" */
    printf("h_prev:%p\n", (const void*)hp);
    printf("ht:%p\n", (const void*)ht);
    printf("w:%p\n", (const void*)w);
    printf("r:%p\n", (const void*)r);
    printf("cst:%p\n", (const void*)cst);   /* bug fix: was mislabeled "cs" */
    printf("i:%p\n", (const void*)it);
    printf("f:%p\n", (const void*)ft);
    printf("o:%p\n", (const void*)ot);
    printf("ci:%p\n", (const void*)cit);
    printf("co:%p\n", (const void*)cot);
    printf("dcs:%p\n", (const void*)dcs);
    printf("dxt:%p\n", (const void*)dxt);
    printf("dcspt:%p\n", (const void*)dcspt);
    printf("dhpt:%p\n", (const void*)dhpt);
    printf("dw:%p\n", (const void*)dw);
    printf("dr:%p\n", (const void*)dr);
    printf("db:%p\n", (const void*)db);
    printf("dht:%p\n", (const void*)dht);
    /* bug fix: previously execution fell through and bound the NULL
     * buffers to the handle anyway; bail out instead. */
    return;
  }
  /* bind buffers and filter to handle */
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_set_sequence_length( handle, t) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status), xt) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status), csp) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status), hp) );
  if(ht != 0) { CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status), ht) ); }
  if(w_in_trans) {
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status), w) );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status), r) );
  } else {
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status), w) );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status), r) );
  }
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status), cst) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status), it) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status), ft) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status), ot) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status), cit) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status), cot) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status), dxt) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status), dcspt) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status), dhpt) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status), dw) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status), dr) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status), db) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status), dcs) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status), dht) );
}
/* Runs the LSTM backward/weight-update pass with one worker per OpenMP
 * thread; each thread calls the thread-scoped execute entry with its id.
 * Aborts at runtime if the binary was built without OpenMP support. */
void lstm_bwd_execute_omp( void* libxsmm_handle_ )
{
#ifdef _OPENMP
  libxsmm_dnn_rnncell* rnncell = (libxsmm_dnn_rnncell*) libxsmm_handle_;
#pragma omp parallel
  {
    const int my_tid = omp_get_thread_num();
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_execute_st( rnncell, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, 0, my_tid ) );
  }
#else
  printf("%s:%d Shouldn't come here... exiting\n", __FILE__, __LINE__);
  exit(1);
#endif
}
/* Single-thread entry point: runs the LSTM backward/weight-update pass
 * for the calling thread, identified by tid. */
void lstm_bwd_execute_st( void* libxsmm_handle_, int tid )
{
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_execute_st(
      (libxsmm_dnn_rnncell*) libxsmm_handle_,
      LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, 0, tid ) );
}
/* Tears down an LSTM BWD/UPD handle: destroys every bound tensor wrapper,
 * releases its slot on the handle, frees the scratch buffer (if any), and
 * finally destroys the rnncell handle itself.  NOTE: this destroys only the
 * libxsmm tensor wrappers -- the underlying user data buffers are NOT freed.
 * Tensors that are bound conditionally at creation time (HIDDEN_STATE and
 * the regular vs. transposed weight slots) are checked for presence first. */
void lstm_bwd_destroy( void* libxsmm_handle_ )
{
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
libxsmm_dnn_err_t status = LIBXSMM_DNN_SUCCESS;
/* unconditionally-bound input tensors */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status) ) );
/* HIDDEN_STATE is only bound when t > 1 at creation; destroy+release if present */
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE ) );
}
/* exactly one of the regular / transposed weight pairs was bound,
 * depending on w_in_trans at creation -- check all four slots */
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT ) );
}
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT ) );
}
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS ) );
}
if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status)) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS ) );
}
/* remaining unconditional tensors: state, gates, and all gradients */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status) ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status) ) );
/* after destroying the wrappers, release the handle's tensor slots */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_I ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_F ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_O ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_CI ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_CO ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE ) );
/* the scratch buffer was allocated with libxsmm_aligned_malloc at creation
 * when scratch_size > 0; release the binding, then free the memory */
size_t scratch_size = libxsmm_dnn_rnncell_get_scratch_size( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, &status );
if (scratch_size > 0) {
void *scratch = libxsmm_dnn_rnncell_get_scratch_ptr( handle, &status );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_scratch( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD ) );
if(scratch) libxsmm_free(scratch);
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_rnncell( handle ) );
}
|
GB_binop__eq_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_fp32
// A.*B function (eWiseMult): GB_AemultB__eq_fp32
// A*D function (colscale): GB_AxD__eq_fp32
// D*A function (rowscale): GB_DxB__eq_fp32
// C+=B function (dense accum): GB_Cdense_accumB__eq_fp32
// C+=b function (dense accum): GB_Cdense_accumb__eq_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_fp32
// C=scalar+B GB_bind1st__eq_fp32
// C=scalar+B' GB_bind1st_tran__eq_fp32
// C=A+scalar GB_bind2nd__eq_fp32
// C=A'+scalar GB_bind2nd_tran__eq_fp32
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// NOTE: compiled out for the EQ operator -- the dense C += A+B kernel is
// only generated for the accumulator operators listed below, so this stub
// (named "(none)" by the code generator) is intentionally dead code.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The numeric work lives in the included template, which expands the
// GB_BINOP / GB_*TYPE macros defined earlier in this generated file.
// Returns GrB_NO_VALUE when this operator/type kernel is disabled.
GrB_Info GB_Cdense_ewise3_noaccum__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  For the EQ
// operator the template body is compiled out (#if 0 below), so this
// function is a successful no-op placeholder kept for a uniform interface.
GrB_Info GB_Cdense_accumB__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  As with accumB above,
// the template body is compiled out (#if 0) for the EQ operator, so this
// is a successful no-op placeholder.
GrB_Info GB_Cdense_accumb__eq_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with
// C(i,j) = (A(i,j) == D(j,j)) for this operator.  The slice arrays
// partition A's entries across ntasks tasks for the template's
// parallel loop.  Returns GrB_NO_VALUE if this kernel is disabled.
GrB_Info GB_AxD__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values are bool (EQ produces bool from float inputs)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with
// C(i,j) = (D(i,i) == B(i,j)) for this operator.
// Returns GrB_NO_VALUE if this kernel is disabled.
GrB_Info GB_DxB__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values are bool (EQ produces bool from float inputs)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL releases the per-matrix slice workspaces allocated by the
// add template below; redefined here so the template's error paths and the
// normal exit both clean up the same three slicings (M, A, B).
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is the EQ operator here.
// The set-union pattern and masking logic live in GB_add_template.c;
// TaskList partitions the work into C_ntasks tasks on C_nthreads threads.
GrB_Info GB_AaddB__eq_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces, allocated (if needed) inside the template and
// released by GB_FREE_ALL on every exit path
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying the EQ operator on the
// set-intersection pattern of A and B (see GB_emult_template.c).
// Uses the same GB_FREE_ALL workspace cleanup as GB_AaddB above.
GrB_Info GB_AemultB__eq_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces, allocated inside the template and freed on exit
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for all anz entries, binding the scalar x as the
// first operand.  Entries whose bitmap flag Bb is clear (per GBB) are left
// untouched.  Cx and Bx may alias; the per-entry write is independent, so
// the loop is parallelized statically across nthreads threads.
GrB_Info GB_bind1st__eq_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool  *Cx = (bool  *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    const float x = (*((float *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present per the bitmap are computed
        if (GBB (Bb, p))
        {
            Cx [p] = (x == Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for all anz entries, binding the scalar y as the
// second operand.  Entries whose bitmap flag Ab is clear (per GBB) are left
// untouched.  Cx and Ax may alias; iterations are independent, so the loop
// is parallelized statically across nthreads threads.
GrB_Info GB_bind2nd__eq_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool  *Cx = (bool  *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    const float y = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present per the bitmap are computed
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the EQ operator with the scalar x
// bound as the first operand, via GB_unop_transpose.c and the GB_CAST_OP
// macro defined just above.  GB_ATYPE is temporarily redefined because the
// transpose template reads the input through GB_ATYPE, but here A plays the
// role of the binary operator's second argument.
GrB_Info GB_bind1st_tran__eq_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the EQ operator with the scalar y
// bound as the second operand, via GB_unop_transpose.c and the GB_CAST_OP
// macro defined just above.  No GB_ATYPE redefinition is needed here since
// A is already the operator's first argument.
GrB_Info GB_bind2nd_tran__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Tanh.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Tanh.c"
#else
// Forward pass: output = tanh(input), element-wise, delegating entirely
// to the tensor-level tanh primitive (which also handles resizing).
void THNN_(Tanh_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output)
{
THTensor_(tanh)(output, input);
}
// Backward pass for tanh: gradInput = gradOutput * (1 - output^2),
// element-wise, using the saved forward output (d/dx tanh = 1 - tanh^2).
// Two paths: a generic strided TH_TENSOR_APPLY3 loop when any tensor is
// 1-D or non-contiguous, and a raw-pointer OpenMP loop otherwise.
void THNN_(Tanh_updateGradInput)(
          THNNState *state,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *output)
{
  THNN_CHECK_SHAPE(output, gradOutput);
  THTensor_(resizeAs)(gradInput, output);

  if (output->nDimension == 1 ||
      !THTensor_(isContiguous)(output) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    // generic path: handles arbitrary strides
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output,
      real z = *output_data; \
      *gradInput_data = *gradOutput_data * (1. - z*z);
    );
  }
  else
  {
    // contiguous fast path: flat arrays, parallel over elements
    real* ptr_gradOutput = THTensor_(data)(gradOutput);
    real* ptr_gradInput  = THTensor_(data)(gradInput);
    real* ptr_output     = THTensor_(data)(output);
    // fix: hoist the element count out of the loop condition -- the
    // original re-evaluated THTensor_(nElement)(gradInput) as the bound
    // of the OpenMP loop instead of computing it once
    int64_t n = THTensor_(nElement)(gradInput);
    int64_t i;
#pragma omp parallel for private(i)
    for (i = 0; i < n; i++)
    {
      real z = ptr_output[i];
      ptr_gradInput[i] = ptr_gradOutput[i] * (1. - z*z);
    }
  }
}
#endif
|
fixup.c | /******************************************************************************
* *
* FIXUP.C *
* *
* REPAIR INTEGRATION FAILURES *
* DRIFT FRAME DENSITY AND INTERNAL ENERGY FLOORS FOLLOWING A. TCHEKHOVSKOY
* SEE RESSLER ET AL. 2016
* *
******************************************************************************/
#include "decs.h"
// Apply floors to density, internal energy
// Apply floors to density, internal energy
// Visits every zone covered by ZLOOP and calls fixup1zone on its
// primitives; zones are independent, so the triple zone loop is
// parallelized with collapse(3).  The whole pass is timed as TIMER_FIXUP.
void fixup(grid_prim_type Pv, grid_eosvar_type extra) {
timer_start(TIMER_FIXUP);
#pragma omp parallel for collapse(3) // schedule(dynamic)
ZLOOP fixup1zone(i, j, k, Pv[i][j][k], extra[i][j][k]);
timer_stop(TIMER_FIXUP);
}
/* Convert the 4-velocity ucon into utcon, with utcon[0] = 0 and
 * utcon[i] = ucon[i] + gamma * beta[i] / alpha for i = 1..NDIM-1, where
 * alpha = 1/sqrt(-g^{00}), beta[i] = g^{0i} * alpha^2, and
 * gamma = alpha * ucon[0].  Identical arithmetic to the two-loop form,
 * with the beta[] temporary folded into a single pass. */
void ucon_to_utcon(
    double ucon[NDIM], struct of_geom *geom, double utcon[NDIM]) {
  const double alpha = 1. / sqrt(-geom->gcon[0][0]);
  const double gamma = alpha * ucon[0];
  utcon[0] = 0.;
  for (int mu = 1; mu < NDIM; mu++) {
    const double beta_mu = geom->gcon[0][mu] * alpha * alpha;
    utcon[mu] = ucon[mu] + gamma * beta_mu / alpha;
  }
}
/* Compute u^t for the coordinate 3-velocity vcon from
 * (u^t)^2 = 1 / (g_00 + 2 g_0i v^i + g_ij v^i v^j), clamped from below by
 * -g^{00} (the original's one_over_alpha_sq) before the square root so the
 * result stays real and bounded. */
void ut_calc_3vel(double vcon[NDIM], struct of_geom *geom, double *ut) {
  const double g_tt = geom->gcov[0][0];
  const double lin =
      2. * (geom->gcov[0][1] * vcon[1] + geom->gcov[0][2] * vcon[2] +
               geom->gcov[0][3] * vcon[3]);
  const double quad =
      geom->gcov[1][1] * vcon[1] * vcon[1] +
      geom->gcov[2][2] * vcon[2] * vcon[2] +
      geom->gcov[3][3] * vcon[3] * vcon[3] +
      2. * (geom->gcov[1][2] * vcon[1] * vcon[2] +
               geom->gcov[1][3] * vcon[1] * vcon[3] +
               geom->gcov[2][3] * vcon[2] * vcon[3]);
  double utsq = 1. / (g_tt + lin + quad);
  const double utsq_floor = -geom->gcon[0][0];
  if (utsq < utsq_floor) {
    utsq = utsq_floor;
  }
  *ut = sqrt(utsq);
}
// ORIGINAL FLOORS
/*
double rhoscal, uscal;
rhoscal = pow(r,-1.5);
uscal = rhoscal/r;
rhoflr = RHOMIN*rhoscal;
uflr = UUMIN*uscal;
if (rhoflr < RHOMINLIMIT) rhoflr = RHOMINLIMIT;
if (uflr < UUMINLIMIT) uflr = UUMINLIMIT;
pv[RHO] = MY_MAX(rhoflr, pv[RHO]);
pv[UU] = MY_MAX(uflr, pv[UU]);
if (mhd_gamma_calc(pv, geom, &gamma)) {
pflag[i][j][k] = -333;
} else {
if (gamma > GAMMAMAX) {
f = sqrt((GAMMAMAX*GAMMAMAX - 1.)/(gamma*gamma - 1.));
pv[U1] *= f;
pv[U2] *= f;
pv[U3] *= f;
}
}
return;*/
// Radius-dependent scaling factor fed to EOS_set_floors for zone (i,j,k);
// the form is selected at compile time by METRIC.
double get_scale(int i, int j, int k) {
#if METRIC == MINKOWSKI
// flat space: a constant floor scale
{ return 1.e-2; }
#elif METRIC == MKS
{
double scale, r, th, X[NDIM];
// zone-center coordinates X -> (r, th) via the coordinate transforms
coord(i, j, k, CENT, X);
bl_coord(X, &r, &th);
// broken power law in radius: slope -FLR_POWER1 inside FLR_R0 and
// -FLR_POWER2 outside, with the prefactor chosen so both branches
// agree at r = FLR_R0 (both evaluate to FLR_R0^-FLR_POWER1 there)
if (r <= FLR_R0) {
scale = pow(r, -FLR_POWER1);
} else {
scale = pow(FLR_R0, FLR_POWER2 - FLR_POWER1) * pow(r, -FLR_POWER2);
}
return scale;
}
#else
{
// unsupported METRIC: fail loudly rather than return a bogus scale
fprintf(stderr, "[fixup1zone]: Unknown metric!\n");
exit(1);
}
#endif // METRIC
}
// Apply density/internal-energy floors and a Lorentz-factor ceiling to the
// primitives pv of zone (i,j,k). Strongly magnetized zones get drift-frame
// floors (mass/energy injected without changing the drift velocity);
// weakly magnetized zones get normal-observer-frame floors applied
// conservatively via primtoflux/Utoprim.
void fixup1zone(
    int i, int j, int k, double pv[NVAR], double extra[EOS_NUM_EXTRA]) {
  double rhoflr, uflr, f, gamma;
  struct of_geom *geom;
  struct of_state q;
  double bsq;
  // Keep a copy of the pre-floor primitives for blending and for
  // fixup_passive below.
  double pv_prefloor[NVAR];
  PLOOP pv_prefloor[ip] = pv[ip];
  double scale = get_scale(i, j, k);
#if EOS == EOS_TYPE_TABLE
  EOS_SC_fill(pv, extra);
#endif
  // Enhance floors in case of large magnetic energy density
  geom = get_geometry(i, j, k, CENT);
  get_state(pv, geom, &q);
  bsq = dot(q.bcon, q.bcov);
  EOS_set_floors(scale, pv[RHO], pv[UU], bsq, &rhoflr, &uflr, extra);
#if EOS == EOS_TYPE_TABLE && POLYTROPE_FALLBACK && COLD_FLOORS
  double rhosave = pv[RHO];
#endif // POLYTROPE_FALLBACK
  if (rhoflr > pv[RHO] || uflr > pv[UU]) { // Apply floors
    fixup_required[i][j][k] = 1;
    // trans > 0 when bsq dominates both rho and u by ~10x; it also serves as
    // the blend weight (capped at 1) between drift-frame and pre-floor
    // velocities below.
    double trans =
        10. * bsq / (MY_MIN(fabs(pv[RHO]), fabs(pv[UU])) + SMALL) - 1.;
    if (trans > 0.) { // Strongly magnetized region; use drift frame floors
      pv[RHO] = MY_MAX(pv[RHO], rhoflr);
      pv[UU] = MY_MAX(pv[UU], uflr);
      // NOTE(review): this inner `gamma` shadows the function-scope `gamma`
      // used by mhd_gamma_calc at the bottom -- intentional but fragile.
      double betapar, betasqmax, betasq, gamma, ucondr[NDIM], Bcon[NDIM];
      double Bcov[NDIM], udotB, Bsq, B, wold, QdotB, wnew, x, vpar;
      double one_over_ucondr_t, vcon[NDIM], ucon[NDIM], ut, utcon[NDIM];
      trans = MY_MIN(trans, 1.);
      // Set velocity to drift velocity
      betapar = -q.bcon[0] / ((bsq + SMALL) * q.ucon[0]);
      betasq = betapar * betapar * bsq;
      // Cap the drift speed so the Lorentz factor stays below GAMMAMAX.
      betasqmax = 1. - 1. / (GAMMAMAX * GAMMAMAX);
      betasq = MY_MIN(betasq, betasqmax);
      gamma = 1. / sqrt(1. - betasq);
      DLOOP1 ucondr[mu] = gamma * (q.ucon[mu] + betapar * q.bcon[mu]);
      // Lab-frame magnetic field 3-vector from the primitives.
      // NOTE(review): the loop index shadows the zone-index parameter `i`.
      Bcon[0] = 0.;
      for (int i = 1; i < NDIM; i++) {
        Bcon[i] = pv[B1 - 1 + i];
      }
      lower(Bcon, geom->gcov, Bcov);
      udotB = dot(q.ucon, Bcov);
      Bsq = dot(Bcon, Bcov);
      B = sqrt(Bsq);
      // Enthalpy before floors are applied
#if EOS == EOS_TYPE_TABLE
      EOS_SC_fill(pv_prefloor, extra);
#endif
      wold = EOS_enthalpy_rho0_u(pv_prefloor[RHO], pv_prefloor[UU], extra);
      QdotB = udotB * wold * q.ucon[0];
      // Apply floors to enthalpy and recompute parallel velocity
#if EOS == EOS_TYPE_TABLE
      EOS_SC_fill(pv, extra);
#endif
      wnew = EOS_enthalpy_rho0_u(pv[RHO], pv[UU], extra);
      x = 2. * QdotB / (B * wnew * ucondr[0] + SMALL);
      vpar = x / (ucondr[0] * (1. + sqrt(1. + x * x)));
      one_over_ucondr_t = 1. / ucondr[0];
      // Total 3-velocity = parallel (field-aligned) part + drift part.
      vcon[0] = 1.;
      for (int i = 1; i < NDIM; i++) {
        vcon[i] = vpar * Bcon[i] / (B + SMALL) + ucondr[i] * one_over_ucondr_t;
      }
      ut_calc_3vel(vcon, geom, &ut);
      DLOOP1 ucon[mu] = ut * vcon[mu];
      ucon_to_utcon(ucon, geom, utcon);
      // Convert 3-velocity to relative 4-velocity and store in primitives,
      // blending with the pre-floor velocity by the weight `trans`.
      for (int i = 1; i < NDIM; i++) {
        pv[i + UU] = utcon[i] * trans + pv_prefloor[i + UU] * (1. - trans);
      }
    } else { // Weakly magnetized region; use normal observer frame floors
      // Inject the floor deficit as conserved quantities and re-invert, so
      // the added material is at rest in the normal-observer frame.
      double Padd[NVAR], Uadd[NVAR];
      PLOOP Padd[ip] = 0.;
      PLOOP Uadd[ip] = 0.;
      Padd[RHO] = MY_MAX(0.0, rhoflr - pv[RHO]);
      Padd[UU] = MY_MAX(0.0, uflr - pv[UU]);
      get_state(Padd, &ggeom[i][j][CENT], &q);
      primtoflux(Padd, &q, 0, 0, &ggeom[i][j][CENT], Uadd);
      double Utot[NVAR];
      get_state(pv, &ggeom[i][j][CENT], &q);
      primtoflux(pv, &q, 0, 0, &ggeom[i][j][CENT], Utot);
      PLOOP Utot[ip] += Uadd[ip];
      PLOOP pv[ip] += Padd[ip];
      // Record fails here?
      Utoprim(Utot, &ggeom[i][j][CENT], pv);
    }
  }
#if EOS == EOS_TYPE_TABLE && POLYTROPE_FALLBACK && COLD_FLOORS
  // set to zero temperature anywhere in the floor region
  if (rhosave < RHOEPS * rho_poly_thresh) {
    pv[UU] = EOS_SC_get_minu(pv[RHO], pv[YE], scale);
  }
#endif
#if NVAR_PASSIVE > 0
  fixup_passive(i, j, k, pv, pv_prefloor);
#endif
#if ELECTRONS && EOS == EOS_TYPE_GAMMA
  // Reset entropy after floors
  pv[KTOT] = EOS_Gamma_entropy_rho0_u(pv[RHO], pv[UU]);
  // Set KTOTMAX to 3 by controlling u, to avoid anomalous cooling from funnel
  // wall
  double KTOTMAX = 3.;
  if (pv[KTOT] > KTOTMAX) {
    pv[UU] = KTOTMAX * pow(pv[RHO], gam) / (gam - 1.);
    pv[KTOT] = KTOTMAX;
  }
#endif // ELECTRONS
  // Limit gamma with respect to normal observer
  if (mhd_gamma_calc(pv, geom, &gamma)) {
    pflag[i][j][k] = -333;
  } else {
    if (gamma > GAMMAMAX) {
      // Rescale the velocity so the Lorentz factor equals GAMMAMAX.
      f = sqrt((GAMMAMAX * GAMMAMAX - 1.) / (gamma * gamma - 1.));
      pv[U1] *= f;
      pv[U2] *= f;
      pv[U3] *= f;
    }
  }
}
// Scratch state for the neighbor-averaging sweep: Pv_tmp and pflag_tmp hold
// a snapshot taken at the start of each sweep; pflag_save keeps the original
// failure codes for the diagnostic printouts.
static grid_prim_type Pv_tmp;
static grid_int_type pflag_tmp, pflag_save;
// Replace bad points with values interpolated from neighbors
// Loop over the non-magnetic primitive variables (indices [0, B1)).
#define FLOOP for (int ip = 0; ip < B1; ip++)
// Repair zones whose primitive recovery failed (pflag set by Utoprim):
// each flagged zone is replaced by an inverse-distance weighted average of
// its good 3x3x3 neighbors, then floors are re-applied via fixup1zone().
void fixup_utoprim(grid_prim_type Pv, grid_eosvar_type extra) {
  timer_start(TIMER_FIXUP);
  int bad;
  double sum[B1], wsum;
  // Flip the logic of the pflag[] so that it now indicates which cells are good
#pragma omp parallel for collapse(3)
  ZSLOOP(-NG, (N1 - 1 + NG), -NG, (N2 - 1 + NG), -NG, (N3 - 1 + NG)) {
    pflag_save[i][j][k] = pflag[i][j][k];
    pflag[i][j][k] = !pflag[i][j][k];
  }
  // Make sure we are not using ill defined corner regions
  // (all eight NGxNGxNG ghost-zone corners are marked bad so they never
  // contribute to the neighbor averages below)
  for (int i = 0; i < NG; i++) {
    for (int j = 0; j < NG; j++) {
      for (int k = 0; k < NG; k++) {
        pflag[i][j][k] = 0;
        pflag[i + N1 + NG][j][k] = 0;
        pflag[i][j + N2 + NG][k] = 0;
        pflag[i][j][k + N3 + NG] = 0;
        pflag[i + N1 + NG][j + N2 + NG][k] = 0;
        pflag[i + N1 + NG][j][k + N3 + NG] = 0;
        pflag[i][j + N2 + NG][k + N3 + NG] = 0;
        // pflag[i+N1+NG][j+N2+NG][k+N3-1+NG] = 0;
        pflag[i + N1 + NG][j + N2 + NG][k + N3 + NG] = 0;
      }
    }
  }
  // Fix the interior points first
  int fail_consec = 0;
  do {
    bad = 0;
    // Snapshot primitives and flags so this sweep reads consistent data.
#pragma omp parallel for collapse(3)
    ZSLOOP(-NG, N1 + NG - 1, -NG, N2 + NG - 1, -NG, N3 + NG - 1)
    FLOOP Pv_tmp[i][j][k][ip] = Pv[i][j][k][ip];
#pragma omp parallel for collapse(3)
    ZSLOOP(-NG, N1 + NG - 1, -NG, N2 + NG - 1, -NG, N3 + NG - 1)
    pflag_tmp[i][j][k] = pflag[i][j][k];
    // NOTE(review): the parallel version of this sweep is commented out;
    // pflag[] is updated mid-sweep (fixed cells become usable neighbors),
    // which creates an iteration-order dependence -- confirm before
    // re-enabling the pragma.
    //#pragma omp parallel for collapse(3) reduction(+:bad)
    ZSLOOP(0, (N1 - 1), 0, (N2 - 1), 0, (N3 - 1)) {
      if (pflag_tmp[i][j][k] == 0) { // zone was flagged bad at sweep start
        wsum = 0.;
        FLOOP sum[ip] = 0.;
        // Inverse Manhattan-distance weights; bad neighbors contribute zero
        // weight through the pflag_tmp factor.
        for (int l = -1; l < 2; l++) {
          for (int m = -1; m < 2; m++) {
            for (int n = -1; n < 2; n++) {
              double w = 1. / (abs(l) + abs(m) + abs(n) + 1) *
                         pflag_tmp[i + l][j + m][k + n];
              wsum += w;
              FLOOP sum[ip] += w * Pv_tmp[i + l][j + m][k + n][ip];
            }
          }
        }
        if (wsum < 1.e-10) {
          // No usable neighbors. Average over all neighbors.
          fail_consec++;
          fprintf(stderr,
              "[%i][istart=%i] fixup_utoprim problem: No usable neighbors!\n",
              mpi_myrank(), global_start[1]);
          fprintf(stderr, "i j k = %i %i %i pflag = %d wsum = %e\n", i, j, k,
              pflag_save[i][j][k], wsum);
          // exit(-1); // DEBUG
          for (int l = -1; l < 2; l++) {
            for (int m = -1; m < 2; m++) {
              for (int n = -1; n < 2; n++) {
                double w = 1. / (abs(l) + abs(m) + abs(n) + 1);
                FLOOP sum[ip] += w * Pv_tmp[i + l][j + m][k + n][ip];
              }
            }
          }
          // NOTE(review): the unweighted sum above and the Pv_tmp writes
          // below are discarded -- Pv_tmp is re-copied from Pv at the top of
          // the next sweep and the loop `continue`s without using sum[].
          // Possibly Pv was intended here; confirm against upstream.
          Pv_tmp[i][j][k][U1] = 0.;
          Pv_tmp[i][j][k][U2] = 0.;
          Pv_tmp[i][j][k][U3] = 0.;
          bad++;
          continue;
        }
        fail_consec = 0;
        FLOOP Pv[i][j][k][ip] = sum[ip] / wsum;
        // Cell is fixed, can now use for other interpolations
        pflag[i][j][k] = 1;
        fixup1zone(i, j, k, Pv[i][j][k], extra[i][j][k]);
      }
    }
  } while (bad > 0 && fail_consec < N1 * N2 * N3);
  // Every interior zone failed repeatedly: fall back to a global fixup pass.
  if (fail_consec == N1 * N2 * N3)
    fixup(Pv, extra);
  timer_stop(TIMER_FIXUP);
}
#undef FLOOP
/*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2009-2013. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2009-2013 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
* Generic 'scriptable' hash cracker for JtR
*
* Renamed and changed from md5_gen* to dynamic*. We handle MD5 and SHA1
* at the present time. More crypt types 'may' be added later.
* Added SHA2 (SHA224, SHA256, SHA384, SHA512), GOST, Whirlpool crypt types.
* Whirlpool use oSSSL if OPENSSL_VERSION_NUMBER >= 0x10000000, otherwise use sph_* code.
*
* There used to be a todo list, and other commenting here. It has been
* moved to ./docs/dynamic_history.txt
*
* KNOWN issues, and things to do.
*
* 1. create a new optimize flag, MGF_PASS_AFTER_FIXEDSALT and
* MGF_PASS_BEFORE_FIXEDSALT. Then create DynamicFunc__appendsalt_after_pass[12]
* These would only be valid for a FIXED length salted format. Then
* we can write the pass right into the buffer, and get_key() would read
* it back from there, either skipping over the salt, or removing the salt
* from the end. This would allow crypt($s.$p) and crypt($p.s) to be optimized
* in the way of string loading, and many fewer buffer copies. So dyna_1 could
* be optimized to something like:
// dynamic_1 Joomla md5($p.$s)
static DYNAMIC_primitive_funcp _Funcs_1[] =
{
//Flags=MGF_PASS_BEFORE_FIXEDSALT | MGF_SALTED
// saltlen=3 (or whatever). This fixed size is 'key'
DynamicFunc__appendsalt_after_pass1,
DynamicFunc__crypt_md5,
NULL
};
* WELL, the fixed size salt, it 'may' not be key for the MGF_PASS_BEFORE_FIXEDSALT,
* I think I can make that 'work' for variable sized salts. But for the
* MGF_PASS_AFTER_FIXEDSALT, i.e. crypt($s.$p) the fixed size salt IS key. I would
* like to store all PW's at salt_len offset in the buffer, and simply overwrite the
* first part of each buffer with the salt, never moving the password after the first
* time it is written. THEN it is very important this ONLY be allowed when we KNOW
* the salt length ahead of time.
*
* 2. The flat buffer length is getting 'tight'. Right now the buffer length is
* PLAINTEXT_LENGTH_X86+EX_BUF_LEN which is 124+136 == 260. We currently have
* a couple of hashes that are 256 bytes long (sha512(sha512($p).sha512($p)) and
* the same for whirlpool. This does not give much room for growth. But these buffers
* should not be made to be 'too' much larger than needed. This is an issue that needs
* to be looked into. NOTE, we might want to go to 3 input buffers. That way, we
* could make input buffer 1 be 128 bytes, input buffer2 256 and input buffer3 be
* 512. This would allow us to use a smaller buffer (buffer1), IF 128 bytes is
* enough, and hopefully reduce working set. But then have a double length buffer
* and a new quad length buffer IF we need them (for large hashes if there are multiple
* appended hashes). This may add a BUNCH of extra functions. NOTE, I have seen slowdowns
* in current setup (2 buffers), if buffersize is 260 bytes, vs 256 bytes. I am sure this
* is due to page swapping, since this crosses 2 256 byte blocks.
*
* 3. Add SHA2 intrinsic code. Also, make a 'plan' on how to do SSE code for the other
* large hash types (and get SHA1 working 'better'). NOTE there are OMP implications
* which make this harder. Switching in/out of SSE buffers is very expensive.
*
* 4. optimize the SHA1 vs MD5 (sse). Possibly keep SHA1 in SSE buffers, and have a
* a method to switch the buffer into LE md5/4 sse buffer space. Same may go for
* other BE 64 byte hashes. There will be no way to switch back and forth 'easily'
* between 128 byte hashes, into 64 byte, unless they contain 55 characters or
* less. Also, the length constrains on the 128 byte buffers is much less, for a
* single block crypt. 64 byte hashes, can do 55 passwords (8 needed for length + 1 for
* the 0x80). 128 byte hashes can do 111 byte passwords (16 needed for length + 1
* for 0x80). But on large hashes, if we allow over 55 byte passwords, we lose ability
* to switch into 64 byte SSE hash space. NOTE that md4/md5 are the same. sha1, sha224
* and sha256 are the same. The size of ALL of these are the same, but they differ in
* endianity. sha384, sha512 are the same, but they are 128 byte vs 64 byte per limb.
* NOTE, this has been totally changed, in design. SHA1 has been removed from the intermixed
* MMX_COEF buffers, and now only uses 'flat' buffers. The mix to MMX_COEF code has been
* placed inside the SSE intrinsic body. There has been a 10-15% slowdown on some formats
* (most notably the 'raw-sha1', but this is just the way it will be. There were some
* performance IMPROVEMENTS on some formats. The biggest benefit, is that this becomes
* OMP usable, and it reduces the code complexity a LOT, and makes writing scripts easier,
* with less internal knowledge of the strange way SHA1 worked before, to make an optimal
* speed format. SHA224/256 have been done in SSE2. Only SHA384/512 left to do.
*
* 5. Change regen-salts to be generic. Add the logic to dynamic_fmt.c proper, and change
* the fake-salts.c, and options so that 'generic' regen-salts can be done.
*
* 6. Make sure all big crypts list their crypt type in the algo name, and not MD5_BODY
*
* 7. Add big crypt md5/md4 and start to port the formats to use them.
*/
#include <string.h>
#include <time.h>
#include "arch.h"
#ifdef MMX_COEF
#include "sse-intrinsics.h"
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "md5.h"
#include "md4.h"
#include "dynamic.h"
#include "options.h"
#include "config.h"
#include "sha.h"
#include "gost.h"
#include "memory.h"
#include "unicode.h"
#include "johnswap.h"
#include "pkzip.h"
#include "aligned.h"
#include "fake_salts.h"
#ifdef _OPENMP
#include <omp.h>
static int m_ompt;
#endif
#include "dynamic_types.h"
#include "memdbg.h"
// Standard two-level stringize macros.
#define STRINGIZE2(s) #s
#define STRINGIZE(s) STRINGIZE2(s)
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
// Master format descriptor; subformat copies are derived from it.
static struct fmt_main fmt_Dynamic;
// Array of generated subformats and its count.
static struct fmt_main *pFmts;
static int nFmts;
static int force_md5_ctx;
static void dynamic_RESET(struct fmt_main *fmt);
// Per-thread large-hash output mode; renamed so the exported symbol does not
// clash with identically-named objects elsewhere.
#define eLargeOut dyna_eLargeOut
eLargeOut_t *eLargeOut;
#if ARCH_LITTLE_ENDIAN
// MD5_go is SUPER slow on big endian. In the case of bigendian, we simply
// fall back, and use OpenSSL MD5 calls, which are usually MUCH faster.
#ifndef _OPENMP
// NOTE, MD5_go is NOT thread safe.
#define USE_MD5_Go
#endif
// Little-endian builds need no byte swapping: the helpers compile to nothing.
#define MD5_swap(x, y, count)
#define MD5_swap2(a,b,c,d,e)
#else
extern char *MD5_DumpHexStr(void *p);
// Byte-swap 'count' 32-bit words from x into y (big-endian builds only).
// NOTE: count must be >= 1 -- the do/while body always runs at least once.
static void MD5_swap(MD5_word *x, MD5_word *y, int count)
{
	do {
		*y++ = JOHNSWAP(*x++);
	} while (--count);
}
#if MD5_X2
// Same as MD5_swap, but swaps two parallel word streams in one pass
// (used when MD5_X2 packs two lanes per buffer).
static void MD5_swap2(MD5_word *x, MD5_word *x2, MD5_word *y, MD5_word *y2, int count)
{
	do {
		*y++ = JOHNSWAP(*x++);
		*y2++ = JOHNSWAP(*x2++);
	} while (--count);
}
#endif
#endif
#define FORMAT_LABEL		"dynamic"
#define FORMAT_NAME         "Generic MD5"
#ifdef MMX_COEF
// Map (byte index, key index) into the MMX_COEF-interleaved SIMD buffer
// layout; SHAGETPOS additionally reverses bytes within each 32-bit word.
# define GETPOS(i, index)		( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3) )*MMX_COEF + ((i)&3) )
# define SHAGETPOS(i, index)	( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3) )*MMX_COEF + (3-((i)&3)) ) //for endianity conversion
#endif
#define BENCHMARK_COMMENT		""
#define BENCHMARK_LENGTH		-1
#define CIPHERTEXT_LENGTH		32
#define BINARY_SIZE				16
#define BINARY_SIZE_SHA         20
#define BINARY_ALIGN			MEM_ALIGN_WORD
// Computation for 'salt_size'  The salt (and salt2) is appended to the end of the hash entry.
//    The format of a salted entry is:   $dynamic_#$hash$SALT_VAL[$$2SALT2_VAL]
// salt 64 bytes,
// salt2 64 bytes,
// salt signature $ 1 byte
// salt2 signature $$2 3 bytes
// null termination 1 byte.  This this allows 2 64 byte salt's.
// Note, we now have up to 10 of these.
#define SALT_SIZE				(64*4+1+3+1)
#define SALT_ALIGN				MEM_ALIGN_WORD
// slots to do 24 'tests'. Note, we copy the
// same 3 tests over and over again. Simply to validate that
// tests use 'multiple' blocks.
// Entries are filled in at runtime per subformat; {NULL} slots are blanks.
static struct fmt_tests dynamic_tests[] = {
	{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
	{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},
	{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}
};
#ifdef MMX_COEF
// SSE2 works only with 54 byte keys. Thus, md5(md5($p).md5($s)) can NOT be used
// with the SSE2, since that final md5 will be over a 64 byte block of data.
// Interleaved SIMD input/output buffers; sized at runtime via BLOCK_LOOPS.
static union {
	ARCH_WORD_32 w[(64*MMX_COEF)/sizeof(ARCH_WORD_32)];
	unsigned char c[64*MMX_COEF];
} *input_buf, *input_buf2;
static union {
	ARCH_WORD_32 w[(BINARY_SIZE*MMX_COEF)/sizeof(ARCH_WORD_32)];
	unsigned char c[BINARY_SIZE*MMX_COEF];
} *crypt_key, *crypt_key2;
// Per-block accumulated input lengths for the two SIMD buffers.
static unsigned int *total_len;
static unsigned int *total_len2;
#define MMX_INP_BUF_SZ    (sizeof(input_buf[0]) *BLOCK_LOOPS)
#define MMX_INP_BUF2_SZ   (sizeof(input_buf2[0])*BLOCK_LOOPS)
#define MMX_TOT_LEN_SZ    (sizeof(total_len[0]) *BLOCK_LOOPS)
#define MMX_TOT_LEN2_SZ   (sizeof(total_len2[0])*BLOCK_LOOPS)
// NOTE(review): the next line duplicates the identical MMX_INP_BUF_SZ
// definition above -- benign (identical redefinition) but should be removed.
#define MMX_INP_BUF_SZ    (sizeof(input_buf[0]) *BLOCK_LOOPS)
#define MMX_CRYPT_KEY_SZ  (sizeof(crypt_key[0]) *BLOCK_LOOPS+1)
#define MMX_CRYPT_KEY2_SZ (sizeof(crypt_key2[0])*BLOCK_LOOPS)
#endif
#define FLAT_INP_BUF_SZ (sizeof(MD5_IN)*(MAX_KEYS_PER_CRYPT_X86>>MD5_X2))
#define FLAT_TOT_LEN_SZ (sizeof(unsigned int)*(MAX_KEYS_PER_CRYPT_X86))
// Flat (non-interleaved) x86 buffers used when SIMD is unavailable or the
// format cannot use it.
MD5_OUT *crypt_key_X86;
MD5_OUT *crypt_key2_X86;
MD5_IN *input_buf_X86;
MD5_IN *input_buf2_X86;
unsigned int *total_len_X86;
unsigned int *total_len2_X86;
// Nonzero when new keys have been stored since the last crypt.
// NOTE(review): exact producer/consumer not visible in this chunk -- confirm.
static int keys_dirty;
// We store the salt here
static unsigned char *cursalt;
// length of salt (so we don't have to call strlen() all the time).
static int saltlen;
// This array is for the 2nd salt in the hash.  I know of no hashes with double salts,
// but test type dynamic_16 (which is 'fake') has 2 salts, and this is the data/code to
// handle double salts.
static unsigned char *cursalt2;
static int saltlen2;
// Optional username and generic $$Fn fields parsed from the ciphertext line.
static unsigned char *username;
static int usernamelen;
static unsigned char *flds[10];
static int fld_lens[10];
// Currently active base-16 alphabet (upper- or lower-case digits).
const char *dynamic_itoa16 = itoa16;
#define itoa16_w2 __Dynamic_itoa_w2
#define itoa16_w2_u __Dynamic_itoa_w2_u
#define itoa16_w2_l __Dynamic_itoa_w2_l
// Two-hex-digits-at-a-time conversion tables (upper/lower case variants);
// itoa16_w2 points at whichever variant is selected.
unsigned short itoa16_w2_u[256], itoa16_w2_l[256];
unsigned short *itoa16_w2=itoa16_w2_l;
// array of the keys.  Also lengths of the keys. NOTE if store_keys_in_input, then the
// key array will NOT be used (but the length array still is).
#ifndef MAX_KEYS_PER_CRYPT
#define MAX_KEYS_PER_CRYPT MAX_KEYS_PER_CRYPT_X86
#endif
#ifndef PLAINTEXT_LENGTH
#define PLAINTEXT_LENGTH PLAINTEXT_LENGTH_X86
#endif
// Worst case over the SIMD and flat-x86 code paths.
#define EFFECTIVE_MKPC (MAX_KEYS_PER_CRYPT > MAX_KEYS_PER_CRYPT_X86 ? MAX_KEYS_PER_CRYPT : MAX_KEYS_PER_CRYPT_X86)
#define EFFECTIVE_MAX_LENGTH (PLAINTEXT_LENGTH > PLAINTEXT_LENGTH_X86 ? PLAINTEXT_LENGTH : PLAINTEXT_LENGTH_X86)
// Used to compute length of each string to clean. This is needed, since we have to clean a little more than
// just the length, IF we are cleaning strings that are in different endianity than native for the CPU.
// This is seen on SHA224 (etc) on Intel, or MD5 of BE systems.  We still try to clean 'only' as much as
// we need to, but that is usually MORE than what the length of the stored string is. 8 gives us 7 byte spill
// over, plus 1 byte for the 0x80
#define COMPUTE_EX_LEN(a) ( (a) > (sizeof(input_buf_X86[0].x1.b)-8) ) ? sizeof(input_buf_X86[0].x1.b) : ((a)+8)
static char saved_key[EFFECTIVE_MKPC][EFFECTIVE_MAX_LENGTH + 1];
static int saved_key_len[EFFECTIVE_MKPC];
// Used in 'get_key' if we are running in store_keys_in_input mode
static char out[EFFECTIVE_MAX_LENGTH + 1];
// This is the GLOBAL count of keys. ALL of the primitives which deal with a count
// will read from this variable.
#define m_count m_Dynamic_Count
int m_count;
// If we are run in 'specific' mode (say, -format=dynamic -subformat=dynamic_0, then we
// want to 'allow' bare hashes to be 'valid'. This is how we will do this.  We have a boolean
// that if set to true, we will perform a 1 time check within the valid function. If at
// that time we find out that we are cracking (or showing, etc) that we will accept lines
// that are either format of $dynamic_0$hhhhhh...32 or simply in the format of hhhhhhh..32
static int m_allow_rawhash_fixup = 0;
// this one IS in the private_dat, but since it is accessed SO much, we pull it
// out prior to 'internal' processing. The others are accessed right from
// the structure, since there are accessed infrequently enough to not matter.
static int dynamic_use_sse;
// If set to 1, then do unicode conversion in many string setting functions.
static int *md5_unicode_convert;
// Per-format private data for the currently active subformat.
#define curdat Dynamic_curdat
private_subformat_data curdat;
// Helper function that loads out 256 unsigned short array that does base-16 conversions
// This function is called at the 'validation' call that loads our preloads (i.e. only
// called one time, pre 'run' (but will be called multiple times when benchmarking, but
// will NOT impact benchmark times.) Loading a word at a time (2 bytes), sped up
// the overall run time of dynamic_2 almost 5%, thus this conversion is MUCH faster than
// the fastest byte by byte I could put together. I tested several ways to access this
// array of unsigned shorts, and the best way was a 2 step method into an array of long
// integer pointers (thus, load 1/2 the 32 bit word, then the other 1/2, into a 32 bit word).
/*********************************************************************************
*********************************************************************************
* Start of the 'normal' *_fmt code for md5-gen
*********************************************************************************
*********************************************************************************/
/*
 * Decode "$HEX$..." sections of a ciphertext line into output.
 * Behavior:
 *  - no "$HEX$" marker: input is copied verbatim (safety net; the caller
 *    checks for the marker first);
 *  - an encoded 00 byte or a non-hex character inside the hex stream:
 *    decoding is abandoned and input is copied verbatim;
 *  - a '$' inside the data ends the current hex section; a later "$HEX$"
 *    marker restarts decoding.
 * Returns output.
 */
char *RemoveHEX(char *output, char *input) {
	char *in = input;
	char *out = output;
	char *marker = strstr(input, "$HEX$");

	if (!marker) {
		// should never get here, we have a check performed before this function is called.
		strcpy(output, input);
		return output;
	}
	/* Copy everything before the marker, plus the leading '$'. */
	while (in < marker)
		*out++ = *in++;
	*out++ = *in;
	in += 5;
	while (*in) {
		if (in[0] == '0' && in[1] == '0') {
			/* Embedded NUL byte: not representable, keep line untouched. */
			strcpy(output, input);
			return output;
		}
		if (atoi16[ARCH_INDEX(in[0])] != 0x7f && atoi16[ARCH_INDEX(in[1])] != 0x7f) {
			*out++ = atoi16[ARCH_INDEX(in[0])]*16 + atoi16[ARCH_INDEX(in[1])];
			in += 2;
		} else if (*in == '$') {
			/* End of hex section: copy literally until the next marker. */
			while (*in && strncmp(in, "$HEX$", 5))
				*out++ = *in++;
			if (!strncmp(in, "$HEX$", 5)) {
				*out++ = *in;
				in += 5;
			}
		} else {
			/* Invalid hex digit: keep line untouched. */
			strcpy(output, input);
			return output;
		}
	}
	*out = 0;
	return output;
}
/*********************************************************************************
* Detects a 'valid' md5-gen format. This function is NOT locked to anything. It
* takes its detection logic from the provided fmt_main pointer. Within there,
* is a 'private' data pointer. When john first loads the md5-gen, it calls a
* function which builds proper 'private' data for EACH type of md5-gen. Then
* john will call valid on EACH of those formats, asking each one if a string is
* valid. Each format has a 'private' properly setup data object.
*********************************************************************************/
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
int i, cipherTextLen;
char *cp, fixed_ciphertext[1024];
private_subformat_data *pPriv = pFmt->private.data;
if (!pPriv)
return 0;
if (strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG)))
return 0;
// this is now simply REMOVED totally, if we detect it. Doing this solves MANY other problems
// of leaving it in there. The ONLY problem we still have is NULL bytes.
if (strstr(ciphertext, "$HEX$")) {
if (strlen(ciphertext) < sizeof(fixed_ciphertext))
ciphertext = RemoveHEX(fixed_ciphertext, ciphertext);
}
cp = &ciphertext[strlen(pPriv->dynamic_WHICH_TYPE_SIG)];
if (pPriv->dynamic_base64_inout == 1)
{
// jgypwqm.JsMssPLiS8YQ00$BaaaaaSX
int i;
for (i = 0; i < 22; ++i) {
if (atoi64[ARCH_INDEX(cp[i])] == 0x7F)
return 0;
}
if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
return !cp[i];
if (pPriv->dynamic_FIXED_SALT_SIZE && cp[22] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[23]) != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[23]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
if ((pPriv->pSetup->startFlags & MGF_PHPassSetup) == MGF_PHPassSetup) {
// we have to perform the salt 'length' check here, so we do not process invalid hashes later.
int Lcount = atoi64[ARCH_INDEX(cp[23])];
if (Lcount < 7 || Lcount > 31)
return 0;
}
return 1;
}
if (pPriv->dynamic_base64_inout == 2)
{
// h3mJrcH0901pqX/m$alex
int i;
for (i = 0; i < 16; ++i) {
if (atoi64[ARCH_INDEX(cp[i])] == 0x7F)
return 0;
}
if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
return !cp[i];
if (pPriv->dynamic_FIXED_SALT_SIZE && cp[16] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[17]) != pPriv->dynamic_FIXED_SALT_SIZE)
return 0;
else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[17]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
return 0;
return 1;
}
if (pPriv->dynamic_base64_inout == 1)
{
if (strlen(cp) < 22)
return 0;
}
else if (pPriv->dynamic_base64_inout == 2)
{
if (strlen(cp) < 16)
return 0;
}
else
{
if (strlen(cp) < 32)
return 0;
}
cipherTextLen = CIPHERTEXT_LENGTH;
if (pPriv->dynamic_40_byte_input) {
cipherTextLen = 40;
} else if (pPriv->dynamic_48_byte_input) {
cipherTextLen = 48;
} else if (pPriv->dynamic_64_byte_input) {
cipherTextLen = 64;
} else if (pPriv->dynamic_56_byte_input) {
cipherTextLen = 56;
} else if (pPriv->dynamic_80_byte_input) {
cipherTextLen = 80;
} else if (pPriv->dynamic_96_byte_input) {
cipherTextLen = 96;
} else if (pPriv->dynamic_128_byte_input) {
cipherTextLen = 128;
}
for (i = 0; i < cipherTextLen; i++) {
if (atoi16[ARCH_INDEX(cp[i])] == 0x7f)
return 0;
}
if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
if (!cp[cipherTextLen])
return 1;
return 0;
}
if (cp[cipherTextLen] && cp[cipherTextLen] != '$')
return 0;
// NOTE if looking at this in the future, this was not my fix.
if (strlen(&cp[cipherTextLen]) > SALT_SIZE)
return 0;
// end NOTE.
if (pPriv->dynamic_FIXED_SALT_SIZE && ciphertext[pPriv->dynamic_SALT_OFFSET-1] != '$')
return 0;
if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) != pPriv->dynamic_FIXED_SALT_SIZE) {
// check if there is a 'salt-2' or 'username', etc If that is the case, then this is still valid.
if (strncmp(&ciphertext[pPriv->dynamic_SALT_OFFSET+pPriv->dynamic_FIXED_SALT_SIZE], "$$", 2))
return 0;
}
else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) > -(pPriv->dynamic_FIXED_SALT_SIZE)) {
// check if there is a 'salt-2' or 'username', etc If that is the case, then this is still 'valid'
char *cpX = mem_alloc(-(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
strnzcpy(cpX, &ciphertext[pPriv->dynamic_SALT_OFFSET], -(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
if (!strstr(cpX, "$$")) {
MEM_FREE(cpX);
return 0;
}
MEM_FREE(cpX);
}
if (pPriv->b2Salts==1 && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$2"))
return 0;
if (pPriv->nUserName && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$U"))
return 0;
if (pPriv->FldMask) {
for (i = 0; i < 10; ++i) {
if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
char Fld[5];
sprintf(Fld, "$$F%d", i);
if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
return 0;
}
}
}
if ( (pPriv->pSetup->flags & MGF_HDAA_SALT) == MGF_HDAA_SALT) {
// has a very complex salt function. Requires certain fields, AND for these to be in proper order!!!
char *cp = strchr(&ciphertext[12], '$');
if (!cp) return 0;
if (cp[1] == '$') return 0; // if salt is 'empty', return false.
cp = strchr(&cp[1], '$');
if (!cp || strncmp(cp,"$$U",3) || cp[3] == '$') return 0; // if next is not U or U is 'empty', return false.
cp = strstr(&cp[3], "$$F2");
if (!cp || cp[4] == '$') return 0; // if next is not F2 or F2 is 'empty', return false.
cp = strstr(&cp[4], "$$F3");
if (!cp || cp[4] == '$') return 0; // if next is not F3 or F3 is 'empty', return false.
cp = strstr(&cp[4], "$$F4");
if (!cp || cp[4] == '$') return 0; // if next is not F4 or F4 is 'empty', return false.
cp = strchr(&cp[4], '$');
if (!cp || cp[1] == '$') return 0; // if next is empty
cp = strchr(&cp[1], '$');
if (!cp || !cp[1]) return 0; // if last is empty
}
return 1;
}
// Forward declarations (defined later in this file).
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv);
static struct fmt_main *dynamic_Get_fmt_main(int which);
static char *HandleCase(char *cp, int caseType);
// 'wrapper' functions. These are here, so we can call these functions to work on ALL data (not simply within the
// thead, which ONLY wants to work on a subset of the data. These functions should NOT be called by threading
// code, EVER. But this functions KNOW what to do. Some actually have threads, others do not need them.
#ifdef _OPENMP
// Per-chunk strides for parallel loops: how many keys one thread processes
// at a time for each hash family (SIMD 'para' * lane count when built with
// MMX_COEF, otherwise the MD5_X2 pair size).
#ifndef MMX_COEF
const int OMP_INC = (MD5_X2+1);
const int OMP_MD5_INC = (MD5_X2+1);
const int OMP_MD4_INC = (MD5_X2+1);
const int OMP_SHA1_INC = (MD5_X2+1);
#else
const int OMP_INC = (MD5_X2+1);
const int OMP_MD5_INC = (MD5_SSE_PARA*MMX_COEF);
const int OMP_MD4_INC = (MD4_SSE_PARA*MMX_COEF);
const int OMP_SHA1_INC = (SHA1_SSE_PARA*MMX_COEF);
#endif // MMX_COEF
#endif // _OPENMP
// ---- non-parallel wrappers ----------------------------------------------
// Under OpenMP the DynamicFunc__* primitives take (first, count, tid); these
// __nonMP_* wrappers invoke a primitive over the whole key range
// [0, m_count) from code that is not inside a parallel region.
static inline void __nonMP_DynamicFunc__SSEtoX86_switch_output2() {
#ifdef _OPENMP
	DynamicFunc__SSEtoX86_switch_output2(0,m_count,0);
#else
	DynamicFunc__SSEtoX86_switch_output2();
#endif
}
static inline void __nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16() {
#ifdef _OPENMP
	DynamicFunc__append_from_last_output2_to_input1_as_base16(0,m_count,0);
#else
	DynamicFunc__append_from_last_output2_to_input1_as_base16();
#endif
}
static inline void __nonMP_DynamicFunc__set_input_len_32() {
#ifdef _OPENMP
	DynamicFunc__set_input_len_32(0,m_count,0);
#else
	DynamicFunc__set_input_len_32();
#endif
}
// Broadcast the large-hash output mode to every thread's slot (slot 0 only
// in non-OMP builds).
void __nonMP_eLargeOut(eLargeOut_t what) {
#ifdef _OPENMP
	int i;
	for (i = 1; i < m_ompt; ++i)
		eLargeOut[i] = what;
#endif
	eLargeOut[0] = what;
}
// Per-thread accessors for the unicode-conversion flag.
static inline void md5_unicode_convert_set(int what, int tid) {
	md5_unicode_convert[tid] = what;
}
static inline int md5_unicode_convert_get(int tid) {
	return md5_unicode_convert[tid];
}
// Broadcast the unicode-conversion flag to every thread's slot.
void __nonMP_md5_unicode_convert(int what) {
#ifdef _OPENMP
	int i;
	for (i = 1; i < m_ompt; ++i)
		md5_unicode_convert[i] = what;
#endif
	md5_unicode_convert[0] = what;
}
#if !defined (_OPENMP)
// Single-threaded build: pin tid to 0 in the accessors above.
#define md5_unicode_convert_set(what, tid) md5_unicode_convert_set(what, 0)
#define md5_unicode_convert_get(tid) md5_unicode_convert_get(0)
#define eLargeOut_set(what, tid) eLargeOut_set(what, 0)
#define eLargeOut_get(tid) eLargeOut_get(0)
#endif
// Whole-range wrapper for append_keys2 (see the __nonMP_* comment above).
static inline void __nonMP_DynamicFunc__append_keys2() {
#ifdef _OPENMP
	DynamicFunc__append_keys2(0,m_count,0);
#else
	DynamicFunc__append_keys2();
#endif
}
// Run crypt2_md5 over all keys; under OpenMP the range is split into
// OMP_MD5_INC-sized chunks across threads.
static void __possMP_DynamicFunc__crypt2_md5() {
#ifdef _OPENMP
	int i;
	int inc = OMP_MD5_INC;
	// if (dynamic_use_sse!=1)
	// inc = OMP_INC;
#pragma omp parallel for
	for (i = 0; i < m_count; i += inc)
		DynamicFunc__crypt2_md5(i,i+inc,omp_get_thread_num());
#else
	DynamicFunc__crypt2_md5();
#endif
}
// Zero input buffer 1 and its lengths for all keys. In SSE mode the
// interleaved buffers are wiped wholesale; in flat mode only
// COMPUTE_EX_LEN(len) bytes per key are cleared (stored length plus the
// endianness spill-over and 0x80 pad byte).
static void __nonMP_DynamicFunc__clean_input() {
	unsigned i=0;
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		memset(input_buf, 0, MMX_INP_BUF_SZ);
		memset(total_len, 0, MMX_TOT_LEN_SZ);
		return;
	}
#endif
	for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
		//if (total_len_X86[i]) {
#if MD5_X2
		// Odd indices live in the second lane of each MD5_X2 pair.
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
			memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
		//}
	}
	return;
}
// Same as clean_input, for input buffer 2. When flat SSE2-capable buffers
// are in use only the lengths need resetting.
static void __nonMP_DynamicFunc__clean_input2() {
	unsigned i=0;
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		memset(input_buf2, 0, MMX_INP_BUF2_SZ);
		memset(total_len2, 0, MMX_TOT_LEN2_SZ);
		return;
	}
#endif
	if (curdat.using_flat_buffers_sse2_ok) {
		memset(total_len2_X86, 0, sizeof(total_len2_X86[0])*MAX_KEYS_PER_CRYPT_X86);
		return;
	}
	for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
		//if (total_len2_X86[i]) {
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
			memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
		//}
	}
	return;
}
// Unconditionally zero input buffer 1 (SIMD and flat variants) in full.
static void __nonMP_DynamicFunc__clean_input_full() {
#ifdef MMX_COEF
	memset(input_buf, 0, MMX_INP_BUF_SZ);
	memset(total_len, 0, MMX_TOT_LEN_SZ);
#endif
	memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
	memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
}
// Unconditionally zero input buffer 2 (SIMD and flat variants) in full.
static void __nonMP_DynamicFunc__clean_input2_full() {
#ifdef MMX_COEF
	memset(input_buf2, 0, MMX_INP_BUF2_SZ);
	memset(total_len2, 0, MMX_TOT_LEN2_SZ);
#endif
	memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
	memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
}
// "Quick" clean: reset only the lengths (little-endian builds can overwrite
// stale bytes in place; big-endian builds must also wipe the data).
static void __nonMP_DynamicFunc__clean_input_kwik() {
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		memset(total_len, 0, MMX_TOT_LEN_SZ);
		return;
	}
#endif
	memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
	memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#ifndef _OPENMP
// Quick clean for buffer 2; only compiled (and used) in non-OMP builds.
static void __nonMP_DynamicFunc__clean_input2_kwik() {
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		memset(total_len2, 0, MMX_TOT_LEN2_SZ);
		return;
	}
#endif
	memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
	memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#endif
/*********************************************************************************
 * init() here does nothing. NOTE many formats LINKING into us will have a valid
 * that DOES do something, but ours does nothing.
 *
 * In practice this init():
 *  - allocates the per-thread conversion/output-encoding state (OMP-aware),
 *  - lazily allocates the shared SIMD and flat work buffers (once, ever),
 *  - copies the sub-format's private data into the global 'curdat' and
 *    mirrors selected params/methods into the fmt_Dynamic template,
 *  - pre-loads input buffer #2 for formats that always hash a 32-byte
 *    base-16 value there (input2_set_len32).
 *********************************************************************************/
static void init(struct fmt_main *pFmt)
{
	private_subformat_data *pPriv = pFmt->private.data;
	int i;
	//fprintf(stderr, "init(%s)\n", pPriv->dynamic_WHICH_TYPE_SIG);
	/* first off, SAVE the original format structure (owned by JtR). We may need this later */
	pPriv->pFmtMain = pFmt;
#ifdef _OPENMP
	m_ompt = omp_get_max_threads();
	mem_calloc_tiny(1, MEM_ALIGN_WORD); // throw this one away, to get our allocations memory aligned
#if ARCH_ALLOWS_UNALIGNED
	md5_unicode_convert = (int*)mem_calloc_tiny(sizeof(int)*m_ompt, MEM_ALIGN_NONE);
	eLargeOut = (eLargeOut_t*)mem_calloc_tiny(sizeof(eLargeOut_t)*m_ompt, MEM_ALIGN_NONE);
#else
	md5_unicode_convert = (int*)mem_calloc_tiny(sizeof(int)*m_ompt, MEM_ALIGN_WORD);
	eLargeOut = (eLargeOut_t*)mem_calloc_tiny(sizeof(eLargeOut_t)*m_ompt, MEM_ALIGN_WORD);
#endif
	/* default every thread's large-hash output encoding to lowercase base-16 */
	for (i = 0; i < m_ompt; ++i)
		eLargeOut[i] = eBase16;
#else
#if ARCH_ALLOWS_UNALIGNED
	md5_unicode_convert = (int*)mem_calloc_tiny(sizeof(int), MEM_ALIGN_NONE);
	eLargeOut = (eLargeOut_t*)mem_calloc_tiny(sizeof(eLargeOut_t), MEM_ALIGN_NONE);
#else
	md5_unicode_convert = (int*)mem_calloc_tiny(sizeof(int), MEM_ALIGN_WORD);
	eLargeOut = (eLargeOut_t*)mem_calloc_tiny(sizeof(eLargeOut_t), MEM_ALIGN_WORD);
#endif
	eLargeOut[0] = eBase16;
#endif
#ifdef MMX_COEF
	/* One-time allocation of the SIMD interleaved work buffers (shared by
	   every dynamic sub-format, so guarded by a NULL check). */
	if (!input_buf) {
		input_buf = mem_calloc_tiny(MMX_INP_BUF_SZ, MEM_ALIGN_SIMD);
		total_len = mem_calloc_tiny(MMX_TOT_LEN_SZ, MEM_ALIGN_SIMD);
		total_len2 = mem_calloc_tiny(MMX_TOT_LEN2_SZ, MEM_ALIGN_SIMD);
		input_buf2 = mem_calloc_tiny(MMX_INP_BUF2_SZ, MEM_ALIGN_SIMD);
		crypt_key = mem_calloc_tiny(MMX_CRYPT_KEY_SZ, MEM_ALIGN_SIMD);
		crypt_key2 = mem_calloc_tiny(MMX_CRYPT_KEY2_SZ, MEM_ALIGN_SIMD);
	}
#endif
	if (!crypt_key_X86) {
		// we have to align SIMD, since now we may load directly from these buffers (or save to them), in
		// large hash SIMD code (sha256, etc). Also 1 larger in the array, since we might point 'extra'
		// hashes past the end of our buffer to that value.
		crypt_key_X86 = (MD5_OUT *)mem_calloc_tiny(sizeof(*crypt_key_X86)*((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), MEM_ALIGN_SIMD);
		crypt_key2_X86 = (MD5_OUT *)mem_calloc_tiny(sizeof(*crypt_key2_X86)*((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), MEM_ALIGN_SIMD);
		input_buf_X86 = (MD5_IN *)mem_calloc_tiny(sizeof(*input_buf_X86)*((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), MEM_ALIGN_SIMD);
		input_buf2_X86 = (MD5_IN *)mem_calloc_tiny(sizeof(*input_buf2_X86)*((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), MEM_ALIGN_SIMD);
		total_len_X86 = (unsigned int *)mem_calloc_tiny(sizeof(*total_len_X86)*(MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len_X86));
		total_len2_X86 = (unsigned int *)mem_calloc_tiny(sizeof(*total_len2_X86)*(MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len2_X86));
	}
	gost_init_table();
	/* If we are re-initing the same sub-format, all the per-format copying
	   below has already been done — bail out early. */
	if (!pPriv || (pPriv->init == 1 && !strcmp(curdat.dynamic_WHICH_TYPE_SIG, pPriv->dynamic_WHICH_TYPE_SIG)))
		return;
	__nonMP_DynamicFunc__clean_input_full();
	__nonMP_DynamicFunc__clean_input2_full();
	// Some builds (omp vs non omp, etc) do not call these functions, so to avoid 'unused' warnings, we simply
	// call them here.
	__nonMP_DynamicFunc__clean_input_kwik();
	dynamic_RESET(pFmt);
	if (!pPriv)
		return;
	pPriv->init = 1;
	/* From here on, 'curdat' is THE active sub-format description. */
	memcpy(&curdat, pPriv, sizeof(private_subformat_data));
	dynamic_use_sse = curdat.dynamic_use_sse;
	force_md5_ctx = curdat.force_md5_ctx;
	/* Mirror the chosen sub-format's params/methods into the generic
	   fmt_Dynamic template, so it reflects the active format. */
	fmt_Dynamic.params.max_keys_per_crypt = pFmt->params.max_keys_per_crypt;
	fmt_Dynamic.params.min_keys_per_crypt = pFmt->params.min_keys_per_crypt;
	fmt_Dynamic.params.flags = pFmt->params.flags;
	fmt_Dynamic.params.format_name = pFmt->params.format_name;
	fmt_Dynamic.params.algorithm_name = pFmt->params.algorithm_name;
	fmt_Dynamic.params.benchmark_comment = pFmt->params.benchmark_comment;
	fmt_Dynamic.params.benchmark_length = pFmt->params.benchmark_length;
	// we allow for 3 bytes of utf8 data to make up the number of plaintext_length unicode chars.
	if ( (pFmt->params.flags&FMT_UNICODE) && pers_opts.target_enc == UTF_8 ) {
		//printf ("Here pFmt->params.plaintext_length=%d pPriv->pSetup->MaxInputLen=%d\n", pFmt->params.plaintext_length, pPriv->pSetup->MaxInputLen);
		pFmt->params.plaintext_length = MIN(125, pFmt->params.plaintext_length * 3);
	}
	else
		fmt_Dynamic.params.plaintext_length = pFmt->params.plaintext_length;
	fmt_Dynamic.params.salt_size = pFmt->params.salt_size;
	fmt_Dynamic.params.flags = pFmt->params.flags;
	fmt_Dynamic.methods.cmp_all = pFmt->methods.cmp_all;
	fmt_Dynamic.methods.cmp_one = pFmt->methods.cmp_one;
	fmt_Dynamic.methods.cmp_exact = pFmt->methods.cmp_exact;
	fmt_Dynamic.methods.set_salt = pFmt->methods.set_salt;
	fmt_Dynamic.methods.salt = pFmt->methods.salt;
	fmt_Dynamic.methods.salt_hash = pFmt->methods.salt_hash;
	fmt_Dynamic.methods.split = pFmt->methods.split;
	fmt_Dynamic.methods.set_key = pFmt->methods.set_key;
	fmt_Dynamic.methods.get_key = pFmt->methods.get_key;
	fmt_Dynamic.methods.clear_keys = pFmt->methods.clear_keys;
	fmt_Dynamic.methods.crypt_all = pFmt->methods.crypt_all;
	for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
	{
		fmt_Dynamic.methods.binary_hash[i] = pFmt->methods.binary_hash[i];
		fmt_Dynamic.methods.get_hash[i] = pFmt->methods.get_hash[i];
	}
#if !MD5_IMM
	{
		extern void MD5_std_init(struct fmt_main *pFmt);
		MD5_std_init(pFmt);
	}
#endif
	/* Formats that always hash a fixed 32-char hex value through input #2:
	   pre-seed lengths to 32 and lay down the MD5 padding byte (0x80) and
	   bit-length (0x100 = 32*8) in each SIMD lane, once, up front. */
	if (curdat.input2_set_len32) {
		for (i = 0; i < MAX_KEYS_PER_CRYPT_X86; ++i)
			total_len2_X86[i] = 32;
#ifdef MMX_COEF
		for (i = 0; i < BLOCK_LOOPS; ++i) {
			input_buf2[i].c[GETPOS(32,0)] = 0x80;
			input_buf2[i].c[GETPOS(57,0)] = 0x1;
			input_buf2[i].c[GETPOS(32,1)] = 0x80;
			input_buf2[i].c[GETPOS(57,1)] = 0x1;
#if (MMX_COEF==4)
			input_buf2[i].c[GETPOS(32,2)] = 0x80;
			input_buf2[i].c[GETPOS(57,2)] = 0x1;
			input_buf2[i].c[GETPOS(32,3)] = 0x80;
			input_buf2[i].c[GETPOS(57,3)] = 0x1;
			total_len2[i] = 0x20202020;
#else
			total_len2[i] = 0x00200020;
#endif
		}
#endif
	}
}
/*********************************************************************************
 * This function will add a $dynamic_#$ IF there is not one, and if we have a specific
 * format requested. Also, it will add things like UserID, Domain, Fld3, Fld4,
 * Fld5, etc.
 *
 * Steps, in order: bail on over-long/NULL input; convert legacy md5_gen(N)
 * signatures to $dynamic_N$; let FixupIfNeeded add a missing signature;
 * decode $HEX$ runs back to raw bytes (unless they contain an embedded NUL);
 * then append $$U<user> and $$F<n><field> data for formats that consume them.
 * Returns either split_fields[1] untouched or a pointer into a static buffer.
 *********************************************************************************/
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	private_subformat_data *pPriv = pFmt->private.data;
	char Tmp[80];
	int i;
	char *cpBuilding=split_fields[1];
	if (!pPriv)
		return split_fields[1];
	// ANY field[1] longer than 490 will simply be ignored, and returned 'as is'.
	// the rest of this function makes this assumption.
	if (!cpBuilding || strlen(cpBuilding) > 490)
		return cpBuilding;
	if (pFmt->params.salt_size && !strchr(split_fields[1], '$')) {
		/* salted format but the hash carries no '$'-separated data; only
		   proceed if we can synthesize it from user/fields/regen-salts. */
		if (!pPriv->nUserName && !pPriv->FldMask && options.regen_lost_salts == 0)
			return split_fields[1];
	}
	// handle 'older' md5_gen(x) signature, by simply converting to $dynamic_x$ signature
	// Thus older md5_gen() is a valid input (or from john.pot), but ONLY the newer
	// $dynamic_x$ will be written out (into .pot, output lines, etc).
	if (!strncmp(cpBuilding, "md5_gen(", 8))
	{
		static char ct[496];
		char *cp = &cpBuilding[8], *cpo = &ct[sprintf(ct, "$dynamic_")];
		/* copy the format number digits, then skip the closing ')' */
		while (*cp >= '0' && *cp <= '9')
			*cpo++ = *cp++;
		*cpo++ = '$';
		++cp;
		strcpy(cpo, cp);
		cpBuilding = ct;
	}
	// At this point, max length of cpBuilding is 491 (if it was a md5_gen signature)
	cpBuilding = FixupIfNeeded(cpBuilding, pPriv);
	// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
	if (strncmp(cpBuilding, "$dynamic_", 9)) {
		// ok, here we add the 'generic' regen salt code
		if (options.regen_lost_salts && !strchr(cpBuilding, '$')) {
			char *cp = load_regen_lost_salt_Prepare(cpBuilding);
			if (cp)
				return cp;
		}
		return split_fields[1];
	}
	if ( (pPriv->pSetup->flags&MGF_SALTED) == 0)
		return cpBuilding;
	/* at this point, we want to convert ANY and all $HEX$hex into values */
	/* the reason we want to do this, is so that things read from john.pot file will be in proper 'native' format */
	/* the ONE exception to this, is if there is a NULL byte in the $HEX$ string, then we MUST leave that $HEX$ string */
	/* alone, and let the later calls in dynamic.c handle them. */
	if (strstr(cpBuilding, "$HEX$")) {
		char *cp, *cpo;
		int bGood=1;
		static char ct[512];
		strcpy(ct, cpBuilding);
		cp = strstr(ct, "$HEX$");
		cpo = cp;
		*cpo++ = *cp;	/* keep the leading '$', skip past "$HEX$" */
		cp += 5;
		while (*cp && bGood) {
			if (*cp == '0' && cp[1] == '0') {
				/* embedded NUL byte: abandon decoding, keep $HEX$ form */
				bGood = 0;
				break;
			}
			if (atoi16[ARCH_INDEX(*cp)] != 0x7f && atoi16[ARCH_INDEX(cp[1])] != 0x7f) {
				/* valid hex pair -> one raw byte */
				*cpo++ = atoi16[ARCH_INDEX(*cp)]*16 + atoi16[ARCH_INDEX(cp[1])];
				*cpo = 0;
				cp += 2;
			} else if (*cp == '$') {
				/* non-hex field: copy verbatim until the next $HEX$ run */
				while (*cp && strncmp(cp, "$HEX$", 5)) {
					*cpo++ = *cp++;
				}
				*cpo = 0;
				if (!strncmp(cp, "$HEX$", 5)) {
					*cpo++ = *cp;
					cp += 5;
				}
			} else {
				/* malformed hex: give up and return the raw field */
				return split_fields[1];
			}
		}
		if (bGood)
			cpBuilding = ct;
		// if we came into $HEX$ removal, then cpBuilding will always be shorter
	}
	// at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506
	if (pPriv->nUserName && !strstr(cpBuilding, "$$U")) {
		if (split_fields[0] && strlen(split_fields[0]) && strcmp(split_fields[0], "?")) {
			char *userName=split_fields[0], *cp;
			static char ct[1024];
			// assume field[0] is in format: username OR DOMAIN\\username If we find a \\, then use the username 'following' it.
			cp = strchr(split_fields[0], '\\');
			if (cp)
				userName = &cp[1];
			userName = HandleCase(userName, pPriv->nUserName);
			snprintf (ct, sizeof(ct), "%s$$U%s", cpBuilding, userName);
			cpBuilding = ct;
		}
	}
	if (pPriv->FldMask) {
		/* append each required field as $$F<digit><value>, unless already present */
		for (i = 0; i < 10; ++i) {
			if (pPriv->FldMask&(MGF_FLDx_BIT<<i)) {
				sprintf(Tmp, "$$F%d", i);
				if (split_fields[i] && strlen(split_fields[i]) && strcmp(split_fields[i], "/") && !strstr(cpBuilding, Tmp)) {
					static char ct[1024];
					char ct2[1024];
					snprintf (ct2, sizeof(ct2), "%s$$F%d%s", cpBuilding, i, split_fields[i]);
					strcpy(ct, ct2);
					cpBuilding = ct;
				}
			}
		}
	}
	return cpBuilding;
}
/*
 * Canonicalize a ciphertext for storage (.pot, display): ensure it carries
 * this sub-format's $dynamic_N$ signature (converting any legacy md5_gen(N)
 * prefix) and decode embedded $HEX$ runs. Returns a static buffer, or the
 * input unchanged when it is over-long or already canonical.
 */
#if FMT_MAIN_VERSION > 9
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
#else
static char *split(char *ciphertext, int index)
#endif
{
	static char out[1024];
#if FMT_MAIN_VERSION > 9
	private_subformat_data *pPriv = pFmt->private.data;
#else
	private_subformat_data *pPriv = &curdat;
#endif
	/* too long to safely rebuild in out[1024]; pass through untouched */
	if (strlen(ciphertext) > 950)
		return ciphertext;
	if (!strncmp(ciphertext, "$dynamic", 8)) {
		if (strstr(ciphertext, "$HEX$"))
			return RemoveHEX(out, ciphertext);
		return ciphertext;
	}
	if (!strncmp(ciphertext, "md5_gen(", 8)) {
		/* strip the legacy "md5_gen(NN)" wrapper; signature is re-added below */
		ciphertext += 8;
		do ++ciphertext; while (*ciphertext != ')') ;
		++ciphertext;
	}
	if (strstr(ciphertext, "$HEX$")) {
		char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
		RemoveHEX(cp, ciphertext);
	} else
		snprintf(out, sizeof(out), "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
	return out;
}
// This split unifies case.
/*
 * Variant of split() for formats whose hex digest is case-insensitive:
 * after canonicalizing the signature and decoding $HEX$ runs, it lowercases
 * the hash portion (the chars between the signature's trailing '$' and the
 * next '$'). Returns a static buffer.
 */
#if FMT_MAIN_VERSION > 9
static char *split_UC(char *ciphertext, int index, struct fmt_main *pFmt)
#else
static char *split_UC(char *ciphertext, int index)
#endif
{
	static char out[1024];
#if FMT_MAIN_VERSION > 9
	private_subformat_data *pPriv = pFmt->private.data;
#else
	private_subformat_data *pPriv = &curdat;
#endif
	if (!strncmp(ciphertext, "$dynamic", 8)) {
		if (strstr(ciphertext, "$HEX$"))
			RemoveHEX(out, ciphertext);
		else
			strcpy(out, ciphertext);
	} else {
		if (!strncmp(ciphertext, "md5_gen(", 8)) {
			/* strip legacy md5_gen(NN) wrapper */
			ciphertext += 8;
			do ++ciphertext; while (*ciphertext != ')') ;
			++ciphertext;
		}
		if (strstr(ciphertext, "$HEX$")) {
			char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
			RemoveHEX(cp, ciphertext);
		} else
			sprintf(out, "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
	}
	/* lowercase the digest: from just past "$dynamic_N$" up to next '$' */
	ciphertext = strchr(&out[8], '$')+1;
	while (*ciphertext && *ciphertext != '$') {
		if (*ciphertext >= 'A' && *ciphertext <= 'Z')
			*ciphertext += 0x20; // ASCII specific, but I really do not care.
		++ciphertext;
	}
	// printf("%s\n", out);
	return out;
}
/*********************************************************************************
 * Stores the new salt provided into our 'working' salt
 *
 * Salt blob layout (built elsewhere by the salt() method): 2 base-8 digit
 * chars of salt length, 4 base-8 digit chars of "todo" flag bits, then the
 * salt bytes, followed — for each flag bit set — by a 1-byte length and that
 * many data bytes (bit 0 = salt2, bit 1 = username, bits 2..11 = fields 0-9).
 * Each flag bit is cleared as its section is consumed so we can stop early.
 *********************************************************************************/
static void set_salt(void *salt)
{
	unsigned char *cpsalt;
	unsigned todo_bits=0, i, bit;
	if (!salt || curdat.dynamic_FIXED_SALT_SIZE == 0) {
		saltlen = 0;
		return;
	}
	cpsalt = *((unsigned char**)salt);
	/* two base-8 digits -> salt length (no multiply needed: shift by 3) */
	saltlen = *cpsalt++ - '0';
	saltlen <<= 3;
	saltlen += *cpsalt++ - '0';
#if ARCH_ALLOWS_UNALIGNED
	if (*((ARCH_WORD_32*)cpsalt) != 0x30303030)	/* "0000" as a word */
#else
	if (memcmp(cpsalt, "0000", 4))
#endif
	{
		// this is why we used base-8. Takes an extra byte, but there is NO conditional
		// logic, building this number, and no multiplication. We HAVE added one conditional
		// check, to see if we can skip the entire load, if it is 0000.
		todo_bits = *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
	}
	else
		cpsalt += 4;
	cursalt = cpsalt;
	if (!todo_bits) return;
	cpsalt += saltlen;
	if (todo_bits & 1) {
		todo_bits ^= 1; // clear that bit.
		saltlen2 = *cpsalt++;
		cursalt2 = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += saltlen2;
	}
	if (todo_bits & 2) {
		todo_bits ^= 2; // clear that bit.
		usernamelen = *cpsalt++;
		username = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += usernamelen;
	}
	/* remaining bits (4, 8, 16, ...) are fields F0..F9 */
	bit = 4;
	for (i = 0; i < 10; ++i, bit<<=1) {
		if (todo_bits & bit) {
			todo_bits ^= bit; // clear that bit.
			fld_lens[i] = *cpsalt++;
			flds[i] = cpsalt;
			if (todo_bits == 0) return;
			cpsalt += fld_lens[i];
		}
	}
}
/*********************************************************************************
 * Sets this key. It will either be dropped DIRECTLY into the input buffer
 * number 1, or put into an array of keys. Which one happens depends upon
 * HOW the generic functions were laid out for this type. Not all types can
 * load into the input. If not they MUST use the key array. Using the input
 * buffer is faster, when it can be safely done.
 *********************************************************************************/
static void set_key(char *key, int index)
{
	unsigned int len;
	//printf("idx=%d key=%s\n", index, key);
#ifdef MMX_COEF
	/* dynamic_use_sse states: 1 = SSE now, 2 = x86 now but switchable to
	   SSE, 3 = keys kept flat even on a SIMD build. Normalize here. */
	if (curdat.store_keys_in_input==2)
		dynamic_use_sse = 3;
	else if (curdat.md5_startup_in_x86)
		dynamic_use_sse = 2;
	else if (dynamic_use_sse==2)
		dynamic_use_sse = 1;
#endif
	if (curdat.nPassCase>1)
		key = HandleCase(key, curdat.nPassCase);
	// Ok, if the key is in unicode/utf8, we switch it here one time, and are done with it.
	if (curdat.store_keys_in_input)
	{
#ifdef MMX_COEF
		if (dynamic_use_sse==1) {
			// code derived from rawMD5_fmt_plug.c code from magnum
			/* Lay the key straight into the interleaved SIMD buffer one
			   32-bit word at a time, appending the 0x80 pad byte in the
			   word that holds the terminating NUL. */
			const ARCH_WORD_32 *key32 = (ARCH_WORD_32*)key;
			unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
			ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(MMX_COEF-1)];
			ARCH_WORD_32 *keybuf_word = keybuffer;
			unsigned int len;	/* NOTE: intentionally shadows outer 'len' */
			ARCH_WORD_32 temp;
			len = 0;
			while((temp = *key32++) & 0xff) {
				if (!(temp & 0xff00))
				{
					*keybuf_word = (temp & 0xff) | (0x80 << 8);
					++len;
					goto key_cleaning;
				}
				if (!(temp & 0xff0000))
				{
					*keybuf_word = (temp & 0xffff) | (0x80 << 16);
					len+=2;
					goto key_cleaning;
				}
				if (!(temp & 0xff000000))
				{
					*keybuf_word = temp | (0x80 << 24);
					len+=3;
					goto key_cleaning;
				}
				*keybuf_word = temp;
				len += 4;
				keybuf_word += MMX_COEF;	/* stride over the other lanes */
			}
			*keybuf_word = 0x80;	/* key length was a multiple of 4: pad goes alone */
key_cleaning:
			/* zero any leftover words from a previous, longer key */
			keybuf_word += MMX_COEF;
			while(*keybuf_word) {
				*keybuf_word = 0;
				keybuf_word += MMX_COEF;
			}
			/* word 14 of the MD5 block holds the bit length */
			keybuffer[14*MMX_COEF] = len << 3;
			return;
		}
#endif
		len = strlen(key);
		if (len > 110) // we never do UTF-8 -> UTF-16 in this mode
			len = 110;
		// if(index==0) {
		// we 'have' to use full clean here. NOTE 100% sure why, but 10 formats fail if we do not.
		// __nonMP_DynamicFunc__clean_input_full();
		// }
#if MD5_X2
		if (index & 1)
			memcpy(input_buf_X86[index>>MD5_X2].x2.b2, key, len);
		else
#endif
		memcpy(input_buf_X86[index>>MD5_X2].x1.b, key, len);
		saved_key_len[index] = total_len_X86[index] = len;
	}
	else
	{
		/* key-array mode: just stash the key; crypt_all appends it later */
		len = strlen(key);
		if (len > 110 && !(fmt_Dynamic.params.flags & FMT_UNICODE))
			len = 110;
		// if(index==0) {
		// __nonMP_DynamicFunc__clean_input_full();
		// }
		keys_dirty = 1;
		memcpy(((char*)(saved_key[index])), key, len);
		saved_key_len[index] = len;
	}
}
/*
 * Called by the core between batches of set_key() calls. Decides how much
 * of the input state must be wiped, based on the sub-format's flags; formats
 * that fully overwrite their input on each set_key() need no cleaning.
 */
static void clear_keys(void) {
#ifdef MMX_COEF
	if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED) {
		__nonMP_DynamicFunc__clean_input_full();
		return;
	}
	/* modes 1 and 3 rewrite the input buffer entirely in set_key(); skip */
	if (curdat.store_keys_in_input==1 || curdat.store_keys_in_input==3)
		return;
	if (curdat.md5_startup_in_x86)
		__nonMP_DynamicFunc__clean_input_full();
	// This clean was causing failures (dirty buffers left) for dyna_51, 61 and formspring.
	// once commented out, dyna fully passes. I see no reason to keep this here at all.
	// else
	// __nonMP_DynamicFunc__clean_input_kwik();
#else
	__nonMP_DynamicFunc__clean_input_full();
#endif
}
/*********************************************************************************
 * Returns the key. NOTE how it gets it depends upon if we are storing
 * into the array of keys (there we simply return it), or if we are
 * loading into input buffer #1. If in input buffer, we have to re-create
 * the key, prior to returning it.
 *********************************************************************************/
static char *get_key(int index)
{
	if (curdat.store_keys_in_input)
	{
		unsigned int i;
		unsigned char *cp;
#ifdef MMX_COEF
		//if (dynamic_use_sse==1) {
		// Note, if we are not in
		if (dynamic_use_sse && !curdat.md5_startup_in_x86) {
			/* Rebuild the key from the interleaved SIMD buffer; the length
			   is read back out of the MD5 block's bit-length word (w[14]). */
			unsigned int s;
			unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
			//if (curdat.store_keys_in_input && dynamic_use_sse==1)
			// s = saved_key_len[index]; // NOTE, we now have to get the length from the buffer, we do NOT store it into a saved_key_len buffer.
			ARCH_WORD_32 *keybuffer = &input_buf[idx].w[index&(MMX_COEF-1)];
			s = keybuffer[14*MMX_COEF] >> 3;
			for(i=0;i<s;i++)
				out[i] = input_buf[idx].c[GETPOS(i, index&(MMX_COEF-1))];
			out[i] = 0;
			return (char*)out;
		}
#endif
		/* flat-buffer case: copy saved_key_len[index] bytes out */
#if MD5_X2
		if (index & 1)
			cp = input_buf_X86[index>>MD5_X2].x2.B2;
		else
#endif
		cp = input_buf_X86[index>>MD5_X2].x1.B;
		for(i=0;i<saved_key_len[index];++i)
			out[i] = cp[i];
		out[i] = 0;
		return (char*)out;
	}
	else
	{
		/* key-array mode: terminate in place and hand back the stored key */
		saved_key[index][saved_key_len[index]] = '\0';
		return saved_key[index];
	}
}
/*********************************************************************************
 * Looks for ANY key that was cracked.
 *
 * Only the first 32 bits of the binary are compared here; cmp_one/cmp_exact
 * resolve any false positives from this quick screen.
 *********************************************************************************/
static int cmp_all(void *binary, int count)
{
	unsigned int i;
#ifdef MMX_COEF
	if (dynamic_use_sse&1) {
		/* round count up to whole SIMD groups, then test every lane's word 0 */
		unsigned int cnt = ( ((unsigned)count+MMX_COEF-1)>>(MMX_COEF>>1));
		for (i = 0; i < cnt; ++i)
		{
			if(( *((ARCH_WORD_32 *)binary) == crypt_key[i].w[0])
				|| ( *((ARCH_WORD_32 *)binary) == crypt_key[i].w[1])
#if (MMX_COEF > 3)
				|| ( *((ARCH_WORD_32 *)binary) == crypt_key[i].w[2])
				|| ( *((ARCH_WORD_32 *)binary) == crypt_key[i].w[3])
#endif
			)
				return 1;
		}
		return 0;
	}
#endif
	for (i = 0; i < count; i++) {
#if MD5_X2
		if (i&1) {
			if (!(((ARCH_WORD_32 *)binary)[0] - crypt_key_X86[i>>MD5_X2].x2.w2[0]))
				return 1;
		}
		else
#endif
		if (!(((ARCH_WORD_32 *)binary)[0] - crypt_key_X86[i>>MD5_X2].x1.w[0]))
			return 1;
	}
	return 0;
}
/* Mask selecting the low 24 bits of each 32-bit word, in the byte order the
 * 64x4x6 (DES-crypt-like 4 words of 6-bit chars) hashes store them. */
#if ARCH_LITTLE_ENDIAN
#define MASK_4x6 0x00ffffff
#else
#define MASK_4x6 0xffffff00
#endif
/*
 * cmp_all variant for 64x4x6 hashes: identical screening logic to cmp_all,
 * but each computed word is masked with MASK_4x6 before comparing, since
 * only 24 of the 32 bits are significant.
 */
static int cmp_all_64_4x6(void *binary, int count)
{
	unsigned int i;
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned int cnt = ( ((unsigned)count+MMX_COEF-1)>>(MMX_COEF>>1));
		for (i = 0; i < cnt; ++i)
		{
			if(( *((ARCH_WORD_32 *)binary) == (crypt_key[i].w[0] & MASK_4x6))
				|| ( *((ARCH_WORD_32 *)binary) == (crypt_key[i].w[1] & MASK_4x6))
#if (MMX_COEF > 3)
				|| ( *((ARCH_WORD_32 *)binary) == (crypt_key[i].w[2] & MASK_4x6))
				|| ( *((ARCH_WORD_32 *)binary) == (crypt_key[i].w[3] & MASK_4x6))
#endif
			)
				return 1;
		}
		return 0;
	}
#endif
	for (i = 0; i < count; i++) {
#if MD5_X2
		if (i&1) {
			if (!(((ARCH_WORD_32 *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x2.w2[0]&MASK_4x6)))
				return 1;
		}
		else
#endif
		if (!(((ARCH_WORD_32 *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x1.w[0]&MASK_4x6)))
			return 1;
	}
	return 0;
}
/*********************************************************************************
 * Final exact-match check. This format family already performs a full-width
 * comparison inside cmp_one(), so by the time the core calls cmp_exact()
 * there is nothing left to verify — unconditionally report a match.
 *********************************************************************************/
static int cmp_exact(char *binary, int index)
{
	/* Both parameters are required by the fmt_methods signature but are
	   deliberately unused here. */
	return 1;
}
/*********************************************************************************
 * There was 'something' that was possibly hit. Now john will ask us to check
 * each one of the data items, for an 'exact' match.
 *
 * Compares all four 32-bit words of the 128-bit result for a single index.
 *********************************************************************************/
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF
	if (dynamic_use_sse&1) {
		/* locate this index's lane inside its interleaved SIMD group */
		unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
		if( (((ARCH_WORD_32 *)binary)[0] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*MMX_COEF+(index&(MMX_COEF-1))]) &&
			(((ARCH_WORD_32 *)binary)[1] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*MMX_COEF+(index&(MMX_COEF-1))]) &&
			(((ARCH_WORD_32 *)binary)[2] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*MMX_COEF+(index&(MMX_COEF-1))]) &&
			(((ARCH_WORD_32 *)binary)[3] == ((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*MMX_COEF+(index&(MMX_COEF-1))]))
			return 1;
		return 0;
	}
#endif
#if MD5_X2
	if (index & 1) {
		if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x2.w2[0] ) &&
			(((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x2.w2[1] ) &&
			(((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x2.w2[2] ) &&
			(((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x2.w2[3] ) )
			return 1;
		return 0;
	}
#endif
	if ( (((ARCH_WORD_32 *)binary)[0] == crypt_key_X86[index>>MD5_X2].x1.w[0] ) &&
		(((ARCH_WORD_32 *)binary)[1] == crypt_key_X86[index>>MD5_X2].x1.w[1] ) &&
		(((ARCH_WORD_32 *)binary)[2] == crypt_key_X86[index>>MD5_X2].x1.w[2] ) &&
		(((ARCH_WORD_32 *)binary)[3] == crypt_key_X86[index>>MD5_X2].x1.w[3] ) )
		return 1;
	return 0;
}
/*
 * cmp_one variant for 64x4x6 hashes: same four-word comparison as cmp_one,
 * but each computed word is masked with MASK_4x6 first, since only 24 bits
 * per word are significant in this hash encoding.
 */
static int cmp_one_64_4x6(void *binary, int index)
{
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
		if( (((ARCH_WORD_32 *)binary)[0] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[0*MMX_COEF+(index&(MMX_COEF-1))] & MASK_4x6)) &&
			(((ARCH_WORD_32 *)binary)[1] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[1*MMX_COEF+(index&(MMX_COEF-1))] & MASK_4x6)) &&
			(((ARCH_WORD_32 *)binary)[2] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[2*MMX_COEF+(index&(MMX_COEF-1))] & MASK_4x6)) &&
			(((ARCH_WORD_32 *)binary)[3] == (((ARCH_WORD_32 *)&(crypt_key[idx].c))[3*MMX_COEF+(index&(MMX_COEF-1))] & MASK_4x6)))
			return 1;
		return 0;
	}
#endif
#if MD5_X2
	if (index & 1) {
		if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x2.w2[0] & MASK_4x6)) &&
			(((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x2.w2[1] & MASK_4x6)) &&
			(((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x2.w2[2] & MASK_4x6)) &&
			(((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x2.w2[3] & MASK_4x6)) )
			return 1;
		return 0;
	}
#endif
	if ( (((ARCH_WORD_32*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x1.w[0] & MASK_4x6)) &&
		(((ARCH_WORD_32*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x1.w[1] & MASK_4x6)) &&
		(((ARCH_WORD_32*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x1.w[2] & MASK_4x6)) &&
		(((ARCH_WORD_32*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x1.w[3] & MASK_4x6)) )
		return 1;
	return 0;
}
/*********************************************************************************
 *********************************************************************************
 * This is the real 'engine'. It simply calls functions one
 * at a time from the array of functions.
 *
 * Flow: record the batch size in the global m_count, normalize SSE/x86 mode
 * and the base-16 case tables, run any keys-dirty precompute (md5($p) into
 * input/output #2 for md5(md5($p)...) style formats), then execute the
 * sub-format's script — curdat.dynamic_FUNCTIONS[] — either partitioned
 * across OpenMP threads or sequentially.
 *********************************************************************************
 *********************************************************************************/
#if FMT_MAIN_VERSION > 10
static int crypt_all(int *pcount, struct db_salt *salt)
#else
static void crypt_all(int count)
#endif
{
	// set m_count. This is our GLOBAL value, used by ALL of the script functions to know how
	// many keys are loaded, and how much work we do.
#if FMT_MAIN_VERSION > 10
	m_count = *pcount;
#else
	m_count = count;
#endif
	__nonMP_eLargeOut(eBase16);
#ifdef MMX_COEF
	// If this format is MMX built, but is supposed to start in X86 (but be switchable), then we
	// set that value here.
	if (curdat.store_keys_in_input==2)
		dynamic_use_sse = 3;
	else if (curdat.md5_startup_in_x86)
		dynamic_use_sse = 2;
	else if (dynamic_use_sse==2)
		dynamic_use_sse = 1;
#endif
	__nonMP_md5_unicode_convert(0);
	/* select upper/lower-case hex conversion tables for this sub-format */
	if (curdat.dynamic_base16_upcase) {
		dynamic_itoa16 = itoa16u;
		itoa16_w2 = itoa16_w2_u;
	}
	else {
		dynamic_itoa16 = itoa16;
		itoa16_w2 = itoa16_w2_l;
	}
	// There may have to be some 'prelim' work done with the keys. This is so that if we 'know' that keys were
	// loaded into the keys[] array, but that we should do something like md5 and base-16 put them into an
	// input slot, then we do that FIRST, prior to calling the script functions. Thus for a format such as
	// md5(md5($p).$s) we could md5 the pass, and base-16 put it into a input buffer. Then when john sets salt
	// and calls crypt all, the crypt script would simply set the input len to 32, append the salt and call a
	// single crypt. That eliminates almost 1/2 of the calls to md5_crypt() for the format show in this example.
	if (keys_dirty)
	{
		if (curdat.store_keys_normal_but_precompute_md5_to_output2)
		{
			keys_dirty = 0;
			__nonMP_DynamicFunc__clean_input2();
			if (curdat.store_keys_in_input_unicode_convert)
				__nonMP_md5_unicode_convert(1);
			__nonMP_DynamicFunc__append_keys2();
			__nonMP_md5_unicode_convert(0);
			if (curdat.using_flat_buffers_sse2_ok) {
				if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1) {
#ifdef _OPENMP
					DynamicFunc__MD5_crypt_input2_overwrite_input1(0,m_count,0);
#else
					DynamicFunc__MD5_crypt_input2_overwrite_input1();
#endif
				} else if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1_offset32) {
					int i;
					/* hex digest lands at offset 32; pre-set lengths so the
					   append starts there */
					for (i = 0; i < m_count; ++i)
						total_len_X86[i] = 32;
#ifdef _OPENMP
					DynamicFunc__MD5_crypt_input2_append_input1(0,m_count,0);
#else
					DynamicFunc__MD5_crypt_input2_append_input1();
#endif
				} else {
					// calls 'old' code (ossl, sorry :( We should FIND and remove any format
					// written this way, if it is
					__possMP_DynamicFunc__crypt2_md5();
				}
			} else {
				__possMP_DynamicFunc__crypt2_md5();
				if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1)
				{
					if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1==2)
						__nonMP_DynamicFunc__SSEtoX86_switch_output2();
					__nonMP_DynamicFunc__clean_input();
					__nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16();
				}
				if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1_offset32)
				{
#ifndef MMX_COEF
					if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1_offset32==2)
#else
					if (dynamic_use_sse == 1)
#endif
						__nonMP_DynamicFunc__SSEtoX86_switch_output2();
					__nonMP_DynamicFunc__clean_input();
					__nonMP_DynamicFunc__set_input_len_32();
					__nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16();
				}
			}
		}
	}
	// Ok, now we 'run' the script. We simply call 1 function right after the other.
	// ALL functions are void f(void). They use the globals:
	// input_buf1[] input_buf2[] (requires thread safety)
	// total_len1[] total_len2[] (requires thread safety)
	// crypt1[] crypt2[] (requires thread safety)
	// md5_unicode_convert (requires thread safety, had to change to array)
	// saved_key[] (const?)
	// saved_key_len[] (const)
	// cursalt, cursalt2 (const)
	// saltlen, saltlen2 (const)
	// m_count (const)
	// nConsts (const)
	// Consts[], ConstsLen[] (const)
	// Since this array is in a structure, we assign a simple pointer to it
	// before walking. Trivial improvement, but every cycle counts :)
	{
#ifdef _OPENMP
	if ((curdat.pFmtMain->params.flags & FMT_OMP) == FMT_OMP) {
		int j;
		/* Each thread runs the FULL script over its own [j, top) key slice,
		   with slice size rounded up to the format's SIMD granularity. */
		int inc = (m_count+m_ompt-1) / m_ompt;
		//printf ("maxkeys=%d m_count=%d inc1=%d granularity=%d inc2=%d\n", curdat.pFmtMain->params.max_keys_per_crypt, m_count, inc, curdat.omp_granularity, ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity);
		inc = ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity;
#pragma omp parallel for shared(curdat, inc, m_count)
		for (j = 0; j < m_count; j += inc) {
			int i;
			int top=j+inc;
			/* The last block may 'appear' to have more keys than we have in the
			   entire buffer space. This is due to the granularity. If so,
			   reduce that last one to stop at end of our buffers. NOT doing
			   this is causes a huge buffer overflow. */
			if (top > curdat.pFmtMain->params.max_keys_per_crypt)
				top = curdat.pFmtMain->params.max_keys_per_crypt;
			// we now run a full script in this thread, using only a subset of
			// the data, from [j,top) The next thread will run from [top,top+inc)
			// each thread will take the next inc values, until we get to m_count
			for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
				(*(curdat.dynamic_FUNCTIONS[i]))(j,top,omp_get_thread_num());
		}
	} else {
		int i;
		// same code (almost), but without the threads.
		for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
			(*(curdat.dynamic_FUNCTIONS[i]))(0,m_count,0);
	}
#else
	int i;
	for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) {
		(*(curdat.dynamic_FUNCTIONS[i]))();
#if 0
		// Dump state (for debugging help)
		printf ("\nState after function: %s\n", dynamic_Find_Function_Name(curdat.dynamic_FUNCTIONS[i]));
		// dump input 1
#ifdef MMX_COEF
		dump_stuff_mmx_msg("input_buf[0]", input_buf[0].c, 64, 0);
		dump_stuff_mmx_msg("input_buf[1]", input_buf[0].c, 64, 1);
		dump_stuff_mmx_msg("input_buf[2]", input_buf[0].c, 64, 2);
		dump_stuff_mmx_msg("input_buf[3]", input_buf[0].c, 64, 3);
#endif
		printf ("input_buf86[0] : %*.*s\n", total_len_X86[0],total_len_X86[0],input_buf_X86[0].x1.b);
		printf ("input_buf86[1] : %*.*s\n", total_len_X86[1],total_len_X86[1],input_buf_X86[1].x1.b);
		printf ("input_buf86[2] : %*.*s\n", total_len_X86[2],total_len_X86[2],input_buf_X86[2].x1.b);
		printf ("input_buf86[3] : %*.*s\n", total_len_X86[3],total_len_X86[3],input_buf_X86[3].x1.b);
		// dump crypt 1
#ifdef MMX_COEF
		dump_stuff_mmx_msg("crypt_key[0]", crypt_key[0].c, 16, 0);
		dump_stuff_mmx_msg("crypt_key[1]", crypt_key[0].c, 16, 1);
		dump_stuff_mmx_msg("crypt_key[2]", crypt_key[0].c, 16, 2);
		dump_stuff_mmx_msg("crypt_key[3]", crypt_key[0].c, 16, 3);
#endif
		dump_stuff_be_msg("crypt_key_X86[0]", crypt_key_X86[0].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[1]", crypt_key_X86[1].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[2]", crypt_key_X86[2].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[3]", crypt_key_X86[3].x1.b, 16);
		// dump input 2
#ifdef MMX_COEF
		dump_stuff_mmx_msg("input_buf2[0]", input_buf2[0].c, 64, 0);
		dump_stuff_mmx_msg("input_buf2[1]", input_buf2[0].c, 64, 1);
		dump_stuff_mmx_msg("input_buf2[2]", input_buf2[0].c, 64, 2);
		dump_stuff_mmx_msg("input_buf2[3]", input_buf2[0].c, 64, 3);
#endif
		printf ("input2_buf86[0] : %*.*s\n", total_len2_X86[0],total_len2_X86[0],input_buf2_X86[0].x1.b);
		printf ("input2_buf86[1] : %*.*s\n", total_len2_X86[1],total_len2_X86[1],input_buf2_X86[1].x1.b);
		printf ("input2_buf86[2] : %*.*s\n", total_len2_X86[2],total_len2_X86[2],input_buf2_X86[2].x1.b);
		printf ("input2_buf86[3] : %*.*s\n", total_len2_X86[3],total_len2_X86[3],input_buf2_X86[3].x1.b);
		// dump crypt 2
#ifdef MMX_COEF
		dump_stuff_mmx_msg("crypt_key2[0]", crypt_key2[0].c, 16, 0);
		dump_stuff_mmx_msg("crypt_key2[1]", crypt_key2[0].c, 16, 1);
		dump_stuff_mmx_msg("crypt_key2[2]", crypt_key2[0].c, 16, 2);
		dump_stuff_mmx_msg("crypt_key2[3]", crypt_key2[0].c, 16, 3);
#endif
		dump_stuff_be_msg("crypt_key2_X86[0]", crypt_key2_X86[0].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[1]", crypt_key2_X86[1].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[2]", crypt_key2_X86[2].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[3]", crypt_key2_X86[3].x1.b, 16);
#endif
	}
#endif
	}
#if FMT_MAIN_VERSION > 10
	return m_count;
#endif
}
/*********************************************************************************
 * 'normal' hashing functions
 *********************************************************************************/
extern char *MD5_DumpHexStr(void *p);
#if !ARCH_LITTLE_ENDIAN
// the lower 8 bits is zero on the binary (but filled in on the hash). We need to dump the low 8
/* Big-endian 64x4 variants: each pairs a binary_hash (from the stored binary)
 * with a get_hash (from crypt_key_X86), masking 4..24 bits after the >>8 shift
 * so both sides hash identically despite the zeroed low byte. */
static int binary_hash_0_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & 0xf; }
static int binary_hash_1_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & 0xff; }
static int binary_hash_2_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & 0xfff; }
static int binary_hash_3_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & 0xffff; }
static int binary_hash_4_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & 0xfffff; }
static int binary_hash_5_64x4(void * binary) { return (((ARCH_WORD_32 *)binary)[0]>>8) & 0xffffff; }
static int get_hash_0_64x4(int index) {
#if MD5_X2
	/* odd indices live in the .x2 half of the packed element */
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & 0xf;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & 0xf;}
static int get_hash_1_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & 0xff;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & 0xff;}
static int get_hash_2_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & 0xfff;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & 0xfff;}
static int get_hash_3_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & 0xffff;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & 0xffff;}
static int get_hash_4_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & 0xfffff;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & 0xfffff;}
static int get_hash_5_64x4(int index) {
#if MD5_X2
	if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & 0xffffff;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & 0xffffff;}
#endif
static int get_hash_0(int index)
{
#ifdef MMX_COEF
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(MMX_COEF-1)] & 0xf;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & 0xf;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & 0xf;
}
static int get_hash_1(int index)
{
#ifdef MMX_COEF
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(MMX_COEF-1)] & 0xff;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & 0xff;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & 0xff;
}
static int get_hash_2(int index)
{
#ifdef MMX_COEF
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(MMX_COEF-1)] & 0xfff;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & 0xfff;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & 0xfff;
}
static int get_hash_3(int index)
{
#ifdef MMX_COEF
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(MMX_COEF-1)] & 0xffff;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & 0xffff;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & 0xffff;
}
static int get_hash_4(int index)
{
#ifdef MMX_COEF
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(MMX_COEF-1)] & 0xfffff;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & 0xfffff;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & 0xfffff;
}
static int get_hash_5(int index)
{
#ifdef MMX_COEF
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(MMX_COEF-1)] & 0xffffff;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & 0xffffff;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & 0xffffff;
}
static int get_hash_6(int index)
{
#ifdef MMX_COEF
if (dynamic_use_sse&1) {
unsigned int idx = ( ((unsigned)index)>>(MMX_COEF>>1));
return ((ARCH_WORD_32 *)&(crypt_key[idx].c))[index&(MMX_COEF-1)] & 0x7ffffff;
}
#endif
#if MD5_X2
if (index & 1)
return crypt_key_X86[index>>MD5_X2].x2.w2[0] & 0x7ffffff;
#endif
return crypt_key_X86[index>>MD5_X2].x1.w[0] & 0x7ffffff;
}
/************************************************************************
 * We now fully handle all hashing of salts, here in the format. We
 * return a pointer to an allocated salt record. Thus, we search all
 * of the salt records, looking for the same salt. If we find it, we
 * want to return THAT pointer, and not allocate a new pointer.
 * This works great, but forces us to do salt comparison here.
 ***********************************************************************/
/* Separate-chaining hash table used to de-duplicate normalized salts.
 * Entries and the raw salt bytes are carved out of pooled "tiny" allocations
 * that live for the whole run (never individually freed). */
#define DYNA_SALT_HASH_BITS 15
#define DYNA_SALT_HASH_SIZE (1<<DYNA_SALT_HASH_BITS)
#define DYNA_SALT_HASH_MOD (DYNA_SALT_HASH_SIZE-1)
typedef struct dyna_salt_list_entry {
	struct dyna_salt_list_entry *next;
	unsigned char *salt;	// points into the pooled salt-byte buffer
} dyna_salt_list_entry;
typedef struct {
	dyna_salt_list_entry *head, *tail;	// bucket chain, appended at tail
	int count;				// number of entries in this bucket
} dyna_salt_list_main;
typedef struct {
	dyna_salt_list_main List;
} SaltHashTab_t;
// the bucket array itself, lazily allocated on first FindSaltHash() call
static SaltHashTab_t *SaltHashTab=NULL;
// pool of list entries, and the next free entry within it
static dyna_salt_list_entry *pSaltHashData=NULL, *pSaltHashDataNext=NULL;
// entries remaining in the current entry pool
static int dyna_salt_list_count=0;
// pool of raw salt bytes, and the next free byte within it
static unsigned char *pSaltDataBuf=NULL, *pNextSaltDataBuf=NULL;
// bytes remaining in the current salt-byte pool
static int nSaltDataBuf=0;
/*
 * Append a copy of 'salt' (len bytes) to hash bucket 'idx' and return a
 * pointer to the stored copy.  The caller (FindSaltHash) has already
 * verified this salt is not a duplicate.  Entry records and salt bytes come
 * from two pools that are replenished when exhausted; mem_*_tiny memory is
 * never freed, so the pointers handed out stay valid for the whole run.
 */
static unsigned char *AddSaltHash(unsigned char *salt, unsigned len, unsigned int idx) {
	unsigned char *pRet;
	// replenish the entry pool, 25000 records at a time
	if (dyna_salt_list_count == 0) {
		pSaltHashDataNext = pSaltHashData = mem_calloc_tiny(sizeof(dyna_salt_list_entry) * 25000, MEM_ALIGN_WORD);
		dyna_salt_list_count = 25000;
	}
	// replenish the byte pool when this salt will not fit
	// (any remaining slack in the old pool is simply abandoned)
	if (nSaltDataBuf < len) {
		pSaltDataBuf = pNextSaltDataBuf = mem_alloc_tiny(0x60000, MEM_ALIGN_NONE);
		nSaltDataBuf = 0x60000;
	}
	pRet = pNextSaltDataBuf;
	pSaltHashDataNext->salt = pNextSaltDataBuf;
	memcpy(pSaltHashDataNext->salt, salt, len);
	pNextSaltDataBuf += len;
	nSaltDataBuf -= len;
	// link the new entry at the tail of this bucket's chain
	if (SaltHashTab[idx].List.count == 0)
		SaltHashTab[idx].List.tail = SaltHashTab[idx].List.head = pSaltHashDataNext;
	else {
		SaltHashTab[idx].List.tail->next = pSaltHashDataNext;
		SaltHashTab[idx].List.tail = pSaltHashDataNext;
	}
	++SaltHashTab[idx].List.count;
	++pSaltHashDataNext;
	--dyna_salt_list_count;
	return pRet;
}
/*
 * Look up 'salt' (len bytes, crc is its pkzip CRC-32) in the dedup table.
 * If an identical salt was stored before, return the stored copy; otherwise
 * store this one and return the new copy.  The table itself is created
 * lazily on the first call.
 */
static unsigned char *FindSaltHash(unsigned char *salt, unsigned len, u32 crc) {
	unsigned int bucket = crc & DYNA_SALT_HASH_MOD;
	dyna_salt_list_entry *node;

	if (!SaltHashTab)
		SaltHashTab = mem_calloc_tiny(sizeof(SaltHashTab_t) * DYNA_SALT_HASH_SIZE, MEM_ALIGN_WORD);

	// Walk this bucket's chain looking for a byte-identical salt.
	for (node = SaltHashTab[bucket].List.head; node; node = node->next) {
		if (!memcmp((char*)salt, (char*)node->salt, len))
			return node->salt;	// duplicate: reuse the stored copy
	}
	// Not present (or bucket empty): store a fresh copy.
	return AddSaltHash(salt, len, bucket);
}
/*
 * Compute the pkzip CRC-32 of the salt bytes and hand off to FindSaltHash,
 * which dedups and returns the canonical stored pointer for this salt.
 */
static unsigned char *HashSalt(unsigned char *salt, unsigned len) {
	u32 crc = 0xffffffff;
	u32 i;

	for (i = 0; i < len; ++i)
		crc = pkzip_crc32(crc, salt[i]);
	// final complement, as in the standard CRC-32 definition
	return FindSaltHash(salt, len, ~crc);
}
/*
 * If 'p' (len bytes, NUL terminated) starts with the literal tag "HEX$",
 * decode the hex digits that follow into raw bytes, in place, and return
 * the decoded length.  Otherwise the buffer is left untouched and the
 * original len is returned.  The output is NUL terminated.
 */
static int ConvertFromHex(unsigned char *p, int len) {
	unsigned char *cp;
	int i, x;
	/* A tagged value needs at least the 4-byte "HEX$" prefix; checking len
	   first keeps memcmp from reading past the end of a shorter buffer. */
	if (!p || len < 4 || memcmp(p, "HEX$", 4))
		return len;
	// Ok, do a convert, and return 'new' len.
	len -= 4;
	len >>= 1;	// two hex digits per output byte
	cp = p;
	x = len;
	for (i=4; x; --x, i+= 2) {
		*cp++ = atoi16[ARCH_INDEX(p[i])]*16 + atoi16[ARCH_INDEX(p[i+1])];
	}
	*cp = 0;
	return len;
}
/*
 * Convert an external salt string (raw salt, optionally followed by
 * "$$2<salt2>", "$$U<user>" and/or "$$F<d><field>" markers, in any order
 * after the raw salt) into the packed internal format written to Buffer:
 *   2 base-8 digits : length of the raw salt
 *   4 base-8 digits : bit mask of which extra salt values are present
 *                     (bit0=salt2, bit1=user, bits 2..11 = fields 0..9)
 *   raw salt bytes, then each present extra value as a pascal string
 *   (1 length byte + data), always in the order salt2, user, fields 0..9.
 * NOTE: extern_salt is modified in place ($$ markers are cut off with NULs,
 * HEX$ values are decoded).  Returns the total bytes written to Buffer.
 */
static unsigned salt_external_to_internal_convert(unsigned char *extern_salt, unsigned char *Buffer) {
	// Ok, we get this: extern_salt = salt_data$$2salt2$$Uuser ... where anything can be missing or in any order
	// the any order has 1 exception of salt_data MUST be first. So if we get $$2salt2, then we know there is no salt-1 value.
	unsigned char *salt2=0, *userid=0, *Flds[10];
	int i, nsalt2=0, nuserid=0, nFlds[10]={0,0,0,0,0,0,0,0,0,0};
	unsigned char len = strlen((char*)extern_salt), bit;
	unsigned bit_array=0;
	unsigned the_real_len = 6;  // 2 bytes base-8 length, and 4 bytes base-8 bitmap.
	// work from back of string to front, looking for the $$X signatures.
	// (back-to-front is required: cutting a marker off with a NUL must not
	// hide an earlier marker from the scan)
	for (i = len-3; i >= 0; --i) {
		if (extern_salt[i] == '$' && extern_salt[i+1] == '$') {
			// a 'likely' extra salt value.
			switch(extern_salt[i+2]) {
				case '2':
					if (curdat.b2Salts) {
						// secondary salt: decode and detach from the raw salt
						salt2 = &extern_salt[i+3];
						nsalt2 = strlen((char*)salt2);
						nsalt2 = ConvertFromHex(salt2, nsalt2);
						extern_salt[i] = 0;
						bit_array |= 1;
						the_real_len += (nsalt2+1);
					}
					break;
				case 'U':
					if (curdat.nUserName) {
						// user-name value
						userid = &extern_salt[i+3];
						nuserid = strlen((char*)userid);
						nuserid = ConvertFromHex(userid, nuserid);
						extern_salt[i] = 0;
						bit_array |= 2;
						the_real_len += (nuserid+1);
					}
					break;
				case 'F': {
					// numbered field value F0..F9, only if that field is enabled in FldMask
					if (extern_salt[i+3] >= '0' && extern_salt[i+3] <= '9') {
						if (curdat.FldMask && (curdat.FldMask & (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) == (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) {
							Flds[extern_salt[i+3]-'0'] = &extern_salt[i+4];
							nFlds[extern_salt[i+3]-'0'] = strlen((char*)(Flds[extern_salt[i+3]-'0']));
							nFlds[extern_salt[i+3]-'0'] = ConvertFromHex(Flds[extern_salt[i+3]-'0'], nFlds[extern_salt[i+3]-'0']);
							extern_salt[i] = 0;
							bit_array |= (1<<(2+extern_salt[i+3]-'0'));
							the_real_len += (nFlds[extern_salt[i+3]-'0']+1);
						}
						break;
					}
				}
			}
		}
	}
	// We have now ripped the data apart. Now put it into Buffer, in proper ORDER
	// Length of salt (salt1) These 2 are stored as base-8 numbers.
	len = strlen((char*)extern_salt);
	len = ConvertFromHex(extern_salt, len);
	the_real_len += len;
	*Buffer++ = (len>>3) + '0';
	*Buffer++ = (len&7) + '0';
	// bit array (4 base-8 digits, most significant first)
	*Buffer++ = (bit_array>>9) + '0';
	*Buffer++ = ((bit_array>>6)&7) + '0';
	*Buffer++ = ((bit_array>>3)&7) + '0';
	*Buffer++ = (bit_array&7) + '0';
	memcpy((char*)Buffer, (char*)extern_salt, len);
	Buffer += len;
	if (!bit_array)
		return the_real_len;
	// extras follow as pascal strings; each appended bit is cleared so we
	// can return early once everything recorded in bit_array is written
	if (nsalt2) {
		*Buffer++ = nsalt2;
		memcpy((char*)Buffer, (char*)salt2, nsalt2);
		Buffer += nsalt2;
		bit_array &= ~1;
		if (!bit_array)
			return the_real_len;
	}
	if (nuserid) {
		*Buffer++ = nuserid;
		memcpy((char*)Buffer, (char*)userid, nuserid);
		Buffer += nuserid;
		bit_array &= ~2;
		if (!bit_array)
			return the_real_len;
	}
	bit = 4;
	for (i = 0; i < 10; ++i, bit<<=1) {
		if (nFlds[i]) {
			*Buffer++ = nFlds[i];
			memcpy((char*)Buffer, (char*)(Flds[i]), nFlds[i]);
			Buffer += nFlds[i];
			bit_array &= ~bit;
			if (!bit_array)
				return the_real_len;
		}
	}
	return the_real_len;
}
/*********************************************************************************
 * This salt function has been TOTALLY re-written. Now, we do these things:
 * 1. convert from external format ($salt$$Uuser$$2HEX$salt2_in_hex, etc, into
 * our internal format. Our internal format is 2 base-8 numbers (2 digit and 4
 * digit), followed by the 'raw' salt bytes, followed by pascal strings of any
 * other special salt values (salt2, user, fields 0 to 9). The first 2 digit
 * base 8 number is the length of the binary bytes of the 'real' salt. The
 * 2nd base-8 4 digit number, is a bit mask of what 'extra' salt types are
 * contained.
 * 2. We allocate and 'own' the salt buffers here, so that:
 * 3. We detect duplicate salts. NOTE, we have normalized the salts, so 2 salts that
 * appear different (external format), appear exactly the same on internal format.
 * Thus, we dupe remove them here.
 * 4. We allocate storage for the salts. The ONLY thing we return to john, is
 * a 4 (or 8 byte in 64 bit builds) pointer to the salt. Thus, when we find
 * a dupe, we do not have to allocate ANY memory, and simply return the pointer
 * to the original salt (which is the same as the one we are working on now).
 *
 * this is much more complex, however, it allows us to use much less memory, to
 * have the set_salt function operate VERY quickly (all processing is done here).
 * It also allows john load time to happen FASTER (yes faster), than it was happening
 * before, due to smaller memory footprint, and john's external salt collision to have
 * less work to do. The memory footprint was also reduced, because now we store
 * JUST the required memory, and a pointer. Before, often we stored a LOT of memory
 * for many format types. For a few types, we do use more memory with this method
 * than before, but for most the memory usage is way down.
 *********************************************************************************/
/*
 * fmt_main salt() method: normalize the salt portion of 'ciphertext' into
 * the packed internal format, dedup it via HashSalt(), and return a small
 * static buffer holding just the POINTER to the stored salt (john copies
 * SALT_SIZE... actually sizeof(char*) bytes from what we return, since the
 * "salt" this format registers is pointer-sized).
 * Side effects: may switch 'curdat' to the sub-format named in the hash tag
 * if a different dynamic type is currently loaded.
 */
static void *salt(char *ciphertext)
{
	char Salt[SALT_SIZE+1], saltIntBuf[SALT_SIZE+1];
	int off, possible_neg_one=0;
	unsigned char *saltp;
	unsigned the_real_len;
	// static return buffer: holds one pointer, aligned via the union
	static union x {
		unsigned char salt_p[sizeof(unsigned char*)];
		ARCH_WORD p[1];
	} union_x;

	// unsalted format: return a NULL pointer value
	if ( (curdat.pSetup->flags&MGF_SALTED) == 0) {
		memset(union_x.salt_p, 0, sizeof(union_x.salt_p));
		return union_x.salt_p;
	}

	memset(Salt, 0, SALT_SIZE+1);

	// Ok, see if the wrong dynamic type is loaded (such as the 'last' dynamic type).
	if (!strncmp(ciphertext, "$dynamic_", 9)) {
		char *cp1 = &ciphertext[9];
		char *cp2 = &curdat.dynamic_WHICH_TYPE_SIG[9];
		while (*cp2 && *cp2 == *cp1) {
			++cp1; ++cp2;
		}
		if (*cp2) {
			// signature mismatch: parse the "$dynamic_<n>$" tag and switch
			// curdat to that sub-format's private data
			char subformat[17];
			struct fmt_main *pFmtLocal;
			int nFmtNum;
			memcpy(subformat, ciphertext, 16);
			subformat[16] = 0;
			cp2 = &subformat[9];
			while (*cp2 && *cp2 != '$')
				++cp2;
			*cp2 = 0;
			nFmtNum = -1;
			sscanf(subformat, "$dynamic_%d", &nFmtNum);
			if (nFmtNum==-1)
				return union_x.salt_p;
			pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
			memcpy(&curdat, pFmtLocal->private.data, sizeof(private_subformat_data));
		}
	}

	// nothing salt-like to extract for this sub-format
	if (curdat.dynamic_FIXED_SALT_SIZE==0 && !curdat.nUserName && !curdat.FldMask)
		return union_x.salt_p;
	// locate the salt within the ciphertext (tag may or may not be present)
	if (!strncmp(ciphertext, "$dynamic_", 9))
		off=curdat.dynamic_SALT_OFFSET;
	else
		off=curdat.dynamic_SALT_OFFSET-strlen(curdat.dynamic_WHICH_TYPE_SIG);

	// if the raw salt is empty and the next token is a recognized $$X
	// marker, back up one byte so the markers are not skipped
	if (ciphertext[off] == '$') {
		if (ciphertext[off+1]=='U' && curdat.nUserName)
			possible_neg_one = -1;
		else if (ciphertext[off+1]=='2' && curdat.b2Salts)
			possible_neg_one = -1;
		else if (ciphertext[off+1]=='F' && ciphertext[off+2]>='0' && ciphertext[off+2]<='9' && curdat.FldMask) {
			if ((curdat.FldMask & (MGF_FLDx_BIT<<(ciphertext[off+2]-'0'))) == (MGF_FLDx_BIT<<(ciphertext[off+2]-'0')))
				possible_neg_one = -1;
		}
	}
	strnzcpy(Salt, &ciphertext[off + possible_neg_one], SALT_SIZE);

	if (curdat.dynamic_salt_as_hex)
	{
		// Do not 'worry' about SSE/MMX, Only do 'generic' md5. This is ONLY done
		// at the start of the run. We will NEVER see this run, once john starts.
		MD5_CTX ctx;
		unsigned char Buf[16];
		unsigned char *cpo, *cpi, i;
		unsigned slen=strlen(Salt);
		MD5_Init(&ctx);
		if (curdat.dynamic_salt_as_hex & 0x100)
		{
			// 0x100 flag: hash the salt as UTF-16LE (each byte followed by 0)
			char *s2 = mem_alloc(slen*2+1);
			for (i = 0; i < slen; ++i)
			{
				s2[i<<1] = Salt[i];
				s2[(i<<1)+1] = 0;
			}
			MD5_Update(&ctx, s2, slen*2);
			MEM_FREE(s2);
		}
		else
			MD5_Update(&ctx, Salt, slen);
		MD5_Final(Buf, &ctx);
		if ( (curdat.dynamic_salt_as_hex&3) == 2) {
			// mode 2: append the md5 hex as a secondary ($$2) salt
			strcat(Salt, "$$2");
			cpo = (unsigned char *)&Salt[slen+3];
		}
		else {
			// default: replace the salt with its md5 hex
			cpo = (unsigned char*)Salt;
			memset(Salt, 0, SALT_SIZE+1);
		}
		cpi = Buf;
		for (i = 0; i < 16; ++i)
		{
			*cpo++ = dynamic_itoa16[(*cpi)>>4];
			*cpo++ = dynamic_itoa16[(*cpi)&0xF];
			++cpi;
		}
		*cpo = 0;
	}
	if (curdat.dynamic_hdaa_salt) {
		//=$dynamic_1060$679066476e67b5c7c4e88f04be567f8b$8c12bd8f728afe56d45a0ce846b70e5a$$Uuser$$F2myrealm$$F3GET$/$$F400000001$4b61913cec32e2c9$auth:nocode
		//digest authentication scheme :
		//H1 = md5(user:realm:password)
		//H2 = md5(method:digestURI)
		//response = H3 = md5(h1:nonce:nonceCount:ClientNonce:qop:h2)
		// salt is:
		//8c12bd8f728afe56d45a0ce846b70e5a$$Uuser$$F2myrealm$$F3GET$/$$F400000001$4b61913cec32e2c9$auth
		//change this to: (abcd is base-64 number)
		//abcd :8c12bd8f728afe56d45a0ce846b70e5a:00000001:4b61913cec32e2c9:auth:H1$$Uuser$$F2myrealm
		unsigned char *cp2, *cp3, *cp4, *cpTmp = mem_alloc(strlen(Salt) + 200); // larger than needed, 100% assured.
		unsigned char *cpU2 = mem_alloc(strlen(Salt));
		static unsigned cnt = 1;
		unsigned i, j;
		MD5_CTX ctx;
		unsigned char Buf[16], h1_input[64];

		// unique 4-char base-64 prefix (one per loaded hash) padded to 33 chars
		memset(cpTmp, ' ', 33);
		j = cnt++;
		cp2 = cpTmp;
		for (i = 0; i < 4; ++i) {
			*cp2++ = itoa64[j%64];
			j /= 64;
		}
		// split off the "$$U..." user/realm tail into cpU2 (re-appended later)
		cp3 = (unsigned char*)strstr(Salt, "$$U");
		*cp3++ = 0;
		cp2 = cpU2;
		*cp2++ = '$';
		while (strncmp((char*)cp3, "$$F3", 4))
			*cp2++ = *cp3++;
		*cp2 = 0;
		// rebuild the salt as colon-separated fields after the prefix
		cp2 = &cpTmp[32];
		*cp2++ = ':';
		strcpy((char*)cp2, Salt);
		cp2 += strlen((char*)cp2);
		*cp2++ = ':';
		// gather method:digestURI (the $$F3 value) into h1_input, with '$'
		// separators converted to ':'
		cp4 = h1_input;
		cp3 += 4;
		while (strncmp((char*)cp3, "$$F4", 4)) {
			if (*cp3 == '$') { *cp4++ = ':'; ++cp3; continue; }
			*cp4++ = *cp3++;
		}
		*cp4 = 0;
		// H2 = md5(method:digestURI), pre-computed once per salt
		MD5_Init(&ctx);
		MD5_Update(&ctx, h1_input, strlen((char*)h1_input));
		MD5_Final(Buf, &ctx);

		// copy the remaining fields (nonceCount, clientNonce, qop) then the
		// H2 hex digest
		cp3 += 4;
		while (*cp3) {
			if (*cp3 == '$') { *cp2++ = ':'; ++cp3; continue; }
			*cp2++ = *cp3++;
		}
		*cp2++ = ':';
		cp3 = Buf;
		for (i = 0; i < 16; ++i)
		{
			*cp2++ = dynamic_itoa16[(*cp3)>>4];
			*cp2++ = dynamic_itoa16[(*cp3)&0xF];
			++cp3;
		}
		*cp2 = 0;
		// re-append the user/realm tail and make this the working salt
		strcat((char*)cpTmp, (char*)cpU2);
		strcpy(Salt, (char*)cpTmp);
		MEM_FREE(cpU2);
		MEM_FREE(cpTmp);
	}

	the_real_len = salt_external_to_internal_convert((unsigned char*)Salt, (unsigned char*)saltIntBuf);

	// Now convert this into a stored salt, or find the 'already' stored same salt.
	saltp = HashSalt((unsigned char*)saltIntBuf, the_real_len);
	memcpy(union_x.salt_p, &saltp, sizeof(saltp));
	return union_x.salt_p;
}
/*********************************************************************************
* 'special' get salt function for phpass. We return the 8 bytes salt, followed by
* the 1 byte loop count. 'normally' in phpass format, that order is reversed.
* we do it this way, since our 'primitive' functions would not know to treat the
* salt any differently for phpass. Thus the primitives are told about the first
* 8 bytes (and not the full 9). But the phpass crypt function uses that 9th byte.
*********************************************************************************/
/*
 * phpass-specific salt() method.  Builds a 15-byte internal salt:
 * "10" "0000" (length 8 in base-8, empty bit mask) + the 8 salt bytes +
 * the 1-byte loop-count character.  The loop count is placed LAST (the
 * reverse of the on-disk phpass order) so the generic primitives see only
 * the leading 8 bytes; the phpass crypt routine reads the 9th byte itself.
 * Returns a static buffer holding the pointer to the dedup'd stored salt.
 */
static void *salt_phpass(char *ciphertext)
{
	unsigned char buf[20], *stored;
	static union x {
		unsigned char salt_p[sizeof(unsigned char*)];
		unsigned long p[1];
	} union_x;

	// step past the "$dynamic_#$" tag when present
	if (!strncmp(ciphertext, "$dynamic_", 9)) {
		ciphertext += 9;
		while (*ciphertext != '$')
			++ciphertext;
	}
	// "100000" = base-8 length 8 plus an all-zero extra-salt bitmap
	sprintf((char*)buf, "100000%8.8s%c", &ciphertext[25], ciphertext[24]);

	// Now convert this into a stored salt, or find the 'already' stored same salt.
	stored = HashSalt(buf, 15);
	memcpy(union_x.salt_p, &stored, sizeof(stored));
	return union_x.salt_p;
}
/*********************************************************************************
 * Now our salt is returned only as a pointer, so this hash function simply
 * mixes the pointer's own bits down into a salt-table bucket index.
 *********************************************************************************/
/*
 * fmt_main salt_hash() method: bucket a salt for john's salt table.
 * 'salt' holds a word-aligned pointer value, so its low bits carry little
 * entropy; folding in H>>9 stirs higher bits down before masking, which
 * matters when salts come from fixed-size pool allocations.
 */
static int salt_hash(void *salt)
{
	unsigned long H;

	if (!salt)
		return 0;
	if ( (curdat.pSetup->flags&MGF_SALTED) == 0)
		return 0;

	// hash the pointer value itself (word alignment ignored)
	H = *((unsigned long*)salt);
	H ^= (H >> 9);
	return (int)(H & (SALT_HASH_SIZE - 1));
}
/*********************************************************************************
* Gets the binary value from a base-16 hash.
*********************************************************************************/
/*
 * fmt_main binary() method: decode the base-16 hash portion of the
 * ciphertext into BINARY_SIZE raw bytes.  Returns a lazily-allocated
 * static buffer (sized BINARY_SIZE_SHA, the largest binary we handle).
 */
static void *binary(char *_ciphertext)
{
	static char *realcipher;
	char *hex = _ciphertext;
	int i;

	if (!realcipher)
		realcipher = mem_alloc_tiny(BINARY_SIZE_SHA, MEM_ALIGN_WORD);

	// skip the "$dynamic_#$" tag when present
	if (!strncmp(_ciphertext, "$dynamic_", 9)) {
		hex += 9;
		while (*hex++ != '$')
			;
	}
	// two hex digits per output byte
	for (i = 0; i < BINARY_SIZE; ++i)
		realcipher[i] =
			atoi16[ARCH_INDEX(hex[i*2])]*16 +
			atoi16[ARCH_INDEX(hex[i*2+1])];
	return (void *)realcipher;
}
#if FMT_MAIN_VERSION > 9
// NOTE NOTE NOTE, we have currently ONLY implemented a non-salted function!!!
/*
 * Shared worker for the source*() methods below: rebuild the canonical
 * ciphertext string as signature + bin_len bytes of lowercase hex.
 * Only ONE of the wrapper functions is ever registered as the active
 * format's source() method at a time, so sharing the single static
 * buffer between them is safe.
 */
static char *source_hex_common(void *binary, int bin_len)
{
	static char Buf[256];
	unsigned char *cpi = (unsigned char*)(binary);
	char *cpo = Buf;
	int i;

	cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG);
	for (i = 0; i < bin_len; ++i) {
		*cpo++ = itoa16[(*cpi)>>4];
		*cpo++ = itoa16[*cpi&0xF];
		++cpi;
	}
	*cpo = 0;
	return Buf;
}
// source() variants: one per binary size (md5=16, sha1=20, sha224=28,
// md5x2/sha256=32, sha1x2=40, sha384=48, sha512=64 bytes).
static char *source(char *source, void *binary)
{
	return source_hex_common(binary, 16);
}
static char *source_20_hex(char *source, void *binary)
{
	return source_hex_common(binary, 20);
}
static char *source_28_hex(char *source, void *binary)
{
	return source_hex_common(binary, 28);
}
static char *source_32_hex(char *source, void *binary)
{
	return source_hex_common(binary, 32);
}
static char *source_40_hex(char *source, void *binary)
{
	return source_hex_common(binary, 40);
}
static char *source_48_hex(char *source, void *binary)
{
	return source_hex_common(binary, 48);
}
static char *source_64_hex(char *source, void *binary)
{
	return source_hex_common(binary, 64);
}
#endif
/*********************************************************************************
* Gets the binary value from a base-64 hash (such as phpass)
*********************************************************************************/
/*
 * Decode a 22-character crypt-style base-64 hash (phpass encoding: 6 bits
 * per character, little-endian within each 3-byte group) into 16 raw bytes.
 * Returns a static buffer.
 */
static void * binary_b64(char *ciphertext)
{
	int i;
	unsigned sixbits;
	static unsigned char b[16];
	int bidx=0;
	char *pos;

	// ugly code, but only called one time (at program load,
	// once for each candidate pass hash).
	pos = ciphertext;
	// skip the "$dynamic_#$" tag when present
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	// 5 full groups: 4 base-64 chars -> 3 output bytes each (15 bytes)
	for (i = 0; i < 5; ++i)
	{
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		b[bidx] = sixbits;
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		b[bidx++] |= (sixbits<<6);	// low 2 bits complete byte 0
		sixbits >>= 2;
		b[bidx] = sixbits;		// remaining 4 bits start byte 1
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		b[bidx++] |= (sixbits<<4);	// low 4 bits complete byte 1
		sixbits >>= 4;
		b[bidx] = sixbits;		// remaining 2 bits start byte 2
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		b[bidx++] |= (sixbits<<2);	// all 6 bits complete byte 2
	}
	// final partial group: 2 chars -> the 16th byte
	sixbits = atoi64[ARCH_INDEX(*pos++)];
	b[bidx] = sixbits;
	sixbits = atoi64[ARCH_INDEX(*pos++)];
	b[bidx] |= (sixbits<<6);

	//printf("\nciphertext=%s\n", ciphertext);
	//dump_stuff_msg("binary", b, 16);
	return b;
}
/* Decode 4 base-64 chars into one 24-bit value and scatter its 3 bytes to
 * output positions b1/b2/b3 (the interleaved ordering used by md5crypt). */
#define TO_BINARY(b1, b2, b3) \
	value = \
		(MD5_word)atoi64[ARCH_INDEX(pos[0])] | \
		((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6) | \
		((MD5_word)atoi64[ARCH_INDEX(pos[2])] << 12) | \
		((MD5_word)atoi64[ARCH_INDEX(pos[3])] << 18); \
	pos += 4; \
	b[b1] = value >> 16; \
	b[b2] = value >> 8; \
	b[b3] = value;

/*
 * Decode a 22-character md5crypt-style base-64 hash (bytes interleaved in
 * the FreeBSD crypt order) into 16 raw bytes.  Returns a static buffer.
 */
static void * binary_b64a(char *ciphertext)
{
	static unsigned char b[16];
	char *pos;
	MD5_word value;

	pos = ciphertext;
	// skip the "$dynamic_#$" tag when present
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	// the md5crypt interleave: each group fills 3 non-adjacent bytes
	TO_BINARY(0, 6, 12);
	TO_BINARY(1, 7, 13);
	TO_BINARY(2, 8, 14);
	TO_BINARY(3, 9, 15);
	TO_BINARY(4, 10, 5);
	// last 2 chars give the remaining byte 11
	b[11] =
		(MD5_word)atoi64[ARCH_INDEX(pos[0])] |
		((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6);

	MD5_swap((MD5_word*)b,(MD5_word*)b, 4);
	return b;
}
/*********************************************************************************
* Gets the binary value from a base-64 hash (such as cisco PIX)
*********************************************************************************/
/*
 * Decode a 16-character base-64 hash (cisco PIX style: 4 chars -> one
 * 24-bit word, four words total) into a static 4-word buffer.
 */
static void * binary_b64_4x6(char *ciphertext)
{
	static ARCH_WORD_32 b[4];
	char *pos = ciphertext;
	int i;

	// skip the "$dynamic_#$" tag when present
	if (!strncmp(pos, "$dynamic_", 9)) {
		pos += 9;
		while (*pos++ != '$')
			;
	}
	// each group of 4 chars packs little-endian 6-bit fields into one word
	for (i = 0; i < 4; ++i) {
		ARCH_WORD_32 v;
		v  = atoi64[ARCH_INDEX(pos[0])];
		v += atoi64[ARCH_INDEX(pos[1])] << 6;
		v += atoi64[ARCH_INDEX(pos[2])] << 12;
		v += atoi64[ARCH_INDEX(pos[3])] << 18;
		b[i] = v;
		pos += 4;
	}
	MD5_swap(b,b, 4);
	return (void *)b;
}
/*********************************************************************************
* Here is the main mdg_generic fmt_main. NOTE in its default settings, it is
* ready to handle base-16 hashes. The phpass stuff will be linked in later, IF
* needed.
*********************************************************************************/
/*
 * Template fmt_main for all dynamic sub-formats.  init() and the dynamic
 * setup code later copy and patch this structure per "$dynamic_#$" subtype
 * (swapping binary/salt/source methods as the expression requires).
 */
static struct fmt_main fmt_Dynamic =
{
	{	/* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
#ifdef MMX_COEF
		ALGORITHM_NAME,
#else
		ALGORITHM_NAME_X86,
#endif
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
#ifdef MMX_COEF
		PLAINTEXT_LENGTH,
#else
		PLAINTEXT_LENGTH_X86,
#endif
		BINARY_SIZE,
#if FMT_MAIN_VERSION > 9
		BINARY_ALIGN,
#endif
		SALT_SIZE,
#if FMT_MAIN_VERSION > 9
		SALT_ALIGN,
#endif
#ifdef MMX_COEF
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#else
		MIN_KEYS_PER_CRYPT_X86,
		MAX_KEYS_PER_CRYPT_X86,
#endif
#ifdef _OPENMP
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		dynamic_tests
	}, {	/* fmt_methods */
		init,
#if FMT_MAIN_VERSION > 10
		fmt_default_done,
		fmt_default_reset,
#endif
		prepare,
		valid,
		split,
		binary,
		salt,
#if FMT_MAIN_VERSION > 9
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
#endif
		{	/* binary_hash[7] */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{	/* get_hash[7] */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/**************************************************************
**************************************************************
**************************************************************
**************************************************************
* These are the md5 'primitive' functions that are used by
* the build-in expressions, and by the expression generator
* They load passwords, salts, user ids, do crypts, convert
* crypts into base-16, etc. They are pretty encompassing,
* and have been found to be able to do most anything with
* a standard 'base-16' md5 hash, salted or unsalted that
* fits a 'simple' php style expression.
**************************************************************
**************************************************************
**************************************************************
*************************************************************/
/*
 * Fill the two 256-entry byte-to-hex lookup tables: itoa16_w2_u[i] holds
 * the two UPPERCASE hex characters of byte i packed into one 16-bit word,
 * itoa16_w2_l[i] the lowercase pair.  Called once at startup.
 */
static void Dynamic_Load_itoa16_w2()
{
	int i;
	char pair[3];

	for (i = 0; i < 256; ++i) {
		/* uppercase pair */
		sprintf(pair, "%X%X", i >> 4, i & 0xF);
		memcpy(&(itoa16_w2_u[i]), pair, 2);
		/* lowercase pair */
		sprintf(pair, "%x%x", i >> 4, i & 0xF);
		memcpy(&(itoa16_w2_l[i]), pair, 2);
	}
}
#ifdef MMX_COEF
/**************************************************************
**************************************************************
* Here are some 'helpers' to our helpers, when it comes to
* loading data into the mmx/sse buffers. We have several
* of these common helper functions, and use them in 'most'
* of the helper primitives, instead of having the same
* code being inlined in each of them.
**************************************************************
*************************************************************/
/*
 * Write the 32 hex digits of the 16-byte crypt result for lane 'idx_mod'
 * into the interleaved MMX/SSE input buffer starting at the beginning of
 * that lane, then store the 0x80 padding marker after them.
 * itoa16_w2[] maps one byte to its two hex chars packed in a 16-bit word;
 * 'inc' (in 16-bit units) hops over the other lanes' interleaved data
 * between each pair of writes.  CRY points at the interleaved crypt output
 * block (hence the idx_mod<<2 lane offset and the CRY += strides).
 */
static void __SSE_append_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned idx_mod)
{
	// #3
    // 5955K  (core2, $dynamic_2$)
    // 1565K  (core2, $dynamic_1006$)
	// 3381K  (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#if (MMX_COEF==4)
#define inc 6
#else
#define inc 2
#endif
	unsigned short *IPBw = (unsigned short*)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;

	// terminating 0x80 padding byte right after the 32 hex chars
	*IPBw = 0x80;
#undef inc
}
/*
 * Identical to __SSE_append_output_base16_to_input() except that NO 0x80
 * padding marker is written afterwards: used when the hex digits overwrite
 * existing buffer content whose padding/length is already in place.
 */
static void __SSE_overwrite_output_base16_to_input(ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned idx_mod)
{
	// #3
    // 5955K  (core2, $dynamic_2$)
    // 1565K  (core2, $dynamic_1006$)
	// 3381K  (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#if (MMX_COEF==4)
#define inc 6
#else
#define inc 2
#endif
	unsigned short *IPBw = (unsigned short *)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
#undef inc
}
/*
 * Append the 32 hex digits of the 16-byte crypt result for lane 'idx_mod'
 * into the interleaved input buffer at byte offset 'ip', for the
 * half-aligned case: ip is 2 mod 4, so the first pair of hex chars lands
 * in the TOP half of a dword and every subsequent store straddles dword
 * boundaries the same way.  Finishes by OR-ing in the 0x80 padding marker.
 * 'inc' strides over the other lanes' dwords; 'incCRY' re-syncs the source
 * pointer across the interleaved crypt output block.
 */
static void __SSE_append_output_base16_to_input_semi_aligned_2(unsigned ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned idx_mod)
{
	// #1
    // 9586k/4740k  (core2, $dynamic_9$)
    // 5113k/4382k  (core2,$dynamic_10$)
	//  (ath64, $dynamic_9$)
	//  (ath64, $dynamic_10$)
#if (MMX_COEF==4)
# define inc 4
# define incCRY 12
#else
# define inc 2
# define incCRY 4
#endif
	// Ok, here we are 1/2 off. We are starting in the 'middle' of a DWORD (and end
	// in the middle of the last one).

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*MMX_COEF;

	CRY += (idx_mod<<2);

	// first byte handled here (only the top half of this dword is ours).
	*IPBdw &= 0xFFFF;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((ARCH_WORD_32)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);

	// Add the 0x80 at the proper location (offset 0x21)
	*IPBdw |= 0x800000;
#undef inc
#undef incCRY
}
/*
 * Append the 32 hex digits of the 16-byte crypt result for lane 'idx_mod'
 * into the interleaved input buffer at byte offset 'ip', for the
 * dword-aligned case (ip is 0 mod 4): each store writes one whole dword
 * (two packed hex-char pairs).  Finishes by storing the 0x80 padding
 * marker in the following dword.  'inc' strides over the other lanes;
 * 'incCRY' re-syncs the source pointer across the interleaved crypt block.
 */
static void __SSE_append_output_base16_to_input_semi_aligned_0(unsigned ip, ARCH_WORD_32 *IPBdw, unsigned char *CRY, unsigned idx_mod)
{
	// #2
    // 6083k  (core2, $dynamic_2$)
    // 1590K  (core2, $dynamic_1006$)
	// 3537K  (ath64, $dynamic_2$)
	// 890.3K (ath64, $dynamic_1006$)
#undef inc
#if (MMX_COEF==4)
#define inc 4
//# define incCRY 12
# define incCRY 14
#else
#define inc 2
# define incCRY 6
#endif

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*MMX_COEF;
	CRY += (idx_mod<<2);

	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((ARCH_WORD_32)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);

	// Add the 0x80 at the proper location (offset 0x21)
	IPBdw += inc;
	*IPBdw = 0x80;
#undef inc
#undef incCRY
}
/*
 * Append an 8-bit string cp (len bytes) to the SIMD input buffer IPB for
 * lane idx_mod, expanding each byte to UTF-16LE on the fly (char, 0x00).
 * bf_ptr is the current byte offset within the lane; bUpdate0x80 non-zero
 * writes the 0x80 end-of-input marker after the appended data.
 * On little-endian builds a fast path packs two output bytes per 32-bit
 * store once the write position is 4-byte aligned; otherwise (and for the
 * tail) a byte loop is used, hopping over the other interleaved lanes every
 * time the per-lane offset crosses a 4-byte boundary.
 */
static void __SSE_append_string_to_input_unicode(unsigned char *IPB, unsigned idx_mod, unsigned char *cp, unsigned len, unsigned bf_ptr, unsigned bUpdate0x80)
{
unsigned char *cpO;
#if ARCH_LITTLE_ENDIAN
// if big-endian, we gain nothing from this function (since we would have to byte swap)
if (len>1&&!(bf_ptr&1))
{
unsigned w32_cnt;
if(bf_ptr&2) {
// offset is 2 mod 4: emit one UTF-16 unit bytewise to reach DWORD alignment
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
bf_ptr += 2;
*cpO = *cp++;
cpO[1] = 0;
--len;
}
w32_cnt = len>>1;
if (w32_cnt)
{
// aligned: store two source chars (= one DWORD of UTF-16LE) per iteration
ARCH_WORD_32 *wpO;
wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
len -= (w32_cnt<<1);
bf_ptr += (w32_cnt<<2);
do
{
ARCH_WORD_32 x = 0;
x = cp[1];
x <<= 16;
x += cp[0];
*wpO = x;
cp += 2;
wpO += MMX_COEF;
}
while (--w32_cnt);
}
}
#endif
// generic bytewise tail (and the whole job on big-endian builds)
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0)
cpO += ((MMX_COEF-1)*4);
*cpO++ = 0;
if ( ((++bf_ptr)&3) == 0)
cpO += ((MMX_COEF-1)*4);
}
if (bUpdate0x80)
*cpO = 0x80;
}
/*
 * Append the raw string cp (len bytes) to the SIMD input buffer IPB for
 * lane idx_mod at per-lane byte offset bf_ptr.  bUpdate0x80 non-zero writes
 * the 0x80 end-of-input marker after the appended bytes.
 * On little-endian builds, if the write position is (or can be brought to)
 * a 4-byte boundary, whole DWORDs are copied per iteration, hopping over the
 * other interleaved lanes between stores; the remainder is copied bytewise.
 */
static void __SSE_append_string_to_input(unsigned char *IPB, unsigned idx_mod, unsigned char *cp, unsigned len, unsigned bf_ptr, unsigned bUpdate0x80)
{
unsigned char *cpO;
// if our insertion point is on an 'even' DWORD, then we use DWORD * copying, as long as we can
// This provides quite a nice speedup.
#if ARCH_LITTLE_ENDIAN
// if big-endian, we gain nothing from this function (since we would have to byte swap)
if (len>3&&(bf_ptr&3)) {
// copy bytes until the per-lane offset reaches a DWORD boundary
// (the loop always breaks before crossing one, so no lane hop is needed here)
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0) {
if (!len) {
if (bUpdate0x80)
*cpO = 0x80;
return;
}
break;
}
}
}
if (len>3&&!(bf_ptr&3))
{
unsigned w32_cnt = len>>2;
if (w32_cnt)
{
ARCH_WORD_32 *wpO;
wpO = (ARCH_WORD_32*)&IPB[GETPOS(bf_ptr, idx_mod)];
len -= (w32_cnt<<2);
bf_ptr += (w32_cnt<<2);
do
{
*wpO = *((ARCH_WORD_32*)cp);
cp += 4;
wpO += MMX_COEF;
}
while (--w32_cnt);
}
if (!len) {
if (bUpdate0x80)
IPB[GETPOS(bf_ptr, idx_mod)] = 0x80;
return;
}
}
#endif
// bytewise tail (and the whole job on big-endian builds)
cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
while (len--)
{
*cpO++ = *cp++;
if ( ((++bf_ptr)&3) == 0)
cpO += ((MMX_COEF-1)*4);
}
if (bUpdate0x80)
*cpO = 0x80;
}
#endif // #ifdef MMX_COEF from way above.
/*
 * Append Str (len bytes) to input buffer 1 of every candidate in this
 * thread's range ([first,last) under OpenMP, [0,m_count) otherwise),
 * updating the per-candidate lengths.  Dispatches on SIMD vs X86 layout and
 * on the per-thread unicode-conversion flag:
 *  - unicode off: raw byte append;
 *  - unicode on, non-ASCII/non-ISO-8859-1 target: convert once via
 *    enc_to_utf16() and append the UTF-16 bytes (negative enc_to_utf16
 *    return means truncation -- fall back to strlen16 of what converted);
 *  - unicode on, ASCII/ISO-8859-1 target: inline expand each byte to
 *    UTF-16LE (char, 0x00).
 */
static inline void __append_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned len)
{
unsigned j;
unsigned til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
if (!md5_unicode_convert_get(tid)) {
for (; j < til; ++j) {
// idx = SIMD block, idx_mod = lane within block; lane length lives in
// a bitfield of total_len[idx], (32/MMX_COEF) bits per lane
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
total_len[idx] += (len << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
}
} else {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
int outlen;
outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
total_len[idx] += ( outlen << ((32/MMX_COEF)*idx_mod));
// note we use the 'non' unicode variant, since we have already computed the unicode, and length properly
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
}
} else {
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
total_len[idx] += ( (len<<1) << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
}
}
}
return;
}
#endif
// X86 (flat, non-interleaved) buffers
if (md5_unicode_convert_get(tid)) {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
int z;
unsigned char *cp;
unsigned char *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < outlen; ++z) {
*cp++ = *cpi++;
}
total_len_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
int z;
unsigned char *cp;
unsigned char *cpi = Str;
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < len; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len_X86[j] += (len<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), Str, len);
else
#endif
memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), Str, len);
total_len_X86[j] += len;
}
}
}
/*
 * Identical to __append_string() but targets input buffer 2
 * (input_buf2 / input_buf2_X86, total_len2 / total_len2_X86).
 * See __append_string for the SIMD-vs-X86 and unicode-conversion dispatch.
 */
static inline void __append2_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned len)
{
unsigned j;
unsigned til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
if (!md5_unicode_convert_get(tid)) {
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len2[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
total_len2[idx] += ( len << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
}
} else {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
int outlen;
outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len2[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
total_len2[idx] += ( outlen << ((32/MMX_COEF)*idx_mod));
// note we use the 'non' unicode variant of __SSE_append_string_to_input(), since it's already unicode, and length properly
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
}
} else {
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len2[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
total_len2[idx] += ( (len<<1) << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1);
}
}
}
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, Str, len) * sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
for (; j < til; ++j) {
int z;
unsigned char *cp;
unsigned char *cpi = (unsigned char*)utf16Str;
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < outlen; ++z) {
*cp++ = *cpi++;
}
total_len2_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
int z;
unsigned char *cp;
unsigned char *cpi = Str;
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < len; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len2_X86[j] += (len<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), Str, len);
else
#endif
memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), Str, len);
total_len2_X86[j] += len;
}
}
}
/* Turn ON the per-thread unicode-conversion flag: subsequent append
 * primitives expand/convert appended data to UTF-16. */
void DynamicFunc__setmode_unicode(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
md5_unicode_convert_set(1,tid);
}
/* Turn OFF the per-thread unicode-conversion flag: subsequent append
 * primitives copy raw 8-bit data. */
void DynamicFunc__setmode_normal (DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead.
{
md5_unicode_convert_set(0,tid);
}
/**************************************************************
* DYNAMIC primitive helper function
* Clears the input variable, and input 'lengths'
*************************************************************/
/*
 * Zero input buffer 1 and its lengths for this thread's candidate range.
 * SSE path: wipes whole SIMD blocks covering [first,last) and clears the
 * packed length words.  X86 path: clears only the bytes actually used
 * (COMPUTE_EX_LEN of the current length) per candidate.
 * Non-OpenMP builds delegate to the single-threaded helper.
 */
void DynamicFunc__clean_input(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input();
#else
unsigned i=0;
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
int x = first / MMX_COEF;
int y = (last+MMX_COEF-1) / MMX_COEF;
while (x < y) {
memset(input_buf[x].c, 0, sizeof(input_buf[0]));
total_len[x] = 0;
++x;
}
return;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
}
#endif
}
/*
 * Same as DynamicFunc__clean_input() but for input buffer 2
 * (input_buf2 / input_buf2_X86, total_len2 / total_len2_X86).
 */
void DynamicFunc__clean_input2(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2();
#else
unsigned i=0;
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
int x = first / MMX_COEF;
int y = (last+MMX_COEF-1) / MMX_COEF;
while (x < y) {
memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
total_len2[x] = 0;
++x;
}
return;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
}
#endif
}
/*
 * "Full" clean of input buffer 1: unlike DynamicFunc__clean_input(), the
 * SIMD buffers are wiped unconditionally (no dynamic_use_sse check and no
 * early return), and the X86 buffers are always cleared as well.
 */
void DynamicFunc__clean_input_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input_full();
#else
int i;
#ifdef MMX_COEF
int x = first / MMX_COEF;
int y = (last+MMX_COEF-1) / MMX_COEF;
while (x < y) {
memset(input_buf[x].c, 0, sizeof(input_buf[0]));
total_len[x] = 0;
++x;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
total_len_X86[i] = 0;
}
#endif
}
/*
 * "Full" clean of input buffer 2: wipes both the SIMD and the X86 copies
 * unconditionally (see DynamicFunc__clean_input_full).
 */
void DynamicFunc__clean_input2_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2_full();
#else
int i;
#ifdef MMX_COEF
int x = first / MMX_COEF;
int y = (last+MMX_COEF-1) / MMX_COEF;
while (x < y) {
memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
total_len2[x] = 0;
++x;
}
#endif
for (i = first; i < last; ++i) {
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
total_len2_X86[i] = 0;
}
#endif
}
/*
 * Quick clean of input buffer 1: only resets the lengths, leaving stale
 * bytes in place (the append primitives overwrite them).  Exception: on
 * big-endian builds the X86 buffers are zeroed through the old length + 5
 * (length byte-swapping would otherwise leave garbage visible).
 */
void DynamicFunc__clean_input_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input_kwik();
#else
#ifdef MMX_COEF
int i;
if (dynamic_use_sse==1) {
int x = first / MMX_COEF;
int y = (last+MMX_COEF-1) / MMX_COEF;
while (x < y)
total_len[x++] = 0;
return;
}
#else
int i;
#endif
for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (i&1)
memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, total_len_X86[i]+5);
else
#endif
memset(input_buf_X86[i>>MD5_X2].x1.b, 0, total_len_X86[i]+5);
#endif
total_len_X86[i] = 0;
}
#endif
}
/*
 * Quick clean of input buffer 2: lengths only (see
 * DynamicFunc__clean_input_kwik for the big-endian exception).
 */
void DynamicFunc__clean_input2_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
__nonMP_DynamicFunc__clean_input2_kwik();
#else
#ifdef MMX_COEF
int i;
if (dynamic_use_sse==1) {
int x = first / MMX_COEF;
int y = (last+MMX_COEF-1) / MMX_COEF;
while (x < y)
total_len2[x++] = 0;
return;
}
#else
int i;
#endif
for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (i&1)
memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, total_len2_X86[i]+5);
else
#endif
memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, total_len2_X86[i]+5);
#endif
total_len2_X86[i] = 0;
}
#endif
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends all keys to the end of the input variables, and
* updates lengths
*************************************************************/
/*
 * Append each candidate's key (saved_key[j], saved_key_len[j]) to input
 * buffer 1, updating lengths.  Honors the per-thread unicode flag:
 * non-Latin targets convert each key via enc_to_utf16() (on truncation,
 * saved_key_len is clipped to what converted); ASCII/ISO-8859-1 targets
 * expand bytes to UTF-16LE inline.  SIMD and X86 layouts handled separately.
 */
void DynamicFunc__append_keys(DYNA_OMP_PARAMS)
{
unsigned j;
unsigned til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
if (md5_unicode_convert_get(tid)) {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
int outlen;
int maxlen=27;
if (curdat.pSetup->MaxInputLen < maxlen)
maxlen = curdat.pSetup->MaxInputLen;
outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
// conversion truncated: clip the stored key length to what fit
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
total_len[idx] += ( outlen << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
} else {
total_len[idx] += ( ((saved_key_len[j])<<1) << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
} else {
total_len[idx] += (saved_key_len[j] << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
for (; j < til; ++j) {
int z;
unsigned char *cp, *cpi;
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
*cp++ = *cpi++;
total_len_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
int z;
unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
for (z = 0; z < saved_key_len[j]; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len_X86[j] += (saved_key_len[j]<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
else
#endif
memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
total_len_X86[j] += saved_key_len[j];
}
}
}
// DynamicFunc__append_keys_pad16
// append the array of keys to the array input1[], padding with nulls to 16 bytes, if input shorter.
// Needed for net-md5 and net-sha1 formats.
/*
 * Append each key to input buffer 1, zero-padded to 16 bytes when shorter
 * (keys >= 16 bytes are appended unpadded at their real length in the SSE
 * path).  strncpy provides the zero-padding of the copied region.
 * NOTE(review): the X86 path always adds 16 to the length even if the key
 * is longer -- presumably keys are already limited to <= 16 by the format
 * setup for net-md5/net-sha1; confirm against callers.
 */
void DynamicFunc__append_keys_pad16(DYNA_OMP_PARAMS)
{
unsigned j;
unsigned til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
if (saved_key_len[j] < 16) {
char buf[17];
strncpy(buf, saved_key[j], 17);
total_len[idx] += (16 << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,16,bf_ptr,1);
} else {
total_len[idx] += (saved_key_len[j] << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
for (; j < til; ++j) {
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
if (j&1)
strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 17);
else
#endif
strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 17);
total_len_X86[j] += 16;
}
}
/*
 * Same as DynamicFunc__append_keys_pad16 but pads to 20 bytes
 * (net-sha1 style).
 */
void DynamicFunc__append_keys_pad20(DYNA_OMP_PARAMS)
{
unsigned j;
unsigned til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
if (saved_key_len[j] < 20) {
char buf[21];
strncpy(buf, saved_key[j], 21);
total_len[idx] += (20 << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,20,bf_ptr,1);
} else {
total_len[idx] += (saved_key_len[j] << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
for (; j < til; ++j) {
saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
if (j&1)
strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 21);
else
#endif
strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 21);
total_len_X86[j] += 20;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends all keys to the end of the 2nd input variables, and
* updates lengths
*************************************************************/
/*
 * Same as DynamicFunc__append_keys() but appends to input buffer 2
 * (input_buf2 / input_buf2_X86, total_len2 / total_len2_X86).
 */
void DynamicFunc__append_keys2(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
for (; j < til; ++j) {
unsigned idx = (j>>(MMX_COEF>>1));
unsigned idx_mod = j&(MMX_COEF-1);
unsigned bf_ptr = (total_len2[idx] >> ((32/MMX_COEF)*idx_mod)) & 0xFF;
if (md5_unicode_convert_get(tid)) {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
int outlen;
int maxlen=27;
if (curdat.pSetup->MaxInputLen < maxlen)
maxlen = curdat.pSetup->MaxInputLen;
outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
// conversion truncated: clip the stored key length to what fit
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
total_len2[idx] += ( outlen << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
} else {
total_len2[idx] += ( (saved_key_len[j]<<1) << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
} else {
total_len2[idx] += (saved_key_len[j] << ((32/MMX_COEF)*idx_mod));
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
}
}
return;
}
#endif
if (md5_unicode_convert_get(tid)) {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
for (; j < til; ++j) {
int z;
unsigned char *cp, *cpi;
UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
int outlen;
outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
if (outlen <= 0) {
saved_key_len[j] = -outlen / sizeof(UTF16);
if (outlen < 0)
outlen = strlen16(utf16Str) * sizeof(UTF16);
}
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
*cp++ = *cpi++;
total_len2_X86[j] += outlen;
}
} else {
for (; j < til; ++j) {
int z;
unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
#if MD5_X2
if (j&1)
cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]);
else
#endif
cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]);
for (z = 0; z < saved_key_len[j]; ++z) {
*cp++ = *cpi++;
*cp++ = 0;
}
total_len2_X86[j] += (saved_key_len[j]<<1);
}
}
} else {
for (; j < til; ++j) {
#if MD5_X2
if (j&1)
memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
else
#endif
memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), saved_key[j], saved_key_len[j]);
total_len2_X86[j] += saved_key_len[j];
}
}
}
/*
 * Force the length of input buffer 1 to exactly 16 bytes per candidate,
 * zero-padding shorter inputs.  SSE path: clears any 0x80 marker before
 * offset 16, writes a fresh 0x80 at 16, then rewrites the packed per-lane
 * length word so every lane reads 16 (0x10).  X86 path: zero-fills up to
 * 16, then sets the scalar length.
 */
void DynamicFunc__set_input_len_16(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned k;
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
// If length is < 16, then remove existing end of buffer marker, and then set
// one at offset 16
unsigned cur_block_len = total_len[j];
for (k = 0; k < MMX_COEF; ++k) {
unsigned this_item_len = cur_block_len & 0xFF;
#if (MMX_COEF==4)
cur_block_len >>= 8;
#else
cur_block_len >>= 16;
#endif
if (this_item_len < 16)
input_buf[j].c[GETPOS(this_item_len, k&(MMX_COEF-1))] = 0x00;
input_buf[j].c[GETPOS(16, k&(MMX_COEF-1))] = 0x80;
}
#if (MMX_COEF==4)
total_len[j] = 0x10101010;
#else
total_len[j] = 0x100010;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
if (j&1) {
while (total_len_X86[j] < 16)
input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
}
else
#endif
{while (total_len_X86[j] < 16)
input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
total_len_X86[j] = 16;
}
}
/*
 * Same as DynamicFunc__set_input_len_16 but for input buffer 2.
 */
void DynamicFunc__set_input2_len_16(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned k;
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
// If length is < 16, then remove existing end of buffer marker, and then set
// one at offset 16
unsigned cur_block_len = total_len2[j];
for (k = 0; k < MMX_COEF; ++k) {
unsigned this_item_len = cur_block_len & 0xFF;
#if (MMX_COEF==4)
cur_block_len >>= 8;
#else
cur_block_len >>= 16;
#endif
if (this_item_len < 16)
input_buf2[j].c[GETPOS(this_item_len, k&(MMX_COEF-1))] = 0x00;
input_buf2[j].c[GETPOS(16, k&(MMX_COEF-1))] = 0x80;
}
#if (MMX_COEF==4)
total_len2[j] = 0x10101010;
#else
total_len2[j] = 0x100010;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
if (j&1) {
while (total_len2_X86[j] < 16)
input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
}
else
#endif
{while (total_len2_X86[j] < 16)
input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
total_len2_X86[j] = 16;
}
}
/*
 * Force the length of input buffer 1 to exactly 20 bytes per candidate,
 * zero-padding shorter inputs; packed SSE length word encodes 20 (0x14)
 * per lane.  See DynamicFunc__set_input_len_16 for the mechanism.
 */
void DynamicFunc__set_input_len_20(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned k;
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
// If length is < 20, then remove existing end of buffer marker, and then set
// one at offset 20
unsigned cur_block_len = total_len[j];
for (k = 0; k < MMX_COEF; ++k) {
unsigned this_item_len = cur_block_len & 0xFF;
#if (MMX_COEF==4)
cur_block_len >>= 8;
#else
cur_block_len >>= 16;
#endif
if (this_item_len < 20)
input_buf[j].c[GETPOS(this_item_len, k&(MMX_COEF-1))] = 0x00;
input_buf[j].c[GETPOS(20, k&(MMX_COEF-1))] = 0x80;
}
#if (MMX_COEF==4)
total_len[j] = 0x14141414;
#else
total_len[j] = 0x140014;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
#if MD5_X2
if (j&1) {
while (total_len_X86[j] < 20)
input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
}
else
#endif
{while (total_len_X86[j] < 20)
input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
total_len_X86[j] = 20;
}
}
/*
 * Force the length of input buffer 2 to exactly 20 bytes per candidate,
 * zero-padding shorter inputs.  SSE path: clears any 0x80 end-of-buffer
 * marker before offset 20, writes a fresh one at 20, and rewrites the
 * packed per-lane length word so every lane reads 20 (0x14).  X86 path:
 * zero-fills up to 20, then sets the scalar length.
 */
void DynamicFunc__set_input2_len_20(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned k;
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
// If length is < 20, then remove existing end of buffer marker, and then set
// one at offset 20
unsigned cur_block_len = total_len2[j];
for (k = 0; k < MMX_COEF; ++k) {
unsigned this_item_len = cur_block_len & 0xFF;
#if (MMX_COEF==4)
cur_block_len >>= 8;
#else
cur_block_len >>= 16;
#endif
if (this_item_len < 20)
input_buf2[j].c[GETPOS(this_item_len, k&(MMX_COEF-1))] = 0x00;
input_buf2[j].c[GETPOS(20, k&(MMX_COEF-1))] = 0x80;
}
#if (MMX_COEF==4)
total_len2[j] = 0x14141414;
#else
// BUGFIX: was 0x100014, which set lane 1's length to 16 instead of 20.
// Both 16-bit halves must encode 0x14, matching the MMX_COEF==4 value
// (0x14141414) and DynamicFunc__set_input_len_20 (0x140014).
total_len2[j] = 0x140014;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
#if MD5_X2
if (j&1) {
while (total_len2_X86[j] < 20)
input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
}
else
#endif
{while (total_len2_X86[j] < 20)
input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
total_len2_X86[j] = 20;
}
}
/*
 * Force the length of input buffer 1 to 32 bytes per candidate (typical
 * after a hex-digest append, so no old marker removal is needed).  SSE
 * path: writes 0x80 at offset 32 in every lane and sets the packed length
 * word to 32 (0x20) per lane.  X86 path: sets the scalar length; on
 * big-endian builds also zeroes the 24 bytes after offset 32.
 */
void DynamicFunc__set_input_len_32(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
unsigned k;
for (k = 0; k < MMX_COEF; ++k)
input_buf[j].c[GETPOS(32, k&(MMX_COEF-1))] = 0x80;
#if (MMX_COEF==4)
total_len[j] = 0x20202020;
#else
total_len[j] = 0x200020;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
total_len_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
//MD5_swap(input_buf_X86[j>>MD5_X2].x2.w2, input_buf_X86[j>>MD5_X2].x2.w2, 8);
memset(&(input_buf_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
}
else
#endif
{
//MD5_swap(input_buf_X86[j>>MD5_X2].x1.w, input_buf_X86[j>>MD5_X2].x1.w, 8);
memset(&(input_buf_X86[j>>MD5_X2].x1.B[32]), 0, 24);
}
#endif
}
}
/*
 * Same as DynamicFunc__set_input_len_32 but for input buffer 2.
 */
void DynamicFunc__set_input2_len_32(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
unsigned k;
for (k = 0; k < MMX_COEF; ++k)
input_buf2[j].c[GETPOS(32, k&(MMX_COEF-1))] = 0x80;
#if (MMX_COEF==4)
total_len2[j] = 0x20202020;
#else
total_len2[j] = 0x200020;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
total_len2_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
//MD5_swap(input_buf2_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
}
else
#endif
{
//MD5_swap(input_buf2_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
memset(&(input_buf2_X86[j>>MD5_X2].x1.B[32]), 0, 24);
}
#endif
}
}
/*
 * Force the length of input buffer 1 to 40 bytes per candidate (typical
 * after a SHA-1 hex-digest append).  SSE path: 0x80 at offset 40, packed
 * length = 40 (0x28) per lane.  X86 path: scalar length; on big-endian
 * builds also zeroes 16 bytes after offset 40.
 */
void DynamicFunc__set_input_len_40(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
unsigned k;
for (k = 0; k < MMX_COEF; ++k)
input_buf[j].c[GETPOS(40, k&(MMX_COEF-1))] = 0x80;
#if (MMX_COEF==4)
total_len[j] = 0x28282828;
#else
total_len[j] = 0x280028;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
total_len_X86[j] = 40;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
memset(&(input_buf_X86[j>>MD5_X2].x2.B2[40]), 0, 16);
}
else
#endif
{
memset(&(input_buf_X86[j>>MD5_X2].x1.B[40]), 0, 16);
}
#endif
}
}
/*
 * Same as DynamicFunc__set_input_len_40 but for input buffer 2.
 */
void DynamicFunc__set_input2_len_40(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
j /= MMX_COEF;
til = (til+MMX_COEF-1)/MMX_COEF;
for (; j < til; ++j)
{
unsigned k;
for (k = 0; k < MMX_COEF; ++k)
input_buf2[j].c[GETPOS(40, k&(MMX_COEF-1))] = 0x80;
#if (MMX_COEF==4)
total_len2[j] = 0x28282828;
#else
total_len2[j] = 0x280028;
#endif
}
return;
}
#endif
for (; j < til; ++j)
{
total_len2_X86[j] = 40;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
if (j&1) {
memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[40]), 0, 16);
}
else
#endif
{
memset(&(input_buf2_X86[j>>MD5_X2].x1.B[40]), 0, 16);
}
#endif
}
}
/*
 * Set the effective length of input buffer 1 to 64 bytes for every
 * candidate in this thread's range.  Only valid in X86 (flat) mode; in
 * SSE2/MMX mode a 64-byte input would not fit the SIMD block layout, so
 * this is a scripting error and we abort.
 */
void DynamicFunc__set_input_len_64(DYNA_OMP_PARAMS)
{
unsigned idx, stop;
#ifdef _OPENMP
idx = first;
stop = last;
#else
idx = 0;
stop = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1)
exit(!!fprintf(stderr, "Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_64 in SSE2/MMX mode\n"));
#endif
while (idx < stop)
total_len_X86[idx++] = 64;
}
/*
 * Set the effective length of input buffer 2 to 64 bytes for every
 * candidate in this thread's range.  Only valid in X86 (flat) mode; in
 * SSE2/MMX mode this is a scripting error and we abort.
 */
void DynamicFunc__set_input2_len_64(DYNA_OMP_PARAMS)
{
unsigned idx, stop;
#ifdef _OPENMP
idx = first;
stop = last;
#else
idx = 0;
stop = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1)
exit(!!fprintf(stderr, "Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_64 in SSE2/MMX mode\n"));
#endif
while (idx < stop)
total_len2_X86[idx++] = 64;
}
/*
 * Set the effective length of input buffer 1 to 100 bytes per candidate.
 * X86 only (aborts in SSE mode).  Before setting the length, zeroes any
 * non-zero bytes from the current end of data onward (stops at the first
 * zero byte already present).
 */
void DynamicFunc__set_input_len_100(DYNA_OMP_PARAMS)
{
unsigned j, til;
#ifdef _OPENMP
til = last;
j = first;
#else
j = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
exit(!!fprintf(stderr, "Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_100 in SSE2/MMX mode\n"));
}
#endif
for (; j < til; ++j) {
unsigned char *cp;
#if MD5_X2
if (j&1)
cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
else
#endif
cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
while (*cp)
*cp++ = 0;
total_len_X86[j] = 100;
}
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the input variables, and
* updates lengths
*************************************************************/
/* Thin wrapper: append the current salt (cursalt/saltlen) to input buffer 1
 * via __append_string, forwarding the thread-range parameters. */
void DynamicFunc__append_salt(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
/**************************************************************
* DYNAMIC primitive helper function
* Appends the salt to the end of the 2nd input variables, and
* updates lengths
*************************************************************/
/* Thin wrapper: append the current salt (cursalt/saltlen) to input buffer 2
 * via __append2_string, forwarding the thread-range parameters. */
void DynamicFunc__append_salt2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}
/*
 * Append the contents of input buffer 2 onto the end of input buffer 1 for
 * each candidate, updating buffer-1 lengths.  SSE path copies bytewise via
 * GETPOS between the interleaved lanes and re-places the 0x80 marker; X86
 * path is a plain memcpy.
 */
void DynamicFunc__append_input_from_input2(DYNA_OMP_PARAMS)
{
unsigned i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned j, k;
til = (til+MMX_COEF-1)/MMX_COEF;
i /= MMX_COEF;
for (; i < til; ++i)
{
for (j = 0; j < MMX_COEF; ++j)
{
unsigned start_len = (total_len[i] >> ((32/MMX_COEF)*j)) & 0xFF;
unsigned len1 = (total_len2[i] >> ((32/MMX_COEF)*j)) & 0xFF;
for (k = 0; k < len1; ++k)
input_buf[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
input_buf[i].c[GETPOS((len1+start_len), j)] = 0x80;
total_len[i] += ( len1 << ( ( (32/MMX_COEF) * j ) ));
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
else
#endif
memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
total_len_X86[i] += total_len2_X86[i];
}
}
/*
 * Mirror of DynamicFunc__append_input_from_input2: appends input buffer 1
 * onto the end of input buffer 2, updating buffer-2 lengths.
 */
void DynamicFunc__append_input2_from_input(DYNA_OMP_PARAMS)
{
unsigned i, til;
#ifdef _OPENMP
til = last;
i = first;
#else
i = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned j, k;
til = (til+MMX_COEF-1)/MMX_COEF;
i /= MMX_COEF;
for (; i < til; ++i)
{
for (j = 0; j < MMX_COEF; ++j)
{
unsigned start_len = (total_len2[i] >> ((32/MMX_COEF)*j)) & 0xFF;
unsigned len1 = (total_len[i] >> ((32/MMX_COEF)*j)) & 0xFF;
for (k = 0; k < len1; ++k)
input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
input_buf2[i].c[GETPOS((len1+start_len), j)] = 0x80;
total_len2[i] += ( len1 << ( ( (32/MMX_COEF) * j ) ));
}
}
return;
}
#endif
for (; i < til; ++i)
{
#if MD5_X2
if (i&1)
memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
else
#endif
memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
total_len2_X86[i] += total_len_X86[i];
}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends input buffer 1 onto itself (i.e. doubles the data in
 * place), doubling the stored lengths. The copy is safe with
 * memcpy because source [0,len) and destination [len,2*len) do
 * not overlap.
 *************************************************************/
void DynamicFunc__append_input_from_input(DYNA_OMP_PARAMS)
{
	unsigned i, til;
#ifdef _OPENMP
	til = last;		// this thread handles candidates [first, last)
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned j, k;
		til = (til+MMX_COEF-1)/MMX_COEF;
		i /= MMX_COEF;
		for (; i < til; ++i)
		{
			for (j = 0; j < MMX_COEF; ++j)
			{
				// current lane length (8 bits per lane inside total_len)
				unsigned start_len = (total_len[i] >> ((32/MMX_COEF)*j)) & 0xFF;
				for (k = 0; k < start_len; ++k)
					input_buf[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
				input_buf[i].c[GETPOS((start_len+start_len), j)] = 0x80;	// MD5 padding byte
				total_len[i] += ( start_len << ( ( (32/MMX_COEF) * j ) ));
			}
		}
		return;
	}
#endif
	for (; i < til; ++i)
	{
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
		else
#endif
		memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
		total_len_X86[i] <<= 1;		// length doubles
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends input buffer 2 onto itself (doubles the data in
 * place) and doubles the stored lengths; mirror of
 * DynamicFunc__append_input_from_input for the 2nd buffer.
 *************************************************************/
void DynamicFunc__append_input2_from_input2(DYNA_OMP_PARAMS)
{
	unsigned i, til;
#ifdef _OPENMP
	til = last;		// this thread handles candidates [first, last)
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned j, k;
		til = (til+MMX_COEF-1)/MMX_COEF;
		i /= MMX_COEF;
		for (; i < til; ++i)
		{
			for (j = 0; j < MMX_COEF; ++j)
			{
				unsigned start_len = (total_len2[i] >> ((32/MMX_COEF)*j)) & 0xFF;
				for (k = 0; k < start_len; ++k)
					input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
				input_buf2[i].c[GETPOS((start_len+start_len), j)] = 0x80;	// MD5 padding byte
				total_len2[i] += ( start_len << ( ( (32/MMX_COEF) * j ) ));
			}
		}
		return;
	}
#endif
	for (; i < til; ++i)
	{
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
		else
#endif
		memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
		total_len2_X86[i] <<= 1;	// length doubles
	}
}
#ifdef MD5_SSE_PARA
/* Writes the per-lane message bit-lengths into word 14 of each 64-byte
 * MD5 block for MD5_SSE_PARA consecutive interleaved buffers, as the
 * SSE md5/md4 body routines expect when given SSEi_MIXED_IN data.
 * side==0 loads from input_buf/total_len, side!=0 from
 * input_buf2/total_len2.  Lengths are stored as bytes packed into the
 * total_len word (8 bits per lane), hence the (TL>>n)&0xFF extraction;
 * the <<3 converts a byte count to a bit count.
 * NOTE(review): the 4 explicit lane stores suggest this path assumes
 * MMX_COEF==4 — confirm against the build configuration. */
static void SSE_Intrinsics_LoadLens(int side, int i)
{
	ARCH_WORD_32 *p;
	ARCH_WORD_32 TL;
	int j;
	if (side == 0)
	{
		for (j = 0; j < MD5_SSE_PARA; j++)
		{
			p = input_buf[i+j].w;
			TL = (ARCH_WORD_32)total_len[i+j];
			p[14*MMX_COEF+0] = ((TL>>0)&0xFF)<<3;
			p[14*MMX_COEF+1] = ((TL>>8)&0xFF)<<3;
			p[14*MMX_COEF+2] = ((TL>>16)&0xFF)<<3;
			p[14*MMX_COEF+3] = ((TL>>24)&0xFF)<<3;
		}
	}
	else
	{
		for (j = 0; j < MD5_SSE_PARA; j++)
		{
			p = input_buf2[i+j].w;
			TL = (ARCH_WORD_32)total_len2[i+j];
			p[14*MMX_COEF+0] = ((TL>>0)&0xFF)<<3;
			p[14*MMX_COEF+1] = ((TL>>8)&0xFF)<<3;
			p[14*MMX_COEF+2] = ((TL>>16)&0xFF)<<3;
			p[14*MMX_COEF+3] = ((TL>>24)&0xFF)<<3;
		}
	}
}
#endif
/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the first input field. The data is
 * still in the binary encrypted format, in the crypt_key.
 * we do not yet convert to base-16. This is so we can output
 * as base-16, or later, if we add base-64, we can output to
 * that format instead. Some functions do NOT change from
 * the binary format (such as phpass). Thus if we are doing
 * something like phpass, we would NOT want the conversion
 * to happen at all
 *************************************************************/
void DynamicFunc__crypt_md5(DYNA_OMP_PARAMS)
{
	unsigned i, til;
#ifdef _OPENMP
	til = last;		// this thread handles candidates [first, last)
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		// candidate index -> SIMD buffer index (MMX_COEF>>1 equals
		// log2(MMX_COEF) for the supported COEF values of 2 and 4)
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		if (curdat.store_keys_in_input) {
			// keys were stored with padding/lengths already set; no
			// need to load lengths into the blocks first
			for (; i < til; i += MD5_SSE_PARA) {
				SSEmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += MD5_SSE_PARA) {
				SSE_Intrinsics_LoadLens(0, i);
				SSEmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		// two candidates per DoMD5 call; second length is 0 when we
		// run off the end of the loaded candidates
		unsigned len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned len = total_len_X86[i];
#endif
		DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * MD4 counterpart of DynamicFunc__crypt_md5: encrypts input
 * buffer 1 into crypt_key, leaving the result in raw binary.
 *************************************************************/
void DynamicFunc__crypt_md4(DYNA_OMP_PARAMS)
{
	unsigned i, til;
#ifdef _OPENMP
	til = last;		// this thread handles candidates [first, last)
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		if (curdat.store_keys_in_input) {
			for (; i < til; i += MD4_SSE_PARA) {
				SSEmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += MD4_SSE_PARA) {
				// md4 blocks use the same length layout as md5 (side 0)
				SSE_Intrinsics_LoadLens(0, i);
				SSEmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
		unsigned len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned len = total_len_X86[i];
#endif
		DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}
// NOOP stub kept so scripts referencing this primitive do not kill jtr.
// It reports (once) that the primitive has been REMOVED, but it DOES NOT
// shut down john; the format simply produces nothing.
void DynamicFunc__FreeBSDMD5Crypt(DYNA_OMP_PARAMS) {
	static int warned = 0;	// non-zero once the message has been printed

	if (warned)
		return;
	warned = 1;
	fprintf(stderr, "\nERROR, DynamicFunc__FreeBSDMD5Crypt() dynamic primitive is no longer supported.\nThis format is invalid and will not process\n");
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Special crypt to handle the 'looping' needed for phpass:
 * h = md5(salt.pass); then 2^count iterations of h = md5(h.pass),
 * where count is encoded in salt byte 8 (itoa64 digit, 7..31).
 *************************************************************/
void DynamicFunc__PHPassCrypt(DYNA_OMP_PARAMS)
{
	unsigned Lcount;
	// loop-count character lives at offset 8 of the 9-byte phpass salt
	Lcount = atoi64[ARCH_INDEX(cursalt[8])];
	if (Lcount < 7 || Lcount > 31)
		exit(!!fprintf(stderr, "Error, invalid loop byte in a php salt %s\n",cursalt));
	Lcount = (1<<Lcount);	// total number of md5 iterations
	DynamicFunc__clean_input(DYNA_OMP_PARAMSd);
	// First 'round' is md5 of ($s.$p)
	DynamicFunc__append_salt(DYNA_OMP_PARAMSd);
	DynamicFunc__append_keys(DYNA_OMP_PARAMSd);
	// The later rounds (variable number, based upon the salt's first byte)
	// are ALL done as 16 byte md5 result of prior hash, with the password appeneded
	// crypt, and put the 'raw' 16 byte raw crypt data , into the
	// input buffer. We will then append the keys to that, and never
	// have to append the keys again (we just make sure we do NOT adjust
	// the amount of bytes to md5 from this point no
	DynamicFunc__crypt_md5_to_input_raw(DYNA_OMP_PARAMSd);
	// Now append the pass
	DynamicFunc__append_keys(DYNA_OMP_PARAMSd);
	// NOTE last we do 1 less than the required number of crypts in our loop
	DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE(DYNA_OMP_PARAMSd);
#if !ARCH_LITTLE_ENDIAN
	// from this point on, we want to have the binary blobs in 'native' big endian
	// format. Thus, we need to 'unswap' them. Then the call to the
	// DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen will leave the 16 bytes
	// output, in big endian (thus needing no swapping).
	// we only have to 'fix up' the final crypt results.
#if MD5_X2
	MD5_swap2(input_buf_X86[0].x1.w, input_buf_X86[0].x2.w2, input_buf_X86[0].x1.w, input_buf_X86[0].x2.w2, 4);
#else
	MD5_swap(input_buf_X86[0].x1.w, input_buf_X86[0].x1.w, 4);
#endif
#endif
	// two rounds already done above; the loop covers all but the final one
	--Lcount;
	while(--Lcount)
		DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMSd);
	// final crypt is to the normal 'output' buffer, since john uses that to find 'hits'.
#if !ARCH_LITTLE_ENDIAN
	// we have to use this funtion, since we do not want to 'fixup' the
	// end of the buffer again (it has been put into BE format already.
	// Thus, simply use the raw_overwrite again, then swap the output that
	// is found in the input buf to the output buf.
	DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMSd);
#if MD5_X2
	MD5_swap2(input_buf_X86[0].x1.w, input_buf_X86[0].x2.w2, crypt_key_X86[0].x1.w, crypt_key_X86[0].x2.w2, 4);
#else
	MD5_swap(input_buf_X86[0].x1.w, crypt_key_X86[0].x1.w, 4);
#endif
	//dump_stuff_msg("crypt0", crypt_key_X86[0].x1.w, 16);
	//dump_stuff_msg("crypt1", crypt_key_X86[0].x2.w2, 16);
	//{ static int x=0; if (++x == 2) exit(0); }
#else
	// little endian can use 'original' crypt function.
	DynamicFunc__crypt_md5(DYNA_OMP_PARAMSd);
	//dump_stuff_msg("crypt0", crypt_key_X86[0].x1.w, 16);
	//{ static int x=0; if (++x == 8) exit(0); }
#endif
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Special-cased crypt for the 'PO' format.  For each key it
 * computes md5(salt . 'Y' . key . 0xF7 . salt), building the
 * message directly in input buffer 0 which is reused for every
 * candidate (salt prefix written once, key portion overwritten
 * each iteration).  Layout: [0..31]=salt, [32]='Y',
 * [33..33+len)=key, [33+len]=0xF7, [34+len..65+len)=salt,
 * total length len+66.
 *************************************************************/
void DynamicFunc__POCrypt(DYNA_OMP_PARAMS)
{
	unsigned i, j;
	unsigned til, len;
	unsigned char *pBuf;
#if MD5_X2
	unsigned char *pBuf2;
	unsigned lens[2];
#endif
#ifdef _OPENMP
	til = last;		// this thread handles candidates [first, last)
	i = first;
#else
	i = 0;
	til = m_count;
#endif
	// equivalent primitive sequence this function replaces:
	//DynamicFunc__clean_input_kwik();
	//DynamicFunc__append_salt,
	//DynamicFunc__append_input1_from_CONST1,
	//DynamicFunc__append_keys,
	//DynamicFunc__append_input1_from_CONST2,
	//DynamicFunc__append_salt,
	//DynamicFunc__crypt_md5,
	pBuf = input_buf_X86[i>>MD5_X2].x1.B;	// single buffer reused for all j
#if MD5_X2
	pBuf2 = input_buf_X86[i>>MD5_X2].x2.B2;
	memset(pBuf2, 0, sizeof(input_buf_X86[i>>MD5_X2].x2.B2));
	memcpy(pBuf2, cursalt, 32);
	pBuf2[32] = 'Y';
#endif
	memset(pBuf, 0, sizeof(input_buf_X86[i>>MD5_X2].x1.b));
	memcpy(pBuf, cursalt, 32);
	pBuf[32] = 'Y';
	for (j = i; j < til; ++j) {
		len = saved_key_len[j];
		memcpy(&pBuf[33], saved_key[j], len);
		pBuf[33+len] = 0xf7;
		memcpy(&pBuf[34+len], cursalt, 32);
#if MD5_X2
		lens[0] = len+66; // len from the 'first'
		++j;
		// second candidate of the pair goes in the .x2 half
		if (j < m_count) {
			len = saved_key_len[j];
			memcpy(&pBuf2[33], saved_key[j], len);
			pBuf2[33+len] = 0xf7;
			memcpy(&pBuf2[34+len], cursalt, 32);
			lens[1] = len+66;
		} else {
			lens[1] = 0;
		}
		DoMD5(input_buf_X86[i>>MD5_X2], lens, crypt_key_X86[j>>MD5_X2]);
#else
		DoMD5(input_buf_X86[i>>MD5_X2], (len+66), crypt_key_X86[j]);
#endif
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the 2nd input field into crypt_keys2.
 *************************************************************/
void DynamicFunc__crypt2_md5(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		for (; i < til; i += MD5_SSE_PARA) {
			// side 1 = the second input/length buffers
			SSE_Intrinsics_LoadLens(1, i);
			SSEmd5body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned len[2];
		len[0] = total_len2_X86[i++];
		if (i < m_count)
			len[1] = total_len2_X86[i];
		else
			len[1] = 0;	// odd count: second half of the pair is empty
#else
		unsigned len = total_len2_X86[i];
#endif
		DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * MD4 counterpart of DynamicFunc__crypt2_md5: encrypts input
 * buffer 2 into crypt_keys2.
 *************************************************************/
void DynamicFunc__crypt2_md4(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		for (; i < til; i += MD4_SSE_PARA) {
			SSE_Intrinsics_LoadLens(1, i);
			SSEmd4body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
		unsigned len[2];
		len[0] = total_len2_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len2_X86[i];
#else
		unsigned len = total_len2_X86[i];
#endif
		DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the 1st input field into crypt_keys2.
 *************************************************************/
void DynamicFunc__crypt_md5_in1_to_out2(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		if (curdat.store_keys_in_input) {
			// lengths/padding already present in the stored keys
			for (; i < til; i += MD5_SSE_PARA) {
				SSEmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += MD5_SSE_PARA) {
				SSE_Intrinsics_LoadLens(0, i);
				SSEmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned len = total_len_X86[i];
#endif
		DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * MD4 counterpart: encrypts the 1st input field into
 * crypt_keys2.
 *************************************************************/
void DynamicFunc__crypt_md4_in1_to_out2(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		if (curdat.store_keys_in_input) {
			for (; i < til; i += MD4_SSE_PARA) {
				SSEmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += MD4_SSE_PARA) {
				SSE_Intrinsics_LoadLens(0, i);
				SSEmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
		unsigned len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned len = total_len_X86[i];
#endif
		DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the 2nd input field into crypt_keys.
 *************************************************************/
void DynamicFunc__crypt_md5_in2_to_out1(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		for (; i < til; i += MD5_SSE_PARA)
		{
			SSE_Intrinsics_LoadLens(1, i);
			SSEmd5body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			//dump_stuff_mmx_msg("DynamicFunc__crypt_md5_in2_to_out1", input_buf2[i].c,64,m_count-1);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned len[2];
		len[0] = total_len2_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len2_X86[i];
#else
		unsigned len = total_len2_X86[i];
#endif
		DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * MD4 counterpart: encrypts the 2nd input field into
 * crypt_keys.
 *************************************************************/
void DynamicFunc__crypt_md4_in2_to_out1(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		for (; i < til; i += MD4_SSE_PARA)
		{
			SSE_Intrinsics_LoadLens(1, i);
			SSEmd4body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
		unsigned len[2];
		len[0] = total_len2_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len2_X86[i];
#else
		unsigned len = total_len2_X86[i];
#endif
		DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * md5's input buffer 1 and writes the RAW 16-byte digest back
 * into input buffer 1 (clearing the rest of the buffer), then
 * sets the stored length to 16 for every candidate.  Used as
 * the first iteration step of looping crypts such as phpass.
 *************************************************************/
void DynamicFunc__crypt_md5_to_input_raw(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		for (; i < til; i += MD5_SSE_PARA)
		{
			unsigned j;
			SSE_Intrinsics_LoadLens(0, i);
			// NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
			// each, and we are doing 3 at a time, we can NOT directly write to the
			// input buff, but have to use the crypt_key buffer, and then memcpy when done.
			SSEmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			for (j = 0; j < MD5_SSE_PARA; ++j)
			{
				memset(input_buf[i+j].c, 0, sizeof(input_buf[0]));
				memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*4);
				// length 16 (0x10) packed into every lane's 8-bit slot
				total_len[i+j] = 0x10101010;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned len[2];
		len[0] = total_len_X86[i];
		total_len_X86[i++] = 0x10;	// digest length for the first of the pair
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned len = total_len_X86[i];
#endif
		// in-place: output digest overwrites the input buffer
		DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
		total_len_X86[i] = 0x10;
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Like DynamicFunc__crypt_md5_to_input_raw, but does NOT touch
 * the stored lengths in the x86 path (they were set by a prior
 * call); in the SSE path it still loads the lane lengths into
 * the blocks before hashing.  Overwrites the first 16 bytes of
 * input buffer 1 with the raw digest.
 *************************************************************/
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>=(MMX_COEF>>1);
		for (; i < til; i += MD5_SSE_PARA)
		{
			unsigned j;
			SSE_Intrinsics_LoadLens(0, i);
			// NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
			// each, and we are doing 3 at a time, we can NOT directly write to the
			// input buff, but have to use the crypt_key buffer, and then memcpy when done.
			SSEmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			for (j = 0; j < MD5_SSE_PARA; ++j)
				memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*4);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned len = total_len_X86[i];
#endif
		// in-place digest overwrite; lengths intentionally left alone
		DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Inner-loop step for looping crypts: md5's input buffer 1 and
 * overwrites its first 16 bytes with the raw digest, changing
 * neither the stored lengths nor (in SSE) the block length
 * words — both were set by a prior '..._but_setlen_in_SSE'
 * call, which is what makes the phpass loop cheap.
 *************************************************************/
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		til = (til+MMX_COEF-1)>>(MMX_COEF>>1);
		i >>= (MMX_COEF>>1);
		for (; i < til; i += MD5_SSE_PARA)
		{
			unsigned j;
			// NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
			// each, and we are doing 3 at a time, we can NOT directly write to the
			// input buff, but have to use the crypt_key buffer, and then memcpy when done.
			SSEmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			for (j = 0; j < MD5_SSE_PARA; ++j)
				memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*4);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned len = total_len_X86[i];
#endif
		// we call DoMD5o so as to 'not' change then length (it was already set)
		DoMD5o(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Overwrites the START of input buffer 1 with the salt (ASCII
 * or converted to UTF-16 when unicode conversion is active)
 * WITHOUT adjusting the stored lengths — callers rely on the
 * previously-set lengths remaining valid.
 *************************************************************/
void DynamicFunc__overwrite_salt_to_input1_no_size_fix(DYNA_OMP_PARAMS)
{
	int j, til;
#ifdef _OPENMP
	j = first;		// this thread handles candidates [first, last)
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		if (md5_unicode_convert_get(tid)) {
			if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
				// non-latin1 encodings need a real conversion pass first
				UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
				int outlen;
				outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
				if (outlen < 0)
					outlen = strlen16(utf16Str) * sizeof(UTF16);	// conversion truncated; use what we got
				for (; j < til; ++j) {
					__SSE_append_string_to_input(input_buf[j>>(MMX_COEF>>1)].c,j&(MMX_COEF-1),(unsigned char*)utf16Str,outlen,0,0);
				}
			} else {
				// latin1: cheap inline expansion to UTF-16 (zero high bytes)
				for (; j < til; ++j)
					__SSE_append_string_to_input_unicode(input_buf[j>>(MMX_COEF>>1)].c,j&(MMX_COEF-1),(unsigned char*)cursalt,saltlen,0,0);
			}
			return;
		}
		for (; j < til; ++j)
			__SSE_append_string_to_input(input_buf[j>>(MMX_COEF>>1)].c,j&(MMX_COEF-1),cursalt,saltlen,0,0);
		return;
	}
#endif
	if (md5_unicode_convert_get(tid)) {
		if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
			UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
			int outlen;
			outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
			if (outlen < 0)
				outlen = strlen16(utf16Str) * sizeof(UTF16);
			for (; j < til; ++j) {
				int z;
				unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
				if (j&1)
					cp = input_buf_X86[j>>MD5_X2].x2.B2;
				else
#endif
				cp = input_buf_X86[j>>MD5_X2].x1.B;
				// byte-wise copy of the converted UTF-16 data over the buffer start
				for (z = 0; z < outlen; ++z)
					*cp++ = *cpi++;
			}
		} else {
			for (; j < til; ++j) {
				int z;
				unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
				if (j&1)
					cp = input_buf_X86[j>>MD5_X2].x2.B2;
				else
#endif
				cp = input_buf_X86[j>>MD5_X2].x1.B;
				// latin1 -> UTF-16LE: each salt byte followed by a zero byte
				for (z = 0; z < saltlen; ++z) {
					*cp++ = *cpi++;
					*cp++ = 0;
				}
			}
		}
		return;
	}
	// plain (non-unicode) path: raw overwrite of the buffer start
	for (; j < til; ++j) {
#if MD5_X2
		if (j&1)
			memcpy(input_buf_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
		else
#endif
		memcpy(input_buf_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Mirror of DynamicFunc__overwrite_salt_to_input1_no_size_fix
 * for input buffer 2: overwrites the buffer start with the
 * salt (ASCII or UTF-16) without touching the stored lengths.
 *************************************************************/
void DynamicFunc__overwrite_salt_to_input2_no_size_fix(DYNA_OMP_PARAMS)
{
	int j, til;
#ifdef _OPENMP
	j = first;		// this thread handles candidates [first, last)
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		if (md5_unicode_convert_get(tid)) {
			if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
				UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
				int outlen;
				outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
				if (outlen < 0)
					outlen = strlen16(utf16Str) * sizeof(UTF16);	// conversion truncated; use what we got
				for (; j < til; ++j) {
					__SSE_append_string_to_input(input_buf2[j>>(MMX_COEF>>1)].c,j&(MMX_COEF-1),(unsigned char*)utf16Str,outlen,0,0);
				}
			} else {
				for (; j < til; ++j)
					__SSE_append_string_to_input_unicode(input_buf2[j>>(MMX_COEF>>1)].c,j&(MMX_COEF-1),(unsigned char*)cursalt,saltlen,0,0);
			}
			return;
		}
		for (; j < til; ++j)
			__SSE_append_string_to_input(input_buf2[j>>(MMX_COEF>>1)].c,j&(MMX_COEF-1),cursalt,saltlen,0,0);
		return;
	}
#endif
	if (md5_unicode_convert_get(tid)) {
		if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1) {
			UTF16 utf16Str[EFFECTIVE_MAX_LENGTH / 3 + 1];
			int outlen;
			outlen = enc_to_utf16(utf16Str, EFFECTIVE_MAX_LENGTH / 3, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
			if (outlen < 0)
				outlen = strlen16(utf16Str) * sizeof(UTF16);
			for (; j < til; ++j) {
				int z;
				unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
				if (j&1)
					cp = input_buf2_X86[j>>MD5_X2].x2.B2;
				else
#endif
				cp = input_buf2_X86[j>>MD5_X2].x1.B;
				for (z = 0; z < outlen; ++z)
					*cp++ = *cpi++;
			}
		} else {
			for (; j < til; ++j) {
				int z;
				unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
				if (j&1)
					cp = input_buf2_X86[j>>MD5_X2].x2.B2;
				else
#endif
				cp = input_buf2_X86[j>>MD5_X2].x1.B;
				// latin1 -> UTF-16LE: each salt byte followed by a zero byte
				for (z = 0; z < saltlen; ++z) {
					*cp++ = *cpi++;
					*cp++ = 0;
				}
			}
		}
		return;
	}
	// plain (non-unicode) path: raw overwrite of the buffer start
	for (; j < til; ++j) {
#if MD5_X2
		if (j&1)
			memcpy(input_buf2_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
		else
#endif
		memcpy(input_buf2_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input1 from the output2 data using base-16
 * (32 hex chars per 16-byte digest); stored lengths are NOT
 * adjusted — the caller must have set them already.
 *************************************************************/
void DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	int j, til;
#ifdef _OPENMP
	j = first;		// this thread handles candidates [first, last)
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned idx;
		for (; j < til; ++j)
		{
			idx = ( ((unsigned)j)>>(MMX_COEF>>1));	// SIMD buffer holding lane j
			__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, j&(MMX_COEF-1));
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		unsigned char *cpo, *cpi;
		unsigned i;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
		{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		// emit two hex digits per digest byte (dynamic_itoa16 honors
		// the format's upper/lower-case choice)
		for (i = 0; i < 16; ++i, ++cpi)
		{
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
		// if swapped, then HDAA fails on big endian systems.
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input1 from the output1 data using base-16
 * (32 hex chars per 16-byte digest); stored lengths are NOT
 * adjusted — the caller must have set them already.
 *************************************************************/
void DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	int j, til;
#ifdef _OPENMP
	j = first;		// this thread handles candidates [first, last)
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned idx;
		for (; j < til; ++j)
		{
			idx = ( ((unsigned)j)>>(MMX_COEF>>1));	// SIMD buffer holding lane j
			__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(MMX_COEF-1));
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		unsigned char *cpo, *cpi;
		unsigned i;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
		{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		// two hex digits per digest byte
		for (i = 0; i < 16; ++i, ++cpi)
		{
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
		// if swapped, then HDAA fails on big endian systems.
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys (the encrypted
 * 'first' key variable), and use a base-16 text formatting, and
 * append this to the first input buffer (adjusting the lengths)
 *************************************************************/
void DynamicFunc__append_from_last_output_as_base16(DYNA_OMP_PARAMS)
{
	int j, til;
#ifdef _OPENMP
	j = first;		// this thread handles candidates [first, last)
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned idx;
		for (; j < til; ++j)
		{
			unsigned ip;
			idx = ( ((unsigned)j)>>(MMX_COEF>>1));
			// This is the 'actual' work.
			// ip = current lane length (insert position); then bump the
			// packed length by 32 for the hex digits we are appending
			ip = (total_len[idx] >> ((32/MMX_COEF)*(j&(MMX_COEF-1)))) & 0xFF;
			total_len[idx] += (32<<((32/MMX_COEF)*(j&(MMX_COEF-1))));
			// choose the copy routine by the alignment of the insert point
			if (!ip)
				__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(MMX_COEF-1));
			else if (ip&1)
			{
				// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
				unsigned k;
				for (k = 0; k < 16; ++k)
				{
					unsigned char v = crypt_key[idx].c[GETPOS(k, j&(MMX_COEF-1))];
					input_buf[idx].c[GETPOS(ip+(k<<1), j&(MMX_COEF-1))] = dynamic_itoa16[v>>4];
					input_buf[idx].c[GETPOS(ip+(k<<1)+1, j&(MMX_COEF-1))] = dynamic_itoa16[v&0xF];
				}
				input_buf[idx].c[GETPOS(ip+32, j&(MMX_COEF-1))] = 0x80;	// MD5 padding byte
			}
			else if ((ip&3)==0)
				__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key[idx].c, j&(MMX_COEF-1));
			else
				__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key[idx].c, j&(MMX_COEF-1));
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		unsigned char *cp, *cpi;
		unsigned i;
#if MD5_X2
		if (j&1)
			{cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); cpi =  crypt_key_X86[j>>MD5_X2].x2.B2; }
		else
#endif
		{cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x1.B; }
		for (i = 0; i < 16; ++i)
		{
#if ARCH_ALLOWS_UNALIGNED
			// itoa16_w2 maps a byte straight to its two-hex-digit 16-bit pattern
			*((unsigned short*)cp) = itoa16_w2[*cpi++];
			cp += 2;
#else
			unsigned char b = *cpi++;
			*cp++ = dynamic_itoa16[b>>4];
			*cp++ = dynamic_itoa16[b&0xF];
#endif
		}
		*cp = 0;	// keep the buffer NUL-terminated after the appended hex
		total_len_X86[j] += 32;
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys2 (the encrypted
 * 'second' key variable), and base-16 appends to the 2nd input
 * (adjusting the lengths)
 *************************************************************/
void DynamicFunc__append_from_last_output2_as_base16(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned idx;
		for (; i < til; ++i)
		{
			unsigned ip, j;
			idx = ( ((unsigned)i)>>(MMX_COEF>>1));
			// This is the 'actual' work.
			// ip = current lane length; bump packed length by the 32 hex chars
			ip = (total_len2[idx] >> ((32/MMX_COEF)*(i&(MMX_COEF-1)))) & 0xFF;
			total_len2[idx] += (32<<((32/MMX_COEF)*(i&(MMX_COEF-1))));
			// pick copy routine by alignment of the insert point
			if (!ip)
				__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(MMX_COEF-1));
			else if (ip&1)
			{
				// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
				for (j = 0; j < 16; ++j)
				{
					unsigned char v = crypt_key2[idx].c[GETPOS(j, i&(MMX_COEF-1))];
					input_buf2[idx].c[GETPOS(ip+(j<<1), i&(MMX_COEF-1))] = dynamic_itoa16[v>>4];
					input_buf2[idx].c[GETPOS(ip+(j<<1)+1, i&(MMX_COEF-1))] = dynamic_itoa16[v&0xF];
				}
				input_buf2[idx].c[GETPOS(ip+32, i&(MMX_COEF-1))] = 0x80;	// MD5 padding byte
			}
			else if ((ip&3)==0)
				__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(MMX_COEF-1));
			else
				__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(MMX_COEF-1));
		}
		return;
	}
#endif
	for (; i < til; ++i)
	{
		unsigned j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
		else
#endif
		{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
		{
#if ARCH_ALLOWS_UNALIGNED
			// two hex digits at once from the precomputed word table
			*((unsigned short*)cp) = itoa16_w2[*cpi++];
			cp += 2;
#else
			unsigned char b = *cpi++;
			*cp++ = dynamic_itoa16[b>>4];
			*cp++ = dynamic_itoa16[b&0xF];
#endif
		}
		*cp = 0;	// keep the buffer NUL-terminated after the appended hex
		total_len2_X86[i] += 32;
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input2 from the output1 data using base-16
 * an optimization, if the same thing is done over and over
 * again, such as md5(md5(md5(md5($p)))) There, we would only
 * call the copy and set length once, then simply call copy.
 *************************************************************/
void DynamicFunc__overwrite_from_last_output_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	int i, til,j;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned idx;
		for (; i < til; ++i)
		{
			idx = ( ((unsigned)i)>>(MMX_COEF>>1));	// SIMD buffer holding lane i
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, i&(MMX_COEF-1));
		}
		return;
	}
#endif
	// 'i' is reused as the digest-byte counter below, so iterate with j
	j = i;
	for (; j < til; ++j)
	{
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
		{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi)
		{
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
		// if swapped, then HDAA fails on big endian systems.
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input2 from the output2 data using base-16;
 * stored lengths are NOT adjusted — callers must have set them.
 *************************************************************/
void DynamicFunc__overwrite_from_last_output2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	int i, til,j;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		unsigned idx;
		for (; i < til; ++i)
		{
			idx = ( ((unsigned)i)>>(MMX_COEF>>1));	// SIMD buffer holding lane i
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(MMX_COEF-1));
		}
		return;
	}
#endif
	// 'i' is reused as the digest-byte counter below, so iterate with j
	j=i;
	for (; j < til; ++j)
	{
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
		{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi)
		{
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
		// if swapped, then HDAA fails on big endian systems.
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys1 (the encrypted
 * 'first' key variable), and base-16 appends to the 2nd input
 * (adjusting the lengths)
 *************************************************************/
void DynamicFunc__append_from_last_output_to_input2_as_base16(DYNA_OMP_PARAMS)
{
	int i, til;
#ifdef _OPENMP
	i = first;		// this thread handles candidates [first, last)
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef MMX_COEF
	if (dynamic_use_sse==1) {
		// 'i' is reused as the digest-byte counter in the unaligned
		// case below, so the lane loop runs on 'index' instead
		unsigned index=i, idx;
		for (; index < til; ++index)
		{
			unsigned ip;
			idx = ( ((unsigned)index)>>(MMX_COEF>>1));
			// This is the 'actual' work.
			// ip = current lane length; bump packed length by the 32 hex chars
			ip = (total_len2[idx] >> ((32/MMX_COEF)*(index&(MMX_COEF-1)))) & 0xFF;
			total_len2[idx] += (32<<((32/MMX_COEF)*(index&(MMX_COEF-1))));
			// pick copy routine by alignment of the insert point
			if (!ip)
				__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, index&(MMX_COEF-1));
			else if (ip&1)
			{
				// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
				for (i = 0; i < 16; ++i)
				{
					unsigned char v = crypt_key[idx].c[GETPOS(i, index&(MMX_COEF-1))];
					input_buf2[idx].c[GETPOS(ip+(i<<1), index&(MMX_COEF-1))] = dynamic_itoa16[v>>4];
					input_buf2[idx].c[GETPOS(ip+(i<<1)+1, index&(MMX_COEF-1))] = dynamic_itoa16[v&0xF];
				}
				input_buf2[idx].c[GETPOS(ip+32, index&(MMX_COEF-1))] = 0x80;	// MD5 padding byte
			}
			else if ((ip&3)==0)
				__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key[idx].c, index&(MMX_COEF-1));
			else
				__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key[idx].c, index&(MMX_COEF-1));
		}
		return;
	}
#endif
	for (; i < til; ++i)
	{
		unsigned j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1)
			{cpi = crypt_key_X86[i>>MD5_X2].x2.B2; cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); }
		else
#endif
		{cpi = crypt_key_X86[i>>MD5_X2].x1.B; cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]);}
		for (j = 0; j < 16; ++j)
		{
#if ARCH_ALLOWS_UNALIGNED
			// two hex digits at once from the precomputed word table
			*((unsigned short*)cp) = itoa16_w2[*cpi++];
			cp += 2;
#else
			unsigned char b = *cpi++;
			*cp++ = dynamic_itoa16[b>>4];
			*cp++ = dynamic_itoa16[b&0xF];
#endif
		}
		*cp = 0;	// keep the buffer NUL-terminated after the appended hex
		total_len2_X86[i] += 32;
	}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys2 (the encrypted
 * 'second' key variable), and base-16 appends to the 1st input
 * (mirror of the function above, with buffers 1 and 2 swapped)
 *************************************************************/
void DynamicFunc__append_from_last_output2_to_input1_as_base16(DYNA_OMP_PARAMS)
{
int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned index=i, idx;
for (; index < til; ++index)
{
unsigned ip;
idx = ( ((unsigned)index)>>(MMX_COEF>>1));
// This is the 'actual' work.
/* ip = current length of this lane; then account for the 32 hex chars. */
ip = (total_len[idx] >> ((32/MMX_COEF)*(index&(MMX_COEF-1)))) & 0xFF;
total_len[idx] += (32<<((32/MMX_COEF)*(index&(MMX_COEF-1))));
if (!ip)
__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, index&(MMX_COEF-1));
else if (ip&1)
{
// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
for (i = 0; i < 16; ++i)
{
unsigned char v = crypt_key2[idx].c[GETPOS(i, index&(MMX_COEF-1))];
input_buf[idx].c[GETPOS(ip+(i<<1), index&(MMX_COEF-1))] = dynamic_itoa16[v>>4];
input_buf[idx].c[GETPOS(ip+(i<<1)+1, index&(MMX_COEF-1))] = dynamic_itoa16[v&0xF];
}
/* MD5 padding byte after the appended data. */
input_buf[idx].c[GETPOS(ip+32, index&(MMX_COEF-1))] = 0x80;
}
else if ((ip&3)==0)
__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key2[idx].c, index&(MMX_COEF-1));
else
__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key2[idx].c, index&(MMX_COEF-1));
}
return;
}
#endif
/* x86 path: hex-encode crypt_key2_X86 onto the end of input_buf_X86. */
for (; i < til; ++i)
{
unsigned j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
{
#if ARCH_ALLOWS_UNALIGNED
/* Write both hex digits at once via the precomputed 16-bit table. */
*((unsigned short*)cp) = itoa16_w2[*cpi++];
cp += 2;
#else
unsigned char b = *cpi++;
*cp++ = dynamic_itoa16[b>>4];
*cp++ = dynamic_itoa16[b&0xF];
#endif
}
*cp = 0;
total_len_X86[i] += 32;
}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the 16 raw (binary, not hex) bytes of output2
 * (crypt_key2) to input 1, and bumps input 1's length by 16.
 *************************************************************/
void DynamicFunc__append_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned index=i, idx;
for (; index < til; ++index)
{
unsigned ip;
idx = ( ((unsigned)index)>>(MMX_COEF>>1));
// This is the 'actual' work.
/* ip = current lane length (packed fields in total_len). */
ip = (total_len[idx] >> ((32/MMX_COEF)*(index&(MMX_COEF-1)))) & 0xFF;
if (!ip)
{
/* Empty buffer: copy 4 interleaved 32-bit words directly. */
ARCH_WORD_32 *po = input_buf[idx].w;
ARCH_WORD_32 *pi = crypt_key2[idx].w;
po += (index&(MMX_COEF-1));
pi += (index&(MMX_COEF-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += MMX_COEF;
pi += MMX_COEF;
}
input_buf[idx].c[GETPOS(16, index&(MMX_COEF-1))] = 0x80;
}
else
{
/* Unaligned append: byte-by-byte copy, then padding byte. */
for (i = 0; i < 16; ++i)
input_buf[idx].c[GETPOS(ip+i, index&(MMX_COEF-1))] = crypt_key2[idx].c[GETPOS(i, index&(MMX_COEF-1))];
input_buf[idx].c[GETPOS(ip+16, index&(MMX_COEF-1))] = 0x80;
}
total_len[idx] += (16<<((32/MMX_COEF)*(index&(MMX_COEF-1))));
}
return;
}
#endif
/* x86 path: plain 16-byte copy onto the end of input_buf_X86. */
for (; i < til; ++i)
{
unsigned j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len_X86[i] += 16;
}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the 16 raw bytes of output2 (crypt_key2) to input 2,
 * and bumps input 2's length by 16.
 *************************************************************/
void DynamicFunc__append2_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned index=i, idx;
for (; index < til; ++index)
{
unsigned ip;
idx = ( ((unsigned)index)>>(MMX_COEF>>1));
// This is the 'actual' work.
/* ip = current lane length (packed fields in total_len2). */
ip = (total_len2[idx] >> ((32/MMX_COEF)*(index&(MMX_COEF-1)))) & 0xFF;
if (!ip)
{
/* Empty buffer: copy 4 interleaved 32-bit words directly. */
ARCH_WORD_32 *po = input_buf2[idx].w;
ARCH_WORD_32 *pi = crypt_key2[idx].w;
po += (index&(MMX_COEF-1));
pi += (index&(MMX_COEF-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += MMX_COEF;
pi += MMX_COEF;
}
input_buf2[idx].c[GETPOS(16, index&(MMX_COEF-1))] = 0x80;
}
else
{
/* Unaligned append: byte-by-byte copy, then padding byte. */
for (i = 0; i < 16; ++i)
input_buf2[idx].c[GETPOS(ip+i, index&(MMX_COEF-1))] = crypt_key2[idx].c[GETPOS(i, index&(MMX_COEF-1))];
input_buf2[idx].c[GETPOS(ip+16, index&(MMX_COEF-1))] = 0x80;
}
total_len2[idx] += (16<<((32/MMX_COEF)*(index&(MMX_COEF-1))));
}
return;
}
#endif
/* x86 path: plain 16-byte copy onto the end of input_buf2_X86. */
for (; i < til; ++i)
{
unsigned j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len2_X86[i] += 16;
}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the 16 raw bytes of output1 (crypt_key) to input 1,
 * and bumps input 1's length by 16.
 *************************************************************/
void DynamicFunc__append_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned index, idx;
for (index = i; index < til; ++index)
{
unsigned ip;
idx = ( ((unsigned)index)>>(MMX_COEF>>1));
// This is the 'actual' work.
/* ip = current lane length (packed fields in total_len). */
ip = (total_len[idx] >> ((32/MMX_COEF)*(index&(MMX_COEF-1)))) & 0xFF;
if (!ip)
{
/* Empty buffer: copy 4 interleaved 32-bit words directly. */
ARCH_WORD_32 *po = input_buf[idx].w;
ARCH_WORD_32 *pi = crypt_key[idx].w;
po += (index&(MMX_COEF-1));
pi += (index&(MMX_COEF-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += MMX_COEF;
pi += MMX_COEF;
}
input_buf[idx].c[GETPOS(16, index&(MMX_COEF-1))] = 0x80;
}
else
{
/* Unaligned append: byte-by-byte copy, then padding byte. */
for (i = 0; i < 16; ++i)
input_buf[idx].c[GETPOS(ip+i, index&(MMX_COEF-1))] = crypt_key[idx].c[GETPOS(i, index&(MMX_COEF-1))];
input_buf[idx].c[GETPOS(ip+16, index&(MMX_COEF-1))] = 0x80;
}
total_len[idx] += (16<<((32/MMX_COEF)*(index&(MMX_COEF-1))));
}
return;
}
#endif
/* x86 path: plain 16-byte copy onto the end of input_buf_X86. */
for (; i < til; ++i)
{
unsigned j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len_X86[i] += 16;
}
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the 16 raw bytes of output1 (crypt_key) to input 2,
 * and bumps input 2's length by 16.
 *************************************************************/
void DynamicFunc__append2_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
int i, til;
#ifdef _OPENMP
i = first;
til = last;
#else
i = 0;
til = m_count;
#endif
#ifdef MMX_COEF
if (dynamic_use_sse==1) {
unsigned index, idx;
for (index = i; index < til; ++index)
{
unsigned ip;
idx = ( ((unsigned)index)>>(MMX_COEF>>1));
// This is the 'actual' work.
/* ip = current lane length (packed fields in total_len2). */
ip = (total_len2[idx] >> ((32/MMX_COEF)*(index&(MMX_COEF-1)))) & 0xFF;
if (!ip)
{
/* Empty buffer: copy 4 interleaved 32-bit words directly. */
ARCH_WORD_32 *po = input_buf2[idx].w;
ARCH_WORD_32 *pi = crypt_key[idx].w;
po += (index&(MMX_COEF-1));
pi += (index&(MMX_COEF-1));
for (i = 0; i < 4; i++)
{
*po = *pi;
po += MMX_COEF;
pi += MMX_COEF;
}
input_buf2[idx].c[GETPOS(16, index&(MMX_COEF-1))] = 0x80;
}
else
{
/* Unaligned append: byte-by-byte copy, then padding byte. */
for (i = 0; i < 16; ++i)
input_buf2[idx].c[GETPOS(ip+i, index&(MMX_COEF-1))] = crypt_key[idx].c[GETPOS(i, index&(MMX_COEF-1))];
input_buf2[idx].c[GETPOS(ip+16, index&(MMX_COEF-1))] = 0x80;
}
total_len2[idx] += (16<<((32/MMX_COEF)*(index&(MMX_COEF-1))));
}
return;
}
#endif
/* x86 path: plain 16-byte copy onto the end of input_buf2_X86. */
for (; i < til; ++i)
{
unsigned j;
unsigned char *cp, *cpi;
#if MD5_X2
if (i&1)
{cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
else
#endif
{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
for (j = 0; j < 16; ++j)
*cp++ = *cpi++;
*cp = 0;
total_len2_X86[i] += 16;
}
}
/**************************************************************
 * DYNAMIC primitive helper functions: thin wrappers that all
 * delegate to __append_string / __append2_string (input 1 vs
 * input 2).  DYNA_OMP_PARAMSdm forwards the per-thread slice
 * arguments to the helper when built with _OPENMP.
 *
 * Append salt #2 into input 1
 *************************************************************/
void DynamicFunc__append_2nd_salt(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Append salt #2 into input 2
 *************************************************************/
void DynamicFunc__append_2nd_salt2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Append UserID into input 1
 *************************************************************/
void DynamicFunc__append_userid(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
/**************************************************************
 * DYNAMIC primitive helper function
 * Append UserID into input 2
 *************************************************************/
void DynamicFunc__append_userid2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm username, usernamelen);
}
/* Append format constant N (curdat.Consts[N-1]) to input 1. */
void DynamicFunc__append_input1_from_CONST1(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input1_from_CONST2(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input1_from_CONST3(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input1_from_CONST4(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input1_from_CONST5(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input1_from_CONST6(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input1_from_CONST7(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input1_from_CONST8(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
/* Append format constant N (curdat.Consts[N-1]) to input 2. */
void DynamicFunc__append_input2_from_CONST1(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input2_from_CONST2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input2_from_CONST3(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input2_from_CONST4(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input2_from_CONST5(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input2_from_CONST6(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input2_from_CONST7(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input2_from_CONST8(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}
/* Append ciphertext field N (flds[N]) to input 1. */
void DynamicFunc__append_fld0(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append_fld1(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append_fld2(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append_fld3(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append_fld4(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append_fld5(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append_fld6(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append_fld7(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append_fld8(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append_fld9(DYNA_OMP_PARAMS)
{
__append_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
/* Append ciphertext field N (flds[N]) to input 2. */
void DynamicFunc__append2_fld0(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append2_fld1(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append2_fld2(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append2_fld3(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append2_fld4(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append2_fld5(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append2_fld6(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append2_fld7(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append2_fld8(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append2_fld9(DYNA_OMP_PARAMS)
{
__append2_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}
/* De-interleaves input 1 from the SSE layout into the flat X86
 * buffers (input_buf_X86 / total_len_X86) and switches the format
 * into X86 mode (dynamic_use_sse = 2).  No-op when SSE is not in
 * use for this format. */
void DynamicFunc__SSEtoX86_switch_input1(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
int j, k, idx, max;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
/* Each pass handles one SIMD block of MMX_COEF candidates. */
for (j = 0; j < m_count; j += MMX_COEF)
{
ARCH_WORD_32 *cpi;
#if (MD5_X2)
ARCH_WORD_32 *cpo = input_buf_X86[j>>1].x1.w;
ARCH_WORD_32 *cpo2 = input_buf_X86[j>>1].x2.w2;
ARCH_WORD_32 *cpo3 = input_buf_X86[(j>>1)+1].x1.w;
ARCH_WORD_32 *cpo4 = input_buf_X86[(j>>1)+1].x2.w2;
#else
ARCH_WORD_32 *cpo = input_buf_X86[j].x1.w;
ARCH_WORD_32 *cpo2 = input_buf_X86[j+1].x1.w;
#if (MMX_COEF==4)
ARCH_WORD_32 *cpo3 = input_buf_X86[j+2].x1.w;
ARCH_WORD_32 *cpo4 = input_buf_X86[j+3].x1.w;
#endif
#endif
idx = ( ((unsigned)j)>>(MMX_COEF>>1));
cpi = input_buf[idx].w;
/* Unpack the per-lane lengths and track the longest one.
 * NOTE(review): lanes j+1..j+3 are read with 8-bit shifts
 * unconditionally — this assumes four 8-bit length fields per
 * total_len word; confirm behavior for MMX_COEF==2 builds. */
max = total_len_X86[j] = (total_len[idx]&0xFF);
if (max < (total_len_X86[j+1]=((total_len[idx]>> 8)&0xFF)))
max = total_len_X86[j+1];
if (max < (total_len_X86[j+2]=((total_len[idx]>>16)&0xFF)))
max = total_len_X86[j+2];
if (max < (total_len_X86[j+3]=((total_len[idx]>>24)&0xFF)))
max = total_len_X86[j+3];
/* Round the longest length up to whole 32-bit words. */
max = (max+3)>>2;
for (k = 0; k < max; ++k) {
*cpo++ = *cpi++;
*cpo2++ = *cpi++;
#if (MMX_COEF==4)
*cpo3++ = *cpi++;
*cpo4++ = *cpi++;
#endif
}
/* NUL-terminate each candidate at its own length. */
#if (MD5_X2)
input_buf_X86[j>>1].x1.b[total_len_X86[j]] = 0;
input_buf_X86[j>>1].x2.b2[total_len_X86[j+1]] = 0;
input_buf_X86[(j>>1)+1].x1.b[total_len_X86[j+2]] = 0;
input_buf_X86[(j>>1)+1].x2.b2[total_len_X86[j+3]] = 0;
#else
input_buf_X86[j].x1.b[total_len_X86[j]] = 0;
input_buf_X86[j+1].x1.b[total_len_X86[j+1]] = 0;
#if (MMX_COEF==4)
input_buf_X86[j+2].x1.b[total_len_X86[j+2]] = 0;
input_buf_X86[j+3].x1.b[total_len_X86[j+3]] = 0;
#endif
#endif
}
#endif
}
/* De-interleaves input 2 from the SSE layout into the flat X86
 * buffers (input_buf2_X86 / total_len2_X86) and switches the
 * format into X86 mode.  Mirror of the input-1 switch above. */
void DynamicFunc__SSEtoX86_switch_input2(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
int j, k, idx, max;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += MMX_COEF)
{
ARCH_WORD_32 *cpi;
#if (MD5_X2)
ARCH_WORD_32 *cpo = input_buf2_X86[j>>1].x1.w;
ARCH_WORD_32 *cpo2 = input_buf2_X86[j>>1].x2.w2;
ARCH_WORD_32 *cpo3 = input_buf2_X86[(j>>1)+1].x1.w;
ARCH_WORD_32 *cpo4 = input_buf2_X86[(j>>1)+1].x2.w2;
#else
ARCH_WORD_32 *cpo = input_buf2_X86[j].x1.w;
ARCH_WORD_32 *cpo2 = input_buf2_X86[j+1].x1.w;
#if (MMX_COEF==4)
ARCH_WORD_32 *cpo3 = input_buf2_X86[j+2].x1.w;
ARCH_WORD_32 *cpo4 = input_buf2_X86[j+3].x1.w;
#endif
#endif
idx = ( ((unsigned)j)>>(MMX_COEF>>1));
cpi = input_buf2[idx].w;
/* Unpack per-lane lengths; see the review note in switch_input1
 * about the 8-bit field assumption. */
max = total_len2_X86[j] = (total_len2[idx]&0xFF);
if (max < (total_len2_X86[j+1]=((total_len2[idx]>> 8)&0xFF)))
max = total_len2_X86[j+1];
if (max < (total_len2_X86[j+2]=((total_len2[idx]>>16)&0xFF)))
max = total_len2_X86[j+2];
if (max < (total_len2_X86[j+3]=((total_len2[idx]>>24)&0xFF)))
max = total_len2_X86[j+3];
max = (max+3)>>2;
for (k = 0; k < max; ++k) {
*cpo++ = *cpi++;
*cpo2++ = *cpi++;
#if (MMX_COEF==4)
*cpo3++ = *cpi++;
*cpo4++ = *cpi++;
#endif
}
// get rid of the 0x80
#if (MD5_X2)
input_buf2_X86[j>>1].x1.b[total_len2_X86[j]] = 0;
input_buf2_X86[j>>1].x2.b2[total_len2_X86[j+1]] = 0;
input_buf2_X86[(j>>1)+1].x1.b[total_len2_X86[j+2]] = 0;
input_buf2_X86[(j>>1)+1].x2.b2[total_len2_X86[j+3]] = 0;
#else
input_buf2_X86[j].x1.b[total_len2_X86[j]] = 0;
input_buf2_X86[j+1].x1.b[total_len2_X86[j+1]] = 0;
#if (MMX_COEF==4)
input_buf2_X86[j+2].x1.b[total_len2_X86[j+2]] = 0;
input_buf2_X86[j+3].x1.b[total_len2_X86[j+3]] = 0;
#endif
#endif
}
#endif
}
/* De-interleaves output 1 (crypt_key, 16 bytes = 4 words per lane)
 * from the SSE layout into crypt_key_X86 and switches the format
 * into X86 mode. */
void DynamicFunc__SSEtoX86_switch_output1(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
int j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += MMX_COEF)
{
ARCH_WORD_32 *cpi;
#if (MD5_X2)
ARCH_WORD_32 *cpo = crypt_key_X86[j>>1].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key_X86[j>>1].x2.w2;
ARCH_WORD_32 *cpo3 = crypt_key_X86[(j>>1)+1].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key_X86[(j>>1)+1].x2.w2;
#else
ARCH_WORD_32 *cpo = crypt_key_X86[j].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key_X86[j+1].x1.w;
#if (MMX_COEF==4)
ARCH_WORD_32 *cpo3 = crypt_key_X86[j+2].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key_X86[j+3].x1.w;
#endif
#endif
idx = ( ((unsigned)j)>>(MMX_COEF>>1));
cpi = (void*)crypt_key[idx].c;
/* Fixed 4-word (16-byte) hash per lane — no length bookkeeping. */
for (k = 0; k < 4; ++k) {
*cpo++ = *cpi++;
*cpo2++ = *cpi++;
#if (MMX_COEF==4)
*cpo3++ = *cpi++;
*cpo4++ = *cpi++;
#endif
}
}
#endif
}
/* De-interleaves output 2 (crypt_key2) from the SSE layout into
 * crypt_key2_X86 and switches the format into X86 mode. */
void DynamicFunc__SSEtoX86_switch_output2(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
int j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 2;
for (j = 0; j < m_count; j += MMX_COEF)
{
ARCH_WORD_32 *cpi;
#if (MD5_X2)
ARCH_WORD_32 *cpo = crypt_key2_X86[j>>1].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key2_X86[j>>1].x2.w2;
ARCH_WORD_32 *cpo3 = crypt_key2_X86[(j>>1)+1].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key2_X86[(j>>1)+1].x2.w2;
#else
ARCH_WORD_32 *cpo = crypt_key2_X86[j].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key2_X86[j+1].x1.w;
#if (MMX_COEF==4)
ARCH_WORD_32 *cpo3 = crypt_key2_X86[j+2].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key2_X86[j+3].x1.w;
#endif
#endif
idx = ( ((unsigned)j)>>(MMX_COEF>>1));
cpi = crypt_key2[idx].w;
/* Fixed 4-word (16-byte) hash per lane. */
for (k = 0; k < 4; ++k) {
*cpo++ = *cpi++;
*cpo2++ = *cpi++;
#if (MMX_COEF==4)
*cpo3++ = *cpi++;
*cpo4++ = *cpi++;
#endif
}
}
#endif
}
/* Re-interleaves input 1 from the flat X86 buffers back into the
 * SSE layout (cleaning the SSE input first), and switches the
 * format into SSE mode (dynamic_use_sse = 1). */
void DynamicFunc__X86toSSE_switch_input1(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
unsigned j, idx, idx_mod;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
__nonMP_DynamicFunc__clean_input();
/* NOTE(review): j is unsigned and m_count appears to be signed int;
 * comparison promotes m_count to unsigned — fine while m_count >= 0. */
for (j = 0; j < m_count; ++j) {
idx = (j>>(MMX_COEF>>1));
idx_mod = j&(MMX_COEF-1);
/* Pack this lane's length into its bit-field of total_len. */
total_len[idx] += (total_len_X86[j] << ((32/MMX_COEF)*idx_mod));
#if (MD5_X2)
if (j & 1)
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>1].x2.B2,total_len_X86[j],0,1);
else
#endif
__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>MD5_X2].x1.B,total_len_X86[j],0,1);
}
#endif
}
/* Re-interleaves input 2 from the flat X86 buffers back into the
 * SSE layout (cleaning the SSE input2 first), and switches the
 * format into SSE mode.  Mirror of the input-1 switch above. */
void DynamicFunc__X86toSSE_switch_input2(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
unsigned j, idx, idx_mod;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
__nonMP_DynamicFunc__clean_input2();
for (j = 0; j < m_count; ++j) {
idx = (j>>(MMX_COEF>>1));
idx_mod = j&(MMX_COEF-1);
/* Pack this lane's length into its bit-field of total_len2. */
total_len2[idx] += (total_len2_X86[j] << ((32/MMX_COEF)*idx_mod));
#if (MD5_X2)
if (j & 1)
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>1].x2.B2,total_len2_X86[j],0,1);
else
#endif
__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>MD5_X2].x1.B,total_len2_X86[j],0,1);
}
#endif
}
/* Re-interleaves output 1 from crypt_key_X86 back into the SSE
 * layout (note: copies cpo -> cpi, the reverse direction of the
 * SSEtoX86 variant) and switches the format into SSE mode. */
void DynamicFunc__X86toSSE_switch_output1(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
int j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
for (j = 0; j < m_count; j += MMX_COEF)
{
ARCH_WORD_32 *cpi;
#if (MD5_X2)
ARCH_WORD_32 *cpo = crypt_key_X86[j>>1].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key_X86[j>>1].x2.w2;
ARCH_WORD_32 *cpo3 = crypt_key_X86[(j>>1)+1].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key_X86[(j>>1)+1].x2.w2;
#else
ARCH_WORD_32 *cpo = crypt_key_X86[j].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key_X86[j+1].x1.w;
#if (MMX_COEF==4)
ARCH_WORD_32 *cpo3 = crypt_key_X86[j+2].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key_X86[j+3].x1.w;
#endif
#endif
idx = ( ((unsigned)j)>>(MMX_COEF>>1));
cpi = (void*)crypt_key[idx].c;
/* Fixed 4-word (16-byte) hash per lane. */
for (k = 0; k < 4; ++k) {
*cpi++ = *cpo++;
*cpi++ = *cpo2++;
#if (MMX_COEF==4)
*cpi++ = *cpo3++;
*cpi++ = *cpo4++;
#endif
}
}
#endif
}
/* Re-interleaves output 2 from crypt_key2_X86 back into the SSE
 * layout and switches the format into SSE mode. */
void DynamicFunc__X86toSSE_switch_output2(DYNA_OMP_PARAMS) {
#ifdef MMX_COEF
int j, k, idx;
if (dynamic_use_sse == 0)
return;
dynamic_use_sse = 1;
for (j = 0; j < m_count; j += MMX_COEF)
{
ARCH_WORD_32 *cpi;
#if (MD5_X2)
ARCH_WORD_32 *cpo = crypt_key2_X86[j>>1].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key2_X86[j>>1].x2.w2;
ARCH_WORD_32 *cpo3 = crypt_key2_X86[(j>>1)+1].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key2_X86[(j>>1)+1].x2.w2;
#else
ARCH_WORD_32 *cpo = crypt_key2_X86[j].x1.w;
ARCH_WORD_32 *cpo2 = crypt_key2_X86[j+1].x1.w;
#if (MMX_COEF==4)
ARCH_WORD_32 *cpo3 = crypt_key2_X86[j+2].x1.w;
ARCH_WORD_32 *cpo4 = crypt_key2_X86[j+3].x1.w;
#endif
#endif
idx = ( ((unsigned)j)>>(MMX_COEF>>1));
cpi = crypt_key2[idx].w;
/* Fixed 4-word (16-byte) hash per lane. */
for (k = 0; k < 4; ++k) {
*cpi++ = *cpo++;
*cpi++ = *cpo2++;
#if (MMX_COEF==4)
*cpi++ = *cpo3++;
*cpi++ = *cpo4++;
#endif
}
}
#endif
}
/* Switch the format back into SSE mode. This only flips the mode flag;
 * it does NOT copy any buffer data from the X86 side. A flag value of 0
 * means SSE is unavailable for this format, so the mode is left alone. */
void DynamicFunc__ToSSE(DYNA_OMP_PARAMS) {
	if (dynamic_use_sse != 0)
		dynamic_use_sse = 1;
}
/* Switch the format into X86 mode. This only flips the mode flag; it
 * does NOT copy any buffer data from the SSE side. A flag value of 0
 * means SSE is unavailable for this format, so the mode is left alone. */
void DynamicFunc__ToX86(DYNA_OMP_PARAMS) {
	if (dynamic_use_sse != 0)
		dynamic_use_sse = 2;
}
/* Select lowercase hex digits for all subsequent base-16 conversions:
 * installs the lowercase single-nibble and two-nibble lookup tables. */
void DynamicFunc__base16_convert_locase(DYNA_OMP_PARAMS) {
	itoa16_w2 = itoa16_w2_l;   /* word-at-a-time (two digit) table */
	dynamic_itoa16 = itoa16;   /* nibble-at-a-time table */
}
/* Select uppercase hex digits for all subsequent base-16 conversions:
 * installs the uppercase single-nibble and two-nibble lookup tables. */
void DynamicFunc__base16_convert_upcase(DYNA_OMP_PARAMS) {
	itoa16_w2 = itoa16_w2_u;   /* word-at-a-time (two digit) table */
	dynamic_itoa16 = itoa16u;  /* nibble-at-a-time table */
}
/* These are the 'older' singular functions. They SHOULD be viewed as deprecated. They still work, but should not be used. */
/* NOTE: any new larger hash crypts will NOT have these *_base16() functions. */
/* Each *_base16 wrapper below forces base-16 output mode via
 * DynamicFunc__LargeHash_OUTMode_base16(), then invokes the matching
 * large-hash crypt primitive.  One wrapper per (hash, direction) pair. */
void DynamicFunc__SHA1_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA1_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA1_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA1_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA1_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA1_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA1_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA1_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA224_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA224_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA224_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA224_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA224_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA224_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA224_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA224_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA224_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA224_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA224_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA224_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA256_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA256_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA256_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA256_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA256_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA256_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA256_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA256_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA256_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA256_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA256_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA256_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA384_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA384_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA384_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA384_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA384_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA384_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA384_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA384_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA384_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA384_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA384_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA384_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA512_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA512_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA512_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA512_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA512_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA512_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA512_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA512_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA512_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA512_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__SHA512_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__SHA512_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__GOST_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__GOST_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__GOST_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__GOST_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__GOST_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__GOST_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__GOST_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__GOST_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__GOST_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__GOST_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__GOST_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__GOST_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__WHIRLPOOL_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__WHIRLPOOL_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__WHIRLPOOL_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__WHIRLPOOL_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__Tiger_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__Tiger_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__Tiger_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__Tiger_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__Tiger_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__Tiger_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__Tiger_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__Tiger_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__Tiger_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__Tiger_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__Tiger_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__Tiger_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD128_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD128_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD128_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD128_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD128_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD128_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD128_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD128_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD128_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD128_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD128_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD128_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD160_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD160_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD160_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD160_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD160_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD160_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD160_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD160_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD160_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD160_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD160_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD160_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD256_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD256_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD256_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD256_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD256_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD256_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD256_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD256_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD256_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD256_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD256_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD256_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD320_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD320_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD320_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS) { DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD320_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD320_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD320_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD320_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD320_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD320_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD320_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__RIPEMD320_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__RIPEMD320_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD5_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD5_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD5_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD5_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD5_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD5_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD5_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD5_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD5_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD5_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD5_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD5_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD4_crypt_input1_append_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD4_crypt_input1_append_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD4_crypt_input2_append_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD4_crypt_input2_append_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD4_crypt_input1_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD4_crypt_input1_overwrite_input1(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD4_crypt_input2_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD4_crypt_input2_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD4_crypt_input1_overwrite_input2_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD4_crypt_input1_overwrite_input2(DYNA_OMP_PARAMSd); }
void DynamicFunc__MD4_crypt_input2_overwrite_input1_base16(DYNA_OMP_PARAMS){ DynamicFunc__LargeHash_OUTMode_base16(DYNA_OMP_PARAMSd); DynamicFunc__MD4_crypt_input2_overwrite_input1(DYNA_OMP_PARAMSd); }
/**************************************************************
 * DEPRECATED functions. These are the older pseudo functions
 * which we now have flags for. We keep them so that we can
 * add the proper flags, even if the user is running an older
 * script.
 *************************************************************/
// Deprecated no-op stubs: kept so older dynamic scripts that still name
// these pseudo-functions continue to parse.  dynamic_SETUP detects them in
// pFuncs[0] and translates each into the equivalent MGF_* start flag, and
// ConvertFuncs() drops them from the executed primitive list.
void DynamicFunc__PHPassSetup(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeysToInput(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1(DYNA_OMP_PARAMS) {}
void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32(DYNA_OMP_PARAMS) {}
/**************************************************************
**************************************************************
**************************************************************
**************************************************************
* DYNAMIC primitive helper function
* This is the END of the primitives.
**************************************************************
**************************************************************
**************************************************************
*************************************************************/
// Map one script primitive to the list of primitives actually executed.
// Returns a pointer to a static array (NOT reentrant) and sets *count to the
// number of valid entries; *count == 0 means "ignore this primitive".
// Deprecated flag-style pseudo-functions, and (in non-SSE builds) the
// SSE<->x86 switching primitives, are filtered out here.
static DYNAMIC_primitive_funcp *ConvertFuncs(DYNAMIC_primitive_funcp p, int *count)
{
	static DYNAMIC_primitive_funcp fncs[20];
	*count = 0;
	// Deprecated pseudo-functions were already converted into startFlags;
	// they must not appear in the executed list.
	if (p==DynamicFunc__PHPassSetup ||
		p==DynamicFunc__InitialLoadKeysToInput ||
		p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
		p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
		p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
		return fncs; // ignore these
#ifndef MMX_COEF
	// Pure x86 build: mode-switch primitives are meaningless no-ops.
	if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 ||
		p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
		p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 ||
		p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
		p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86)
		return fncs; // we ignore these functions 100% in x86 mode.
#endif
//	if (p==DynamicFunc__append_input2_from_CONST1) {
//		fncs[0] = DynamicFunc__set_input2;
//		fncs[1] = DynamicFunc__set_CONST1;
//		fncs[2] = DynamicFunc__append_CONST;
//		*count = 3;
//	}
	/* LOOK INTO THIS!!!!! This may not be valid, now that SHA1 is handled 100% outside of the SSE2 code.
	   But I am not sure just WTF this is supposed to do anyway, since not LE should be using CTX only??? */
#if !ARCH_LITTLE_ENDIAN
	// NOTE(review): side effect — on big-endian builds, seeing any SHA1
	// primitive clears curdat.force_md5_ctx; see the caveat comment above.
	if (p==DynamicFunc__SHA1_crypt_input1_append_input2_base16 || p==DynamicFunc__SHA1_crypt_input1_append_input2 ||
		p==DynamicFunc__SHA1_crypt_input2_append_input1_base16 || p==DynamicFunc__SHA1_crypt_input2_append_input1 ||
		p==DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__SHA1_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__SHA1_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__SHA1_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__SHA1_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__SHA1_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__SHA1_crypt_input2_to_output1_FINAL)
		curdat.force_md5_ctx = 0;
#endif
	// Default: pass the primitive through unchanged as a one-element list.
	*count = 1;
	fncs[0] = p;
	return fncs;
}
#ifdef _OPENMP
// Returns 1 if primitive p is unsafe to run under OpenMP (the format must
// then drop FMT_OMP); 0 otherwise.  Compiled only under _OPENMP.
static int isBadOMPFunc(DYNAMIC_primitive_funcp p) {
	// If ANY of these functions are seen, we can NOT use OMP for this single format.
#if MMX_COEF
	// SSE<->x86 mode switches operate on the shared global buffers, so they
	// cannot be split across OMP threads.
	if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 ||
		p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 ||
		p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 ||
		p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 ||
		p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86)
		return 1;
#endif
	// Base-16 case-conversion primitives are also OMP-unsafe.
	if (p==DynamicFunc__base16_convert_locase || p==DynamicFunc__base16_convert_upcase)
		return 1;
	return 0;
}
#endif
// Returns 1 if p is any MD4 dynamic primitive (flat-buffer style or the
// older mmx_coef variants), 0 otherwise.
static int isMD4Func(DYNAMIC_primitive_funcp p) {
	static const DYNAMIC_primitive_funcp md4_funcs[] = {
		// flat-buffer primitives
		DynamicFunc__MD4_crypt_input1_append_input2_base16, DynamicFunc__MD4_crypt_input1_append_input2,
		DynamicFunc__MD4_crypt_input2_append_input1_base16, DynamicFunc__MD4_crypt_input2_append_input1,
		DynamicFunc__MD4_crypt_input1_overwrite_input1_base16, DynamicFunc__MD4_crypt_input1_overwrite_input1,
		DynamicFunc__MD4_crypt_input2_overwrite_input2_base16, DynamicFunc__MD4_crypt_input2_overwrite_input2,
		DynamicFunc__MD4_crypt_input1_overwrite_input2_base16, DynamicFunc__MD4_crypt_input1_overwrite_input2,
		DynamicFunc__MD4_crypt_input2_overwrite_input1_base16, DynamicFunc__MD4_crypt_input2_overwrite_input1,
		DynamicFunc__MD4_crypt_input1_to_output1_FINAL,
		DynamicFunc__MD4_crypt_input2_to_output1_FINAL,
		// older mmx_coef variants
		DynamicFunc__crypt_md4, DynamicFunc__crypt_md4_in1_to_out2,
		DynamicFunc__crypt2_md4, DynamicFunc__crypt_md4_in2_to_out1
	};
	unsigned int idx;
	for (idx = 0; idx < sizeof(md4_funcs)/sizeof(md4_funcs[0]); ++idx) {
		if (p == md4_funcs[idx])
			return 1;
	}
	return 0;
}
#ifdef _OPENMP
// Only used in OMP code, to compute LCM granularity. So we #ifdef it out to avoid compiler warnings.
#ifdef MMX_COEF
// otherwise unused
// Returns 1 if p is any MD5 dynamic primitive (flat-buffer style, older
// mmx_coef variants, or PHPass), 0 otherwise.  Compiled only in
// OMP + MMX_COEF builds (see surrounding #ifdefs) for LCM granularity.
static int isMD5Func(DYNAMIC_primitive_funcp p) {
	// handle flats
	if (p==DynamicFunc__MD5_crypt_input1_append_input2_base16 || p==DynamicFunc__MD5_crypt_input1_append_input2 ||
		p==DynamicFunc__MD5_crypt_input2_append_input1_base16 || p==DynamicFunc__MD5_crypt_input2_append_input1 ||
		p==DynamicFunc__MD5_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__MD5_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__MD5_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__MD5_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__MD5_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__MD5_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__MD5_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__MD5_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__MD5_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__MD5_crypt_input2_to_output1_FINAL)
		return 1;
	// handle older mmx_coef variants
	if (p==DynamicFunc__crypt_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
		p==DynamicFunc__crypt_md5_in1_to_out2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
		p==DynamicFunc__crypt_md5_to_input_raw || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen ||
		p==DynamicFunc__crypt_md5_in2_to_out1 || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE ||
		p==DynamicFunc__crypt2_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
		return 1;
	// this one also.
	if (p==DynamicFunc__PHPassCrypt)
		return 1;
	return 0;
}
#endif
#endif
// Returns 1 if p is any SHA-1 dynamic primitive, 0 otherwise.
static int isSHA1Func(DYNAMIC_primitive_funcp p) {
	static const DYNAMIC_primitive_funcp sha1_funcs[] = {
		DynamicFunc__SHA1_crypt_input1_append_input2_base16, DynamicFunc__SHA1_crypt_input1_append_input2,
		DynamicFunc__SHA1_crypt_input2_append_input1_base16, DynamicFunc__SHA1_crypt_input2_append_input1,
		DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16, DynamicFunc__SHA1_crypt_input1_overwrite_input1,
		DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16, DynamicFunc__SHA1_crypt_input2_overwrite_input2,
		DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16, DynamicFunc__SHA1_crypt_input1_overwrite_input2,
		DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16, DynamicFunc__SHA1_crypt_input2_overwrite_input1,
		DynamicFunc__SHA1_crypt_input1_to_output1_FINAL,
		DynamicFunc__SHA1_crypt_input2_to_output1_FINAL
	};
	unsigned int idx;
	for (idx = 0; idx < sizeof(sha1_funcs)/sizeof(sha1_funcs[0]); ++idx) {
		if (p == sha1_funcs[idx])
			return 1;
	}
	return 0;
}
// Returns 1 if p is any SHA-224/SHA-256 dynamic primitive, 0 otherwise.
// FIX: the original listed the SHA224 input2_overwrite_input2 pair twice
// (copy/paste error) and therefore never matched the SHA224
// input1_overwrite_input2(_base16) primitives; that pair is restored here.
static int isSHA2_256Func(DYNAMIC_primitive_funcp p) {
	if (p==DynamicFunc__SHA224_crypt_input1_append_input2_base16 || p==DynamicFunc__SHA224_crypt_input1_append_input2 ||
		p==DynamicFunc__SHA224_crypt_input2_append_input1_base16 || p==DynamicFunc__SHA224_crypt_input2_append_input1 ||
		p==DynamicFunc__SHA224_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__SHA224_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__SHA224_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__SHA224_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__SHA224_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__SHA224_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__SHA224_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__SHA224_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__SHA224_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__SHA224_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__SHA256_crypt_input1_append_input2_base16 || p==DynamicFunc__SHA256_crypt_input1_append_input2 ||
		p==DynamicFunc__SHA256_crypt_input2_append_input1_base16 || p==DynamicFunc__SHA256_crypt_input2_append_input1 ||
		p==DynamicFunc__SHA256_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__SHA256_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__SHA256_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__SHA256_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__SHA256_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__SHA256_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__SHA256_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__SHA256_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__SHA256_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__SHA256_crypt_input2_to_output1_FINAL)
		return 1;
	return 0;
}
// Returns 1 if p is any SHA-384/SHA-512 dynamic primitive, 0 otherwise.
static int isSHA2_512Func(DYNAMIC_primitive_funcp p) {
	if (p==DynamicFunc__SHA384_crypt_input1_append_input2_base16 || p==DynamicFunc__SHA384_crypt_input1_append_input2 ||
		p==DynamicFunc__SHA384_crypt_input2_append_input1_base16 || p==DynamicFunc__SHA384_crypt_input2_append_input1 ||
		p==DynamicFunc__SHA384_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__SHA384_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__SHA384_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__SHA384_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__SHA384_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__SHA384_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__SHA384_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__SHA384_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__SHA384_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__SHA384_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__SHA512_crypt_input1_append_input2_base16 || p==DynamicFunc__SHA512_crypt_input1_append_input2 ||
		p==DynamicFunc__SHA512_crypt_input2_append_input1_base16 || p==DynamicFunc__SHA512_crypt_input2_append_input1 ||
		p==DynamicFunc__SHA512_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__SHA512_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__SHA512_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__SHA512_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__SHA512_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__SHA512_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__SHA512_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__SHA512_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__SHA512_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__SHA512_crypt_input2_to_output1_FINAL)
		return 1;
	return 0;
}
// Returns 1 if p is any GOST dynamic primitive, 0 otherwise.
static int isGOSTFunc(DYNAMIC_primitive_funcp p) {
	static const DYNAMIC_primitive_funcp gost_funcs[] = {
		DynamicFunc__GOST_crypt_input1_append_input2_base16, DynamicFunc__GOST_crypt_input1_append_input2,
		DynamicFunc__GOST_crypt_input2_append_input1_base16, DynamicFunc__GOST_crypt_input2_append_input1,
		DynamicFunc__GOST_crypt_input1_overwrite_input1_base16, DynamicFunc__GOST_crypt_input1_overwrite_input1,
		DynamicFunc__GOST_crypt_input2_overwrite_input2_base16, DynamicFunc__GOST_crypt_input2_overwrite_input2,
		DynamicFunc__GOST_crypt_input1_overwrite_input2_base16, DynamicFunc__GOST_crypt_input1_overwrite_input2,
		DynamicFunc__GOST_crypt_input2_overwrite_input1_base16, DynamicFunc__GOST_crypt_input2_overwrite_input1,
		DynamicFunc__GOST_crypt_input1_to_output1_FINAL,
		DynamicFunc__GOST_crypt_input2_to_output1_FINAL
	};
	unsigned int idx;
	for (idx = 0; idx < sizeof(gost_funcs)/sizeof(gost_funcs[0]); ++idx) {
		if (p == gost_funcs[idx])
			return 1;
	}
	return 0;
}
// Returns 1 if p is any Tiger dynamic primitive, 0 otherwise.
static int isTigerFunc(DYNAMIC_primitive_funcp p) {
	if (p==DynamicFunc__Tiger_crypt_input1_append_input2_base16 || p==DynamicFunc__Tiger_crypt_input1_append_input2 ||
		p==DynamicFunc__Tiger_crypt_input2_append_input1_base16 || p==DynamicFunc__Tiger_crypt_input2_append_input1 ||
		p==DynamicFunc__Tiger_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__Tiger_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__Tiger_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__Tiger_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__Tiger_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__Tiger_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__Tiger_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__Tiger_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__Tiger_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__Tiger_crypt_input2_to_output1_FINAL)
		return 1;
	return 0;
}
// Returns 1 if p is any WHIRLPOOL dynamic primitive, 0 otherwise.
static int isWHIRLFunc(DYNAMIC_primitive_funcp p) {
	if (p==DynamicFunc__WHIRLPOOL_crypt_input1_append_input2_base16 || p==DynamicFunc__WHIRLPOOL_crypt_input1_append_input2 ||
		p==DynamicFunc__WHIRLPOOL_crypt_input2_append_input1_base16 || p==DynamicFunc__WHIRLPOOL_crypt_input2_append_input1 ||
		p==DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__WHIRLPOOL_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__WHIRLPOOL_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__WHIRLPOOL_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__WHIRLPOOL_crypt_input2_to_output1_FINAL)
		return 1;
	return 0;
}
// Returns 1 if p is any RIPEMD-128/160/256/320 dynamic primitive, 0
// otherwise.  One if-block per digest width.
static int isRIPEMDFunc(DYNAMIC_primitive_funcp p) {
	// RIPEMD-128
	if (p==DynamicFunc__RIPEMD128_crypt_input1_append_input2_base16 || p==DynamicFunc__RIPEMD128_crypt_input1_append_input2 ||
		p==DynamicFunc__RIPEMD128_crypt_input2_append_input1_base16 || p==DynamicFunc__RIPEMD128_crypt_input2_append_input1 ||
		p==DynamicFunc__RIPEMD128_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__RIPEMD128_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__RIPEMD128_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__RIPEMD128_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__RIPEMD128_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__RIPEMD128_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__RIPEMD128_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__RIPEMD128_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__RIPEMD128_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD128_crypt_input2_to_output1_FINAL)
		return 1;
	// RIPEMD-160
	if (p==DynamicFunc__RIPEMD160_crypt_input1_append_input2_base16 || p==DynamicFunc__RIPEMD160_crypt_input1_append_input2 ||
		p==DynamicFunc__RIPEMD160_crypt_input2_append_input1_base16 || p==DynamicFunc__RIPEMD160_crypt_input2_append_input1 ||
		p==DynamicFunc__RIPEMD160_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__RIPEMD160_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__RIPEMD160_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__RIPEMD160_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__RIPEMD160_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__RIPEMD160_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__RIPEMD160_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__RIPEMD160_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__RIPEMD160_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD160_crypt_input2_to_output1_FINAL)
		return 1;
	// RIPEMD-256
	if (p==DynamicFunc__RIPEMD256_crypt_input1_append_input2_base16 || p==DynamicFunc__RIPEMD256_crypt_input1_append_input2 ||
		p==DynamicFunc__RIPEMD256_crypt_input2_append_input1_base16 || p==DynamicFunc__RIPEMD256_crypt_input2_append_input1 ||
		p==DynamicFunc__RIPEMD256_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__RIPEMD256_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__RIPEMD256_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__RIPEMD256_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__RIPEMD256_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__RIPEMD256_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__RIPEMD256_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__RIPEMD256_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__RIPEMD256_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD256_crypt_input2_to_output1_FINAL)
		return 1;
	// RIPEMD-320
	if (p==DynamicFunc__RIPEMD320_crypt_input1_append_input2_base16 || p==DynamicFunc__RIPEMD320_crypt_input1_append_input2 ||
		p==DynamicFunc__RIPEMD320_crypt_input2_append_input1_base16 || p==DynamicFunc__RIPEMD320_crypt_input2_append_input1 ||
		p==DynamicFunc__RIPEMD320_crypt_input1_overwrite_input1_base16 || p==DynamicFunc__RIPEMD320_crypt_input1_overwrite_input1 ||
		p==DynamicFunc__RIPEMD320_crypt_input2_overwrite_input2_base16 || p==DynamicFunc__RIPEMD320_crypt_input2_overwrite_input2 ||
		p==DynamicFunc__RIPEMD320_crypt_input1_overwrite_input2_base16 || p==DynamicFunc__RIPEMD320_crypt_input1_overwrite_input2 ||
		p==DynamicFunc__RIPEMD320_crypt_input2_overwrite_input1_base16 || p==DynamicFunc__RIPEMD320_crypt_input2_overwrite_input1 ||
		p==DynamicFunc__RIPEMD320_crypt_input1_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD320_crypt_input2_to_output1_FINAL)
		return 1;
	return 0;
}
// Returns 1 if p is any "*_to_output1_FINAL" primitive of the large-hash
// family (SHA-1/224/256/384/512, GOST, WHIRLPOOL, Tiger, RIPEMD-*), i.e.
// the terminal step that writes the final digest to output1; 0 otherwise.
static int isLargeHashFinalFunc(DYNAMIC_primitive_funcp p) {
	if (p==DynamicFunc__SHA1_crypt_input1_to_output1_FINAL || p==DynamicFunc__SHA1_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__SHA224_crypt_input1_to_output1_FINAL || p==DynamicFunc__SHA224_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__SHA256_crypt_input1_to_output1_FINAL || p==DynamicFunc__SHA256_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__SHA384_crypt_input1_to_output1_FINAL || p==DynamicFunc__SHA384_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__SHA512_crypt_input1_to_output1_FINAL || p==DynamicFunc__SHA512_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__GOST_crypt_input1_to_output1_FINAL || p==DynamicFunc__GOST_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__WHIRLPOOL_crypt_input1_to_output1_FINAL || p==DynamicFunc__WHIRLPOOL_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__Tiger_crypt_input1_to_output1_FINAL || p==DynamicFunc__Tiger_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD128_crypt_input1_to_output1_FINAL || p==DynamicFunc__RIPEMD128_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD160_crypt_input1_to_output1_FINAL || p==DynamicFunc__RIPEMD160_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD256_crypt_input1_to_output1_FINAL || p==DynamicFunc__RIPEMD256_crypt_input2_to_output1_FINAL ||
		p==DynamicFunc__RIPEMD320_crypt_input1_to_output1_FINAL || p==DynamicFunc__RIPEMD320_crypt_input2_to_output1_FINAL
		)
		return 1;
	return 0;
}
#ifdef _OPENMP
#ifdef MMX_COEF
// Simple euclid algorithm for GCD
// Euclid's algorithm: greatest common divisor of a and b.
// GCD(a, 0) == a and GCD(0, b) == b, matching the iterative original.
static int GCD (int a, int b) {
	return b ? GCD(b, a % b) : a;
}
// simple algorithm for LCM is (a*b)/GCD(a,b)
// Least common multiple: LCM(a,b) = (a / GCD(a,b)) * b.
// Dividing before multiplying (as the original did) reduces overflow risk.
static int LCM(int a, int b) {
	int g = GCD(a, b);
	return (a / g) * b;
}
#endif
// Configure OpenMP behavior for the format being built (compiled only
// under _OPENMP):
//  - set curdat.omp_granularity, the minimum per-thread work unit.  In SSE
//    builds this is the LCM of the SIMD batch widths (PARA * COEF) of every
//    hash primitive the format uses, so each thread's key slice stays
//    SIMD-aligned; non-SSE builds and MGF_NOTSSE2Safe formats use OMP_INC.
//  - strip FMT_OMP from the format if any primitive is OMP-unsafe.
//  - tag formats whose setup requests MGF_POOR_OMP with FMT_OMP_BAD.
static void dyna_setupOMP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt) {
	int i;
#ifndef MMX_COEF
	curdat.omp_granularity=OMP_INC;
#else
	if ((curdat.pSetup->flags& MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe)
		curdat.omp_granularity=OMP_INC;
	else {
		// Fold each primitive's SIMD width into the running LCM.
		curdat.omp_granularity = 1;
		for (i=0; Setup->pFuncs[i]; ++i) {
			if (isMD5Func(Setup->pFuncs[i]))
				curdat.omp_granularity = LCM(curdat.omp_granularity, MD5_SSE_PARA*MMX_COEF);
			else if (isMD4Func(Setup->pFuncs[i]))
				curdat.omp_granularity = LCM(curdat.omp_granularity, MD4_SSE_PARA*MMX_COEF);
			else if (isSHA1Func(Setup->pFuncs[i]))
				curdat.omp_granularity = LCM(curdat.omp_granularity, SHA1_SSE_PARA*MMX_COEF);
			else if (isSHA2_256Func(Setup->pFuncs[i]))
// SHA-256 width depends on whether SIMD SHA-256 (and PARA) is compiled in.
#if MMX_COEF_SHA256
#if SHA256_SSE_PARA
				curdat.omp_granularity = LCM(curdat.omp_granularity, SHA256_SSE_PARA*MMX_COEF_SHA256);
#else
				curdat.omp_granularity = LCM(curdat.omp_granularity, MMX_COEF_SHA256);
#endif
#else
				curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
			else if (isSHA2_512Func(Setup->pFuncs[i]))
// Same pattern for SHA-512.
#if MMX_COEF_SHA512
#if SHA512_SSE_PARA
				curdat.omp_granularity = LCM(curdat.omp_granularity, SHA512_SSE_PARA*MMX_COEF_SHA512);
#else
				curdat.omp_granularity = LCM(curdat.omp_granularity, MMX_COEF_SHA512);
#endif
#else
				curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC);
#endif
		}
	}
#endif
	// Any OMP-unsafe primitive disqualifies the whole format from OMP.
	for (i=0; Setup->pFuncs[i]; ++i) {
		if (isBadOMPFunc(Setup->pFuncs[i]))
			pFmt->params.flags &= (~FMT_OMP);
	}
	if ((pFmt->params.flags&FMT_OMP)==FMT_OMP && (curdat.pSetup->startFlags&MGF_POOR_OMP)==MGF_POOR_OMP)
		pFmt->params.flags |= FMT_OMP_BAD;
}
#endif
int dynamic_SETUP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt)
{
int i, j, cnt, cnt2, x;
DYNAMIC_primitive_funcp *pFuncs;
if (Setup->flags & MGF_ColonNOTValid)
{
extern struct options_main options;
if (options.loader.field_sep_char == ':')
{
return 0;
}
}
// Deal with depricated 1st functions. Convert them to proper 'flags'
if (Setup->pFuncs[0] == DynamicFunc__PHPassSetup)
Setup->startFlags |= MGF_PHPassSetup;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeysToInput)
Setup->startFlags |= MGF_KEYS_INPUT;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2)
Setup->startFlags |= MGF_KEYS_CRYPT_IN2;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1)
Setup->startFlags |= MGF_KEYS_BASE16_IN1;
if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
Setup->startFlags |= MGF_KEYS_BASE16_IN1_Offset32;
curdat.dynamic_hdaa_salt = ((Setup->flags &MGF_HDAA_SALT)==MGF_HDAA_SALT) ? 1 : 0;
curdat.dynamic_40_byte_input = ((Setup->startFlags&MGF_INPUT_20_BYTE)==MGF_INPUT_20_BYTE) ? 1 : 0;
curdat.dynamic_48_byte_input = ((Setup->startFlags&MGF_INPUT_24_BYTE)==MGF_INPUT_24_BYTE) ? 1 : 0;
curdat.dynamic_64_byte_input = ((Setup->startFlags&MGF_INPUT_32_BYTE)==MGF_INPUT_32_BYTE) ? 1 : 0;
curdat.dynamic_56_byte_input = ((Setup->startFlags&MGF_INPUT_28_BYTE)==MGF_INPUT_28_BYTE) ? 1 : 0;
curdat.dynamic_80_byte_input = ((Setup->startFlags&MGF_INPUT_40_BYTE)==MGF_INPUT_40_BYTE) ? 1 : 0;
curdat.dynamic_96_byte_input = ((Setup->startFlags&MGF_INPUT_48_BYTE)==MGF_INPUT_48_BYTE) ? 1 : 0;
curdat.dynamic_128_byte_input= ((Setup->startFlags&MGF_INPUT_64_BYTE)==MGF_INPUT_64_BYTE) ? 1 : 0;
curdat.FldMask = 0;
curdat.b2Salts = ((Setup->flags&MGF_SALTED2)==MGF_SALTED2) ? 1 : 0;
curdat.dynamic_base16_upcase = ((Setup->flags&MGF_BASE_16_OUTPUT_UPCASE)==MGF_BASE_16_OUTPUT_UPCASE) ? 1 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD0)==MGF_FLD0) ? MGF_FLD0 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD1)==MGF_FLD1) ? MGF_FLD1 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD2)==MGF_FLD2) ? MGF_FLD2 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD3)==MGF_FLD3) ? MGF_FLD3 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD4)==MGF_FLD4) ? MGF_FLD4 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD5)==MGF_FLD5) ? MGF_FLD5 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD6)==MGF_FLD6) ? MGF_FLD6 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD7)==MGF_FLD7) ? MGF_FLD7 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD8)==MGF_FLD8) ? MGF_FLD8 : 0;
curdat.FldMask |= ((Setup->flags&MGF_FLD9)==MGF_FLD9) ? MGF_FLD9 : 0;
curdat.dynamic_base64_inout = 0;
curdat.dynamic_salt_as_hex = 0;
curdat.force_md5_ctx = 0;
curdat.nUserName = 0;
curdat.nPassCase = 1;
curdat.md5_startup_in_x86 = curdat.dynamic_use_sse = 0; // if 0, then never use SSE2
curdat.init = 0;
curdat.pSetup = Setup;
pFmt->methods.binary = binary;
pFmt->methods.cmp_all=cmp_all;
pFmt->methods.cmp_one=cmp_one;
#if FMT_MAIN_VERSION > 9
pFmt->methods.source=fmt_default_source;
#endif
pFmt->methods.salt = salt;
pFmt->methods.set_salt = set_salt;
pFmt->methods.salt_hash = salt_hash;
//pFmt->params.format_name = str_alloc_copy(Setup->szFORMAT_NAME);
pFmt->params.format_name = "";
pFmt->params.benchmark_length = 0; // NOTE 0 'assumes' salted. If unsalted, we set back to -1
pFmt->params.salt_size = 0;
pFmt->params.min_keys_per_crypt = 1;
curdat.using_flat_buffers_sse2_ok = 0; // used to distingish MGF_NOTSSE2Safe from MGF_FLAT_BUFFERS
if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS)
curdat.using_flat_buffers_sse2_ok = 1;
#ifdef MMX_COEF
curdat.dynamic_use_sse = 1; // if 1, then we are in SSE2 mode (but can switch out)
if ((Setup->flags & MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) {
curdat.dynamic_use_sse = 0; // Do not use SSE code at all.
} else if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS) {
curdat.dynamic_use_sse = 0; // uses flat buffers but will use SSE code (large formats use the flat buffers, and the SSE2 code 'mixes' them).
curdat.using_flat_buffers_sse2_ok = 1;
} else if ((Setup->flags & MGF_StartInX86Mode) == MGF_StartInX86Mode) {
curdat.dynamic_use_sse = 2; // if 2, then we are in SSE2 mode, but currently using X86 (and can switch back to SSE2).
curdat.md5_startup_in_x86 = 1;
}
if (curdat.dynamic_use_sse || curdat.using_flat_buffers_sse2_ok) {
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT;
pFmt->params.algorithm_name = ALGORITHM_NAME;
} else {
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
}
#else
pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
#endif
dynamic_use_sse = curdat.dynamic_use_sse;
// Ok, set the new 'constants' data
memset(curdat.Consts, 0, sizeof(curdat.Consts));
memset(curdat.ConstsLen, 0, sizeof(curdat.ConstsLen));
for (curdat.nConsts = 0; curdat.nConsts < 8; ++curdat.nConsts)
{
if (Setup->pConstants[curdat.nConsts].Const == NULL)
break;
//curdat.Consts[curdat.nConsts] = (unsigned char*)str_alloc_copy(Setup->pConstants[curdat.nConsts].Const);
//curdat.ConstsLen[curdat.nConsts] = strlen(Setup->pConstants[curdat.nConsts].Const);
// we really do not 'have' to null terminate, but do just to be on the 'safe' side.
curdat.Consts[curdat.nConsts] = mem_alloc_tiny(Setup->pConstants[curdat.nConsts].len+1, MEM_ALIGN_NONE);
memcpy(curdat.Consts[curdat.nConsts], Setup->pConstants[curdat.nConsts].Const, Setup->pConstants[curdat.nConsts].len);
curdat.Consts[curdat.nConsts][Setup->pConstants[curdat.nConsts].len] = 0;
curdat.ConstsLen[curdat.nConsts] = Setup->pConstants[curdat.nConsts].len;
}
if (Setup->flags & MGF_INPBASE64)
{
curdat.dynamic_base64_inout = 1;
pFmt->methods.binary = binary_b64;
}
if (Setup->flags & MGF_INPBASE64_4x6)
{
curdat.dynamic_base64_inout = 2;
pFmt->methods.binary = binary_b64_4x6;
pFmt->methods.cmp_all = cmp_all_64_4x6;
pFmt->methods.cmp_one = cmp_one_64_4x6;
#if !ARCH_LITTLE_ENDIAN
pFmt->methods.binary_hash[0] = binary_hash_0_64x4;
pFmt->methods.binary_hash[1] = binary_hash_1_64x4;
pFmt->methods.binary_hash[2] = binary_hash_2_64x4;
pFmt->methods.binary_hash[3] = binary_hash_3_64x4;
pFmt->methods.binary_hash[4] = binary_hash_4_64x4;
pFmt->methods.binary_hash[5] = binary_hash_5_64x4;
pFmt->methods.get_hash[0] = get_hash_0_64x4;
pFmt->methods.get_hash[1] = get_hash_1_64x4;
pFmt->methods.get_hash[2] = get_hash_2_64x4;
pFmt->methods.get_hash[3] = get_hash_3_64x4;
pFmt->methods.get_hash[4] = get_hash_4_64x4;
pFmt->methods.get_hash[5] = get_hash_5_64x4;
#endif
// Not enough bits in a single WORD to do the 7th one.
pFmt->methods.binary_hash[6] = NULL;
pFmt->methods.get_hash[6] = NULL;
}
// printf ("%.13s",Setup->szFORMAT_NAME);
if ( (Setup->flags & (MGF_INPBASE64|MGF_INPBASE64_4x6|MGF_INPBASE64a)) == 0) {
pFmt->params.flags |= FMT_SPLIT_UNIFIES_CASE;
// printf (" Setting FMT_SPLIT_UNIFIES_CASE");
if (pFmt->methods.split == split) {
pFmt->methods.split = split_UC;
// printf (" split set to split_UC()\n");
}
}
// else printf (" split set to split()\n");
if (Setup->flags & MGF_UTF8)
pFmt->params.flags |= FMT_UTF8;
if (Setup->flags & MGF_INPBASE64a) {
curdat.dynamic_base64_inout = 1;
pFmt->methods.binary = binary_b64a;
}
if ( (Setup->flags & MGF_USERNAME) == MGF_USERNAME)
curdat.nUserName = 1;
if ( (Setup->flags & MGF_USERNAME_UPCASE) == MGF_USERNAME_UPCASE)
curdat.nUserName = 2;
if ( (Setup->flags & MGF_USERNAME_LOCASE) == MGF_USERNAME_LOCASE)
curdat.nUserName = 3;
// Ok, what 'flag' in the format struct, do we clear???
if ( (Setup->flags & MGF_PASSWORD_UPCASE) == MGF_PASSWORD_UPCASE) {
curdat.nPassCase = 2;
pFmt->params.flags &= (~FMT_CASE);
}
if ( (Setup->flags & MGF_PASSWORD_LOCASE) == MGF_PASSWORD_LOCASE) {
curdat.nPassCase = 3;
pFmt->params.flags &= (~FMT_CASE);
}
if ( (Setup->flags & MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX)
curdat.dynamic_salt_as_hex = 1;
if ( (Setup->flags & MGF_SALT_AS_HEX_TO_SALT2) == MGF_SALT_AS_HEX_TO_SALT2) {
curdat.dynamic_salt_as_hex = 2;
if (curdat.b2Salts)
return !fprintf(stderr, "Error invalid format %s: MGF_SALT_AS_HEX_TO_SALT2 and MGF_SALTED2 are not valid to use in same format\n", Setup->szFORMAT_NAME);
curdat.b2Salts = 2;
}
if ( (Setup->flags & MGF_SALT_UNICODE_B4_CRYPT) == MGF_SALT_UNICODE_B4_CRYPT && curdat.dynamic_salt_as_hex)
curdat.dynamic_salt_as_hex |= 0x100;
if ( (Setup->flags & MGF_SALTED) == 0)
{
curdat.dynamic_FIXED_SALT_SIZE = 0;
pFmt->params.benchmark_length = -1;
pFmt->params.salt_size = 0;
}
else
{
pFmt->params.salt_size = sizeof(void *);
if (Setup->SaltLen > 0)
curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
else
{
// says we have a salt, but NOT a fixed sized one that we 'know' about.
// if the SaltLen is -1, then there is NO constraints. If the SaltLen
// is -12 (or any other neg number other than -1), then there is no
// fixed salt length, but the 'max' salt size is -SaltLen. So, -12
// means any salt from 1 to 12 is 'valid'.
if (Setup->SaltLen > -2)
curdat.dynamic_FIXED_SALT_SIZE = -1;
else {
curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
#if !defined (MMX_COEF)
// for non-sse, we limit ourselves to 110 bytes, not 55. So, we can add 55 to this value
curdat.dynamic_FIXED_SALT_SIZE -= 55;
#endif
}
}
}
if (Setup->MaxInputLen)
pFmt->params.plaintext_length = Setup->MaxInputLen;
else {
if ( ((Setup->flags&MGF_FLAT_BUFFERS)==MGF_FLAT_BUFFERS) || ((Setup->flags&MGF_NOTSSE2Safe)==MGF_NOTSSE2Safe)) {
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 32)
pFmt->params.plaintext_length = 32;
} else {
pFmt->params.plaintext_length = 55 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 1) {
pFmt->params.plaintext_length = 1;
fprintf(stderr, "\nError, for format %s, MMX build, is not valid due to TOO long of a SaltLength\n", Setup->szFORMAT_NAME);
}
}
}
#ifndef MMX_COEF
if (Setup->MaxInputLenX86) {
pFmt->params.plaintext_length = Setup->MaxInputLenX86;
} else {
if (Setup->SaltLenX86)
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLenX86);
else
pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
if (pFmt->params.plaintext_length < 32)
pFmt->params.plaintext_length = 32;
}
#endif
curdat.store_keys_in_input = !!(Setup->startFlags&MGF_KEYS_INPUT );
curdat.input2_set_len32 = !!(Setup->startFlags&MGF_SET_INP2LEN32);
#if FMT_MAIN_VERSION > 9
if (Setup->startFlags&MGF_SOURCE) {
if (Setup->startFlags&MGF_INPUT_20_BYTE) pFmt->methods.source = source_20_hex;
else if (Setup->startFlags&MGF_INPUT_28_BYTE) pFmt->methods.source = source_28_hex;
else if (Setup->startFlags&MGF_INPUT_32_BYTE) pFmt->methods.source = source_32_hex;
else if (Setup->startFlags&MGF_INPUT_40_BYTE) pFmt->methods.source = source_40_hex;
else if (Setup->startFlags&MGF_INPUT_48_BYTE) pFmt->methods.source = source_48_hex;
else if (Setup->startFlags&MGF_INPUT_64_BYTE) pFmt->methods.source = source_64_hex;
else pFmt->methods.source = source;
}
#endif
if (!curdat.store_keys_in_input && Setup->startFlags&MGF_KEYS_INPUT_BE_SAFE)
curdat.store_keys_in_input = 3;
curdat.store_keys_in_input_unicode_convert = !!(Setup->startFlags&MGF_KEYS_UNICODE_B4_CRYPT);
if (curdat.store_keys_in_input_unicode_convert && curdat.store_keys_in_input)
return !fprintf(stderr, "Error invalid format %s: Using MGF_KEYS_INPUT and MGF_KEYS_UNICODE_B4_CRYPT in same format is NOT valid\n", Setup->szFORMAT_NAME);
curdat.store_keys_normal_but_precompute_md5_to_output2 = !!(Setup->startFlags&MGF_KEYS_CRYPT_IN2);
curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1 = !!(Setup->startFlags&MGF_KEYS_BASE16_IN1);
if (!!(Setup->startFlags&MGF_KEYS_BASE16_X86_IN1)) {
curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1=2;
}
if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1)
curdat.store_keys_normal_but_precompute_md5_to_output2 = 1;
curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1_offset32 = !!(Setup->startFlags&MGF_KEYS_BASE16_IN1_Offset32);
if (!!(Setup->startFlags&MGF_KEYS_BASE16_X86_IN1_Offset32))
curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1_offset32=2;
if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1_offset32)
{
curdat.store_keys_normal_but_precompute_md5_to_output2 = 1;
}
if (Setup->startFlags&MGF_PHPassSetup)
{
pFmt->methods.salt = salt_phpass;
#ifdef _OPENMP
#ifdef MMX_COEF
// no reason to do 128 crypts, causes slow validity checking. But we do get some gains
// by doing more than simple 1 set of MMX_COEF
pFmt->params.algorithm_name = "128/128 " SSE_type " 96x4x" STRINGIZE(MD5_SSE_PARA);
pFmt->params.max_keys_per_crypt = 96*MD5_SSE_PARA;
#else
#if ARCH_LITTLE_ENDIAN
pFmt->params.max_keys_per_crypt = 96;
#else
pFmt->params.max_keys_per_crypt = 2;
#endif
#if MD5_X2
pFmt->params.algorithm_name = "32/" ARCH_BITS_STR " 48x2 (MD5_body)";
#else
pFmt->params.algorithm_name = "32/" ARCH_BITS_STR " 96x1 (MD5_body)";
#endif
#endif
#ifdef MMX_COEF
pFmt->params.algorithm_name = "128/128 " SSE_type " 4x4x" STRINGIZE(MD5_SSE_PARA);
pFmt->params.max_keys_per_crypt = 16*MD5_SSE_PARA;
#endif
#else
// In non-sse mode, 1 test runs as fast as 128. But validity checking is MUCH faster if
// we leave it at only 1.
pFmt->params.max_keys_per_crypt = 1;
#if MD5_X2
pFmt->params.max_keys_per_crypt = 2;
pFmt->params.algorithm_name = "32/" ARCH_BITS_STR " 1x2 (MD5_body)";
#else
pFmt->params.algorithm_name = "32/" ARCH_BITS_STR " (MD5_body)";
#endif
#endif
pFmt->params.min_keys_per_crypt = 1;
saltlen = 8;
// no reason to run double tests. The 1 salt vs MANY salts is the
// same speed, so why double the benchmark time for no reason.
pFmt->params.benchmark_length = -1;
}
if ((Setup->startFlags) == 0)
{
// Ok, if we do not have some 'special' loader function, we MUST first clean some
// input. If that is not done, there is NO WAY this is a valid format. This is
// NOT an intelligent check, but more like the dummy lights on newer automobiles.
// You know it will not work, but do not know 'why', nor should you care.
if (Setup->pFuncs[0] != DynamicFunc__clean_input &&
Setup->pFuncs[0] != DynamicFunc__clean_input2 &&
Setup->pFuncs[0] != DynamicFunc__clean_input_kwik &&
Setup->pFuncs[0] != DynamicFunc__clean_input2_kwik &&
Setup->pFuncs[0] != DynamicFunc__clean_input_full)
return !fprintf(stderr, "Error invalid format %s: The first command MUST be a clean of input 1 or input 2 OR a special key 2 input loader function\n", Setup->szFORMAT_NAME);
}
if ( (Setup->flags&MGF_SALTED2)==MGF_SALTED2 && (Setup->flags&MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX)
{
// if the user wants salt_as_hex, then here can NOT be 2 salts.
return !fprintf(stderr, "Error invalid format %s: If using MGF_SALT_AS_HEX flag, then you can NOT have a 2nd salt.\n", Setup->szFORMAT_NAME);
}
if (Setup->pFuncs && Setup->pFuncs[0])
{
int z;
for (z = 0; Setup->pFuncs[z]; ++z)
;
z += 50;
curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);
j = 0;
#if !ARCH_LITTLE_ENDIAN
// for bigendian, we do NOT store into keys, since we byte swap them.
if (curdat.store_keys_in_input==1) {
// this is only a minor speed hit, so simply fix by doing this. There is an
// extra memcpy, that is it.
curdat.store_keys_in_input = 0;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
}
// NOTE NOTE NOTE, FIXME. These are 'hacks' which slow stuff way down. We should look at
// building preloads that CAN do this. Store key input to input 1, but then do not use
// input 1. Put a copy to input 2, then append, etc. In that way, we cut the number of
// MD5's down by at least 1.
//
// But for now, just get it working. Get it working faster later.
// note, with Setup->pFuncs[0]==DynamicFunc__set_input_len_32, we only will handle type 6 and 7
// for now we have this 'turned' off. It is fixed for type 6, 7 and 14. It is left on for the
// john.ini stuff. Thus, if someone builds the intel version type 6, it will work (but slower).
if (curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1==1 && Setup->pFuncs[0]==DynamicFunc__set_input_len_32) {
curdat.store_keys_normal_but_precompute_md5_to_output2_base16_to_input1 = 0;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__crypt_md5;
curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input;
Setup->pFuncs[0] = DynamicFunc__append_from_last_output_as_base16;
}
#endif
for (i=0; Setup->pFuncs[i]; ++i)
{
if (j > z-10)
{
int k;
z += 100;
curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD);
for (k = 0; k <= j; ++k)
curdat.dynamic_FUNCTIONS[k] = curdat.dynamic_FUNCTIONS[k];
}
if (curdat.store_keys_in_input)
{
if (Setup->pFuncs[i] == DynamicFunc__append_keys)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_keys2)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys2 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__clean_input)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but clean_input called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_salt)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_salt called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output2_to_input1_as_base16)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output2_to_input1_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output2_to_input1_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output_as_base16)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output_as_base16s called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_2nd_salt called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__set_input_len_32)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__set_input_len_64)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__overwrite_salt_to_input1_no_size_fix)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME);
if (Setup->pFuncs[i] == DynamicFunc__append_input_from_input2)
return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME);
}
// Ok if copy constants are set, make SURE we have that many constants.
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST1 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST1) && curdat.nConsts == 0)
return !fprintf(stderr, "Error invalid format %s: Append Constant function called, but NO constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST2 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST2) && curdat.nConsts < 2)
return !fprintf(stderr, "Error invalid format %s: Append Constant #2 function called, but NO constants, or less than 2 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST3 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST3) && curdat.nConsts < 3)
return !fprintf(stderr, "Error invalid format %s: Append Constant #3 function called, but NO constants, or less than 3 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST4 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST4) && curdat.nConsts < 4)
return !fprintf(stderr, "Error invalid format %s: Append Constant #4 function called, but NO constants, or less than 4 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST5 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST5) && curdat.nConsts < 5)
return !fprintf(stderr, "Error invalid format %s: Append Constant #5 function called, but NO constants, or less than 5 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST6 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST6) && curdat.nConsts < 6)
return !fprintf(stderr, "Error invalid format %s: Append Constant #6 function called, but NO constants, or less than 6 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST7 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST7) && curdat.nConsts < 7)
return !fprintf(stderr, "Error invalid format %s: Append Constant #7 function called, but NO constants, or less than 7 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST8 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST8) && curdat.nConsts < 8)
return !fprintf(stderr, "Error invalid format %s: Append Constant #8 function called, but NO constants, or less than 8 constants in the format\n", Setup->szFORMAT_NAME);
if ( (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt || Setup->pFuncs[i] == DynamicFunc__append_2nd_salt2) && curdat.b2Salts == 0)
return !fprintf(stderr, "Error invalid format %s: A call to one of the 'salt-2' functions, but this format does not have MFG_SALT2 flag set\n", Setup->szFORMAT_NAME);
// Ok, if we have made it here, the function is 'currently' still valid. Load this pointer into our array of pointers.
pFuncs = ConvertFuncs(Setup->pFuncs[i], &cnt2);
for (x = 0; x < cnt2; ++x) {
curdat.dynamic_FUNCTIONS[j++] = pFuncs[x];
if (pFuncs[x] == DynamicFunc__setmode_unicode)
pFmt->params.flags |= FMT_UNICODE;
if (isSHA1Func(pFuncs[x])) {
if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME))
pFmt->params.algorithm_name = ALGORITHM_NAME_S;
else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86))
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S;
}
if (isSHA2_256Func(pFuncs[x])) {
#ifdef MMX_COEF_SHA256
if (curdat.using_flat_buffers_sse2_ok)
pFmt->params.algorithm_name = ALGORITHM_NAME_S2_256;
else
#endif
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_256;
}
if (isSHA2_512Func(pFuncs[x])) {
#ifdef MMX_COEF_SHA512
if (curdat.using_flat_buffers_sse2_ok)
pFmt->params.algorithm_name = ALGORITHM_NAME_S2_512;
else
#endif
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_512;
}
if (isMD4Func(pFuncs[x])) {
if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME))
pFmt->params.algorithm_name = ALGORITHM_NAME_4;
else if(!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86))
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_4;
}
if (isWHIRLFunc(pFuncs[x])) {
if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME))
pFmt->params.algorithm_name = ALGORITHM_NAME_WP2;
else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86))
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_WP2;
}
if (isGOSTFunc(pFuncs[x])) {
if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME))
pFmt->params.algorithm_name = ALGORITHM_NAME_GST2;
else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86))
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_GST2;
}
if (isTigerFunc(pFuncs[x])) {
if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME))
pFmt->params.algorithm_name = ALGORITHM_NAME_TGR;
else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86))
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_TGR;
}
if (isRIPEMDFunc(pFuncs[x])) {
if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME))
pFmt->params.algorithm_name = ALGORITHM_NAME_RIPEMD;
else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86))
pFmt->params.algorithm_name = ALGORITHM_NAME_X86_RIPEMD;
}
}
if (isLargeHashFinalFunc(curdat.dynamic_FUNCTIONS[j-1]))
{
if (Setup->pFuncs[i+1])
return !fprintf(stderr, "Error invalid format %s: DynamicFunc__LARGE_HASH_crypt_inputX_to_output1_FINAL, can ONLY be used as the last function in a script\n", Setup->szFORMAT_NAME);
}
}
curdat.dynamic_FUNCTIONS[j] = NULL;
}
if (!Setup->pPreloads || Setup->pPreloads[0].ciphertext == NULL)
{
return !fprintf(stderr, "Error invalid format %s: Error, no validation hash(s) for this format\n", Setup->szFORMAT_NAME);
}
cnt = 0;
#ifdef _OPENMP
dyna_setupOMP(Setup, pFmt);
#endif
{
struct fmt_tests *pfx = mem_alloc_tiny(ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests), MEM_ALIGN_WORD);
memset(pfx, 0, ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests));
for (i = 0; cnt < ARRAY_COUNT(dynamic_tests) -1; ++i, ++cnt)
{
if (Setup->pPreloads[i].ciphertext == NULL) {
if (Setup->startFlags&MGF_PHPassSetup)
// for phpass, do not load ANY more than the 9 that are in the preload.
// loading more will simply slow down the validation code loop at startup.
break;
i = 0;
}
if (Setup->pPreloads[i].ciphertext[0] == 'A' && Setup->pPreloads[i].ciphertext[1] == '=') {
if (pers_opts.target_enc != ASCII && pers_opts.target_enc != ISO_8859_1)
continue;
pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
}
else if (Setup->pPreloads[i].ciphertext[0] == 'U' && Setup->pPreloads[i].ciphertext[1] == '=') {
if (pers_opts.target_enc != UTF_8)
continue;
pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]);
}
else
pfx[cnt].ciphertext = str_alloc_copy(Setup->pPreloads[i].ciphertext);
pfx[cnt].plaintext = str_alloc_copy(Setup->pPreloads[i].plaintext);
#if FMT_MAIN_VERSION > 9
pfx[cnt].fields[0] = Setup->pPreloads[i].fields[0] ? str_alloc_copy(Setup->pPreloads[i].fields[0]) : "";
pfx[cnt].fields[1] = pfx[cnt].ciphertext;
for (j = 2; j < 10; ++j)
pfx[cnt].fields[j] = Setup->pPreloads[i].fields[j] ? str_alloc_copy(Setup->pPreloads[i].fields[j]) : "";
#else
pfx[cnt].flds[0] = Setup->pPreloads[i].flds[0] ? str_alloc_copy(Setup->pPreloads[i].flds[0]) : "";
pfx[cnt].flds[1] = pfx[cnt].ciphertext;
for (j = 2; j < 10; ++j)
pfx[cnt].flds[j] = Setup->pPreloads[i].flds[j] ? str_alloc_copy(Setup->pPreloads[i].flds[j]) : "";
#endif
}
pfx[cnt].ciphertext = NULL;
pfx[cnt].plaintext = NULL;
pFmt->params.tests = pfx;
}
if (curdat.dynamic_base16_upcase)
dynamic_itoa16 = itoa16u;
else
dynamic_itoa16 = itoa16;
{
char s[512], *cp;
cp = Setup->szFORMAT_NAME;
cp = strchr(Setup->szFORMAT_NAME, ' ');
++cp;
sprintf(s, "%s %s", cp, pFmt->params.algorithm_name);
pFmt->params.algorithm_name = str_alloc_copy(s);
}
return 1;
}
/*
 * Load one dynamic sub-format (index idx) into pFmt.
 * idx < 1000 selects a reserved builtin preload; idx >= 1000 is built by the
 * john.conf parser.  The "$dynamic_NNN$" signature is recovered from the
 * format's first test ciphertext, the salt offset is derived from the digest
 * width, and a private copy of curdat is attached to pFmt.
 * Returns 1 on success, 0 on any failure.
 */
static int LoadOneFormat(int idx, struct fmt_main *pFmt)
{
	extern struct options_main options;
	char label[16], label_id[16], *cp;

	memcpy(pFmt, &fmt_Dynamic, sizeof(struct fmt_main));
	dynamic_RESET(pFmt);

	// Ok we need to list this as a dynamic format (even for the 'thin' formats)
	pFmt->params.flags |= FMT_DYNAMIC;

	/* reserved (builtin) sub-formats live below 1000; the rest come from the parser */
	if (idx < 1000) {
		if (dynamic_RESERVED_PRELOAD_SETUP(idx, pFmt) != 1)
			return 0;
	}
	else {
		if (dynamic_LOAD_PARSER_FUNCTIONS(idx, pFmt) != 1)
			return 0;
	}

	/* we 'have' to take the sig from the test array. If we do not have */
	/* our preload array 'solid', then the idx will not be the proper */
	/* number. So we simply grab the label from the test cyphertext string */
	strncpy(label, pFmt->params.tests[0].ciphertext, 15);
	label[15] = 0;	/* strncpy() does NOT NUL-terminate when src is >= 15 chars (CERT STR32-C) */
	cp = strchr(&label[1], '$');
	if (!cp)
		return 0;	/* malformed test ciphertext: no closing '$' in the signature */
	cp[1] = 0;
	strcpy(label_id, &label[1]);
	cp = strchr(label_id, '$');
	*cp = 0;	/* cannot be NULL: label_id still ends with the '$' kept above */
	pFmt->params.label = str_alloc_copy(label_id);
	strcpy(curdat.dynamic_WHICH_TYPE_SIG, label);
	curdat.dynamic_HASH_OFFSET = strlen(label);
	/* the salt starts right after signature + hex/base64 digest + separator */
	if (curdat.dynamic_base64_inout == 1)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 22 + 1;
	else if (curdat.dynamic_base64_inout == 2)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 16 + 1;
	else if (curdat.dynamic_40_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 40 + 1;
	else if (curdat.dynamic_48_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 48 + 1;
	else if (curdat.dynamic_64_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 64 + 1;
	else if (curdat.dynamic_56_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 56 + 1;
	else if (curdat.dynamic_80_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 80 + 1;
	else if (curdat.dynamic_96_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 96 + 1;
	else if (curdat.dynamic_128_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 128 + 1;
	else
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 32 + 1;

	/* give this pFmt its own frozen snapshot of curdat */
	pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
	memcpy(pFmt->private.data, &curdat, sizeof(private_subformat_data));

	/* sanity check: the signature we derived must match the test ciphertext */
	if (strncmp(curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext, strlen(curdat.dynamic_WHICH_TYPE_SIG)))
	{
		fprintf(stderr, "ERROR, when loading dynamic formats, the wrong curdat item was linked to this type:\nTYPE_SIG=%s\nTest_Dat=%s\n",
				curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext);
		return 0;
	}
	return 1;
}
/*
 * Register all (or one requested) dynamic sub-formats.
 * On a specific "--format=dynamic_NNN" request only that one is loaded;
 * otherwise every valid index in 0..4999 is loaded.  *ptr receives the
 * allocated array; the return value (also stored in nFmts) is the count.
 */
int dynamic_Register_formats(struct fmt_main **ptr)
{
	extern struct options_main options;
	int i, nValid, nLoaded, single = -1;
	int wildcard = (options.format && strstr(options.format, "*")) ? 1 : 0;

	Dynamic_Load_itoa16_w2();

	/* did the user name one specific sub-format, directly or via --subformat? */
	if (options.format && !wildcard && !strncmp(options.format, "dynamic_", 8))
		sscanf(options.format, "dynamic_%d", &single);
	if (options.format && options.subformat &&
	    !strcmp(options.format, "dynamic") &&
	    !strncmp(options.subformat, "dynamic_", 8))
		sscanf(options.subformat, "dynamic_%d", &single);

	if (options.dynamic_bare_hashes_always_valid == 'Y')
		m_allow_rawhash_fixup = 1;
	else if (options.dynamic_bare_hashes_always_valid != 'N' &&
	         cfg_get_bool(SECTION_OPTIONS, NULL, "DynamicAlwaysUseBareHashes", 1))
		m_allow_rawhash_fixup = 1;

	if (single != -1) {
		/* user wanted only a 'specific' format. Simply load that one. */
		m_allow_rawhash_fixup = 1;
		if (dynamic_IS_VALID(single) == 0)
			return 0;
		pFmts = mem_alloc_tiny(sizeof(pFmts[0]), MEM_ALIGN_WORD);
		if (!LoadOneFormat(single, pFmts))
			return 0;
		*ptr = pFmts;
		return (nFmts = 1);
	}

	/* first pass: count the valid sub-formats so we can size the array */
	nValid = 0;
	for (i = 0; i < 5000; ++i)
		if (dynamic_IS_VALID(i) == 1)
			++nValid;

	/* second pass: actually load them */
	pFmts = mem_alloc_tiny(sizeof(pFmts[0]) * nValid, MEM_ALIGN_WORD);
	nLoaded = 0;
	for (i = 0; i < 5000; ++i) {
		if (dynamic_IS_VALID(i) != 1)
			continue;
		if (LoadOneFormat(i, &pFmts[nLoaded]))
			++nLoaded;
		else
			--nValid;	/* a load failure shrinks the final count */
	}
	*ptr = pFmts;
	return (nFmts = nValid);
}
/*
 * finds the 'proper' sub format from the allocated formats, IFF that format 'exists'
 * Returns the matching fmt_main, or NULL when no loaded format carries the
 * "$dynamic_<which>$" signature.
 */
static struct fmt_main *dynamic_Get_fmt_main(int which)
{
	char label[40];
	int i;

	/* build the signature we are looking for, e.g. "$dynamic_1034$".
	   snprintf keeps this provably in-bounds for any int value. */
	snprintf(label, sizeof(label), "$dynamic_%d$", which);
	for (i = 0; i < nFmts; ++i) {
		private_subformat_data *pPriv = pFmts[i].private.data;
		if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label))
			return &pFmts[i];
	}
	return NULL;
}
/*
 * This function will 'forget' which md5-gen subtype we are working with. It will allow
 * a different type to be used. Very useful for things like -test (benchmarking).
 * NOTE: the fmt parameter is unused; the reset acts on the module-wide state.
 */
static void dynamic_RESET(struct fmt_main *fmt)
{
	memset(&curdat, 0, sizeof(curdat));
	m_count = 0;	/* (was assigned twice; the redundant duplicate was removed) */
	keys_dirty = 0;
	cursalt=cursalt2=username=0;
	saltlen=saltlen2=usernamelen=0;
	// make 'sure' we start out with blank inputs.
#ifdef MMX_COEF
	if (input_buf) {
#else
	if (input_buf_X86) {
#endif
		__nonMP_DynamicFunc__clean_input_full();
		__nonMP_DynamicFunc__clean_input2_full();
	}
}
/*
 * This will LINK our functions into some other fmt_main structure. That way
 * that structure can use our code. The other *_fmt.c file will need to
 * 'override' the valid, the binary and the salt functions, and make changes
 * to the hash, BEFORE calling into the dynamic valid/binary/salt functions.
 * Other than those functions (and calling into this linkage function at init time)
 * that is about all that needs to be in that 'other' *_fmt.c file, as long as the
 * format is part of the md5-generic 'class' of functions.
 */
/*
 * Link a 'thin' format's fmt_main (pFmt) to the dynamic sub-format named in
 * the ciphertext signature (e.g. "$dynamic_1034$...").  Copies the dynamic
 * format's params and method pointers into pFmt so the thin wrapper runs on
 * the dynamic engine.  orig_sig is only used in error messages.  On any bad
 * signature or failed validation this exits the whole process.  When
 * bInitAlso is non-zero, the located dynamic format is also init()'ed.
 * Returns the located dynamic fmt_main.
 */
struct fmt_main *dynamic_THIN_FORMAT_LINK(struct fmt_main *pFmt, char *ciphertext, char *orig_sig, int bInitAlso)
{
	int i, valid, nFmtNum;
	struct fmt_main *pFmtLocal;
	static char subformat[17], *cp;
	m_allow_rawhash_fixup = 0;
	/* isolate the "$dynamic_NNN$" prefix of the ciphertext (strncpy NUL-pads
	   short inputs, and [16] is forced to 0 for long ones) */
	strncpy(subformat, ciphertext, 16);
	subformat[16] = 0;
	/* the number starts at offset 9 ("$dynamic_" is 9 chars); truncate just
	   after its closing '$' */
	cp = strchr(&subformat[9], '$');
	if (cp)
		cp[1] = 0;
	nFmtNum = -1;
	sscanf(subformat, "$dynamic_%d", &nFmtNum);
	if (nFmtNum==-1)
		exit(fprintf(stderr, "Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext));
	pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
	if (pFmtLocal == NULL) {
		exit(fprintf(stderr, "Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext));
	}
	/* make sure the sample ciphertext really parses under the target format */
	valid = pFmtLocal->methods.valid(ciphertext, pFmtLocal);
	if (!valid)
		exit(fprintf(stderr, "Error, trying to link to %s using ciphertext=%s FAILED\n", subformat, ciphertext));
	/* mirror the dynamic format's tuning params into the thin format */
	pFmt->params.max_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
	pFmt->params.min_keys_per_crypt = pFmtLocal->params.min_keys_per_crypt;
	pFmt->params.flags = pFmtLocal->params.flags;
	/* dynamic salts are passed as a pointer-sized blob, not inline data */
	if (pFmtLocal->params.salt_size)
		pFmt->params.salt_size = sizeof(void*);
	else
		pFmt->params.salt_size = 0;
	/* wire the thin format's methods to the dynamic implementations; the
	   thin *_fmt.c overrides valid/binary/salt itself (see comment above) */
	pFmt->methods.cmp_all = pFmtLocal->methods.cmp_all;
	pFmt->methods.cmp_one = pFmtLocal->methods.cmp_one;
	pFmt->methods.cmp_exact = pFmtLocal->methods.cmp_exact;
#if FMT_MAIN_VERSION > 9
#if FMT_MAIN_VERSION > 11
	for (i = 0; i < FMT_TUNABLE_COSTS; ++i) {
		pFmt->methods.tunable_cost_value[i] = pFmtLocal->methods.tunable_cost_value[i];
		pFmt->params.tunable_cost_name[i] = pFmtLocal->params.tunable_cost_name[i];
	}
#endif
	pFmt->methods.source = pFmtLocal->methods.source;
#endif
	pFmt->methods.set_salt = pFmtLocal->methods.set_salt;
	pFmt->methods.salt = pFmtLocal->methods.salt;
	pFmt->methods.salt_hash = pFmtLocal->methods.salt_hash;
	pFmt->methods.split = pFmtLocal->methods.split;
	pFmt->methods.set_key = pFmtLocal->methods.set_key;
	pFmt->methods.get_key = pFmtLocal->methods.get_key;
	pFmt->methods.clear_keys = pFmtLocal->methods.clear_keys;
	pFmt->methods.crypt_all = pFmtLocal->methods.crypt_all;
	pFmt->methods.prepare = pFmtLocal->methods.prepare;
	for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
	{
		pFmt->methods.binary_hash[i] = pFmtLocal->methods.binary_hash[i];
		pFmt->methods.get_hash[i] = pFmtLocal->methods.get_hash[i];
	}
	if (bInitAlso)
	{
		//fprintf(stderr, "dynamic_THIN_FORMAT_LINK() calling init(%s)\n", subformat);
		init(pFmtLocal);
	}
	/* give the thin format its own copy of the dynamic private data */
	pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
	memcpy(pFmt->private.data, pFmtLocal->private.data, sizeof(private_subformat_data));
	return pFmtLocal;
}
// We ONLY deal with hex hashes at this time. If we later have to deal with
// base-64, this will become harder. Before this function existed, we had bugs
// where many things were loaded as 'being' valid, even if not.
/*
 * Heuristic: does 'ciphertext' look like a bare hex hash of exactly the
 * length this sub-format expects (i.e. with no "$dynamic_N$" tag)?
 * Returns non-zero if so.  Only hex inputs are handled here.
 */
static int looks_like_raw_hash(char *ciphertext, private_subformat_data *pPriv) {
	int idx;
	int expected_len = CIPHERTEXT_LENGTH;

	/* Pick the raw-hash length this sub-format's input width implies. */
	if (pPriv->dynamic_40_byte_input)
		expected_len = 40;
	else if (pPriv->dynamic_48_byte_input)
		expected_len = 48;
	else if (pPriv->dynamic_64_byte_input)
		expected_len = 64;
	else if (pPriv->dynamic_56_byte_input)
		expected_len = 56;
	else if (pPriv->dynamic_80_byte_input)
		expected_len = 80;
	else if (pPriv->dynamic_96_byte_input)
		expected_len = 96;
	else if (pPriv->dynamic_128_byte_input)
		expected_len = 128;

	/* Every one of the first expected_len chars must be a hex digit. */
	for (idx = 0; idx < expected_len; ++idx) {
		if (atoi16[ARCH_INDEX(ciphertext[idx])] == 0x7f)
			return 0;
	}
	/* Unsalted formats must end exactly there; salted ones must be
	   followed by the '$' that introduces the salt. */
	if ((pPriv->pSetup->flags & MGF_SALTED) == 0)
		return ciphertext[expected_len] ? 0 : 1;
	return ciphertext[expected_len] == '$';
}
/*
 * If 'ciphertext' is a raw (untagged) hash that plausibly belongs to this
 * sub-format, return a static buffer holding a copy prefixed with the
 * format's "$dynamic_N$" signature; otherwise return 'ciphertext'
 * unchanged.  NOTE: the returned static buffer is overwritten on the
 * next call, so callers must consume it first.
 */
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv)
{
	if (!ciphertext || *ciphertext == 0 || *ciphertext == '*')
		return ciphertext;
	/* Only fix up when allowed, not already tagged, and hex of the right length. */
	if (m_allow_rawhash_fixup && strncmp(ciphertext, "$dynamic_", 9) && looks_like_raw_hash(ciphertext, pPriv))
	{
		/* NOTE(review): leading "__" is a reserved identifier in C. */
		static char __ciphertext[512+24];
		/* A salted format's raw input must carry a '$'-separated salt. */
		if (pPriv->pSetup->flags & MGF_SALTED) {
			if (!strchr(ciphertext, '$'))
				return ciphertext;
		}
		/* Second salt must be present as a "$$2" marker. */
		if ( (pPriv->pSetup->flags & MGF_SALTED2) == MGF_SALTED2) {
			if (!strstr(ciphertext, "$$2"))
				return ciphertext;
		}
		/* Username-consuming formats need a "$$U" marker. */
		if ( (pPriv->pSetup->flags & MGF_USERNAME) == MGF_USERNAME) {
			if (!strstr(ciphertext, "$$U"))
				return ciphertext;
		}
		/* Each required extra field must appear as "$$Fn" past the salt. */
		if (pPriv->FldMask) {
			int i;
			for (i = 0; i < 10; ++i) {
				if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
					char Fld[5];
					sprintf(Fld, "$$F%d", i);
					if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
						return ciphertext;
				}
			}
		}
		/* All checks passed: prepend this format's signature tag. */
		strcpy(__ciphertext, pPriv->dynamic_WHICH_TYPE_SIG);
		strnzcpy(&__ciphertext[strlen(__ciphertext)], ciphertext, 512);
		return __ciphertext;
	}
	return ciphertext;
}
/*
 * Returns non-zero when 'ciphertext' already begins with this format's
 * "$dynamic_N$" signature; zero otherwise (or if anything is unset).
 */
int text_in_dynamic_format_already(struct fmt_main *pFmt, char *ciphertext)
{
	private_subformat_data *pPriv;

	if (!pFmt)
		return 0;
	/* The private data may not be fully set up yet (valid() etc. can be
	   called very early), so re-fetch the pointer on every call. */
	pPriv = pFmt->private.data;
	if (!ciphertext || !pPriv || !pPriv->dynamic_WHICH_TYPE_SIG)
		return 0;
	return strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG,
	               strlen(pPriv->dynamic_WHICH_TYPE_SIG)) == 0;
}
// Applies a casing transform to cp:
//   caseType 1 -> unchanged
//   caseType 2 -> upper-cased
//   caseType 3 -> lower-cased
//   caseType 4 -> lower-cased with the first character upper-cased
// Returns cp itself when nothing changed; otherwise a static buffer
// (overwritten on the next call).
static char *HandleCase(char *cp, int caseType)
{
	static UTF8 dest[256];

	if (caseType == 2) {
		enc_uc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
	} else if (caseType == 3 || caseType == 4) {
		enc_lc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
		if (caseType == 4)
			dest[0] = low2up_ansi(dest[0]);
	} else {
		/* caseType 1 and any unknown value: leave the string as-is. */
		return cp;
	}
	/* If the transform was a no-op, hand back the caller's buffer. */
	if (!strcmp((char*)dest, cp))
		return cp;
	return (char*)dest;
}
/*
 * Real (absolute) salt length of a dynamic format, or -1 when the format
 * is not dynamic or its setup has not been loaded yet.
 */
int dynamic_real_salt_length(struct fmt_main *pFmt) {
	private_subformat_data *pPriv;

	if (!(pFmt->params.flags & FMT_DYNAMIC))
		return -1;	/* NOT a dynamic format */
	pPriv = pFmt->private.data;
	if (!pPriv || !pPriv->pSetup)
		return -1;	/* called before the dynamic formats were loaded */
	/* Negative SaltLen encodes "up to N"; report the magnitude. */
	return abs(pPriv->pSetup->SaltLen);
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID, or null when nothing was bound
  /// under that name or the bound node cannot be converted to \c T.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes: an
  /// associative container keyed by \c std::string with
  /// \c clang::ast_type_traits::DynTypedNode values.
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve the full mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); }

private:
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() {
  // Unconditionally-true matcher: matches every node.
  return internal::TrueMatcher();
}
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Use the expansion location so nodes produced inside macro expansions
  // are attributed to the file containing the expansion point.
  auto &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  // An invalid location can never lie inside a system header.
  return ExpansionLoc.isValid() && SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  auto FileEntry = SM.getFileEntryForID(SM.getFileID(ExpansionLoc));
  if (!FileEntry)
    return false;
  // Partial match of the regular expression against the file name.
  llvm::Regex FilenamePattern(RegExp);
  return FilenamePattern.match(FileEntry->getName());
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_public;
}
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_protected;
}
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Delegates directly to FieldDecl's own bit-field query.
  const bool IsBitField = Node.isBitField();
  return IsBitField;
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  const bool IsProgramEntryPoint = Node.isMain();
  return IsProgramEntryPoint;
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate();
  if (!Specialized)
    return false;
  return InnerMatcher.matches(*Specialized, Finder, Builder);
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  const bool CompilerGenerated = Node.isImplicit();
  return CompilerGenerated;
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  const QualType Unparenthesized = Node.IgnoreParens();
  return InnerMatcher.matches(Unparenthesized, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T()); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T())
AST_MATCHER(Expr, isInstantiationDependent) {
  const bool Dependent = Node.isInstantiationDependent();
  return Dependent;
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  return Node.isTypeDependent();
}
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  return Node.isValueDependent();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // Out-of-range index: there is no N'th argument to match.
  if (N >= Args.size())
    return false;
  return InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  if (Node.getKind() == TemplateArgument::Type)
    return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
  return false;
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  if (Node.getKind() == TemplateArgument::Template)
    return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
  return false;
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression-kind arguments carry an Expr to match against.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // True exactly when the template argument is an integral constant.
  const bool IsIntegral = Node.getKind() == TemplateArgument::Integral;
  return IsIntegral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The underlying integral type is only defined for integral arguments.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare the caller's string against the canonical base-10 rendering
  // of the argument's arbitrary-precision value.
  return Value == Node.getAsIntegral().toString(10);
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  // Forward to the CallExpr's own record of whether the callee was
  // found through argument-dependent lookup.
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Semantic-only init lists have no syntactic form; those never match.
  if (const Expr *Syntactic = Node.getSyntacticForm())
    return InnerMatcher.matches(*Syntactic, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new or delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The increment clause of a for statement is optional; only match
  // when one is actually present.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init statement of a for loop may be absent; guard before
  // dispatching to the inner matcher.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // getLoopVariable() may return null; only match when it exists.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
/// forStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // The range initializer may be null (e.g. on invalid code); guard first.
  if (const Expr *RangeExpr = Node.getRangeInit())
    return InnerMatcher.matches(*RangeExpr, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
///   goto *ptr;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() is the number of designators in the
  // initializer; match only on an exact count.
  return N == Node.size();
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Run the type matcher directly on the operand's type.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // Exact match on the trait kind (sizeof / alignof / vec_step / ...).
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Both UETT_AlignOf and UETT_PreferredAlignOf count as "alignof" here.
  const auto AnyAlignOfKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AnyAlignOfKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict unaryExprOrTypeTraitExpr to the sizeof kind only.
  const auto SizeOfOnly = allOf(ofKind(UETT_SizeOf), InnerMatcher);
  return stmt(unaryExprOrTypeTraitExpr(SizeOfOnly));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  // HasNameMatcher takes a list of candidate names; wrap the single name.
  std::vector<std::string> Names = {Name};
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prepend "::" so patterns anchored on an enclosing scope (e.g. "::X")
  // can match against the fully qualified name from its start.
  const std::string QualifiedName = "::" + Node.getQualifiedNameAsString();
  return llvm::Regex(RegExp).match(QualifiedName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
// Note: the polymorphic return type lets the same spelling apply to both the
// call site (CXXOperatorCallExpr) and the declaration (FunctionDecl).
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // C++ records and Objective-C interfaces use separate ASTMatchFinder
  // entry points; dispatch on the node's dynamic type. Anything that is
  // not a CXXRecordDecl must be an ObjCInterfaceDecl here.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/false);
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a base class.
  if (BaseName.empty())
    return false;
  // Delegate to the Matcher<NamedDecl> overload via hasName().
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the class itself, or anything derived from it.
  const auto SelfOrDerived = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(SelfOrDerived).matches(*Record, Finder,
                                                         Builder);
  return Matcher<ObjCInterfaceDecl>(SelfOrDerived)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a class.
  if (BaseName.empty())
    return false;
  // Delegate to the Matcher<NamedDecl> overload via hasName().
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Same dispatch as isDerivedFrom, but restricted to direct bases
  // (/*Directly=*/true).
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/true);
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a base class.
  if (BaseName.empty())
    return false;
  // Delegate to the Matcher<NamedDecl> overload via hasName().
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Succeeds as soon as any method in [method_begin, method_end) satisfies
  // the inner matcher; matchesFirstInPointerRange handles the per-element
  // bound-node bookkeeping in Builder.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // Delegate to the record's own query for being a lambda's generated class.
  const bool IsLambdaClass = Node.isLambda();
  return IsLambdaClass;
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // Self-or-descendant: eachOf produces one result per matching submatcher,
  // so this reports the node itself plus every matching descendant.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
// Note: the set of node types this applies to is enumerated by
// HasDeclarationSupportedTypes; HasDeclarationMatcher knows how to locate
// the associated declaration for each of them.
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Only match when an underlying declaration actually exists.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Check the implicit object argument for null BEFORE stripping
  // parens/implicit casts. The previous code dereferenced the pointer
  // (->IgnoreParenImpCasts()) first and only then tested the result for
  // null, so the guard could never catch a null implicit object argument.
  // This ordering matches hasReceiver(), which checks first.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return ExprNode != nullptr &&
         InnerMatcher.matches(*ExprNode->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // (Renamed the local: the original called this QualType "TypeDecl",
  // which wrongly suggested a declaration node.)
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Delegate to the method declaration's own class-method query.
  const bool IsClass = Node.isClassMethod();
  return IsClass;
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Delegate to the method declaration's own instance-method query.
  const bool IsInstance = Node.isInstanceMethod();
  return IsInstance;
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Delegate to the message expression's own class-message query.
  const bool IsClassMsg = Node.isClassMessage();
  return IsClassMsg;
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Delegate to the message expression's own instance-message query.
  const bool IsInstanceMsg = Node.isInstanceMessage();
  return IsInstanceMsg;
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Class messages have no instance receiver; fail early in that case.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // String equality against the selector's spelling; operator== is the
  // idiomatic equivalent of compare(...) == 0.
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  // Match the regex against the selector's canonical string form.
  const std::string SelectorText = Node.getSelector().getAsString();
  return llvm::Regex(RegExp).match(SelectorText);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // A null selector usually indicates a malformed message expression.
  const Selector Sel = Node.getSelector();
  return Sel.isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Unary selectors take no arguments (e.g. [obj description]).
  const Selector Sel = Node.getSelector();
  return Sel.isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Keyword selectors carry colon-delimited argument slots.
  const Selector Sel = Node.getSelector();
  return Sel.isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Exact match on the selector's argument count.
  return N == Node.getSelector().getNumArgs();
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Run the inner matcher on the callee expression, if one is present.
  const Expr *CalleeExpr = Node.getCallee();
  return CalleeExpr != nullptr &&
         InnerMatcher.matches(*CalleeExpr, Finder, Builder);
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Delegate to hasDeclaration: for a CallExpr, the associated declaration
  // is the callee's declaration.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // Nodes for which no underlying type can be determined never match.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() && InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Resolve the node's underlying type, then match that type's declaration.
  const QualType NodeType = internal::getUnderlyingType(Node);
  if (NodeType.isNull())
    return false;
  return qualType(hasDeclaration(InnerMatcher))
      .matches(NodeType, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Match against the declarator's written type location, when one exists.
  if (!Node.getTypeSourceInfo())
    // This happens for example for implicit destructors.
    return false;
  return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Textual comparison against QualType::getAsString() (default policy).
  return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Only non-null pointer (incl. ObjC pointer) types qualify; then match the
  // pointee type.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Delegate to the QualType overload via hasDeclaration.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Strip sugar and qualifiers from the node's type before matching.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Only non-null reference types qualify; then match the referenced type.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null types have no canonical form and never match.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Delegate to the QualType overload via hasDeclaration.
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the implicit object argument directly, with no stripping applied.
  if (const Expr *ObjectArg = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ObjectArg, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either the object's type itself or a pointer to a matching type.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same as the QualType overload, but matching the type's declaration.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Match the declaration this reference resolves to, when present.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-shadow declaration can match.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Succeeds on the first declaration in the overload set that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Statements declaring more than one entity never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // NOTE: getAnyInitializer may also find the initializer on a different
  // redeclaration of this variable.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // Delegates to VarDecl::isStaticLocal().
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Delegates to VarDecl::hasLocalStorage().
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Delegates to VarDecl::hasGlobalStorage().
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Compare the variable's storage duration against SD_Automatic.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the declarations of y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Compare the variable's storage duration against SD_Static.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // Compare the variable's storage duration against SD_Thread.
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // Delegates to VarDecl::isExceptionVariable().
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  // getNumArgs counts absent default arguments too (see doc comment above).
  return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match; parens and implicit casts around the
  // argument are skipped before matching.
  if (N >= Node.getNumArgs())
    return false;
  return InnerMatcher.matches(*Node.getArg(N)->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match.
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Count declarations by iterator distance; the cast avoids a
  // signed/unsigned comparison.
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Reject indices beyond the number of declarations in this statement.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  // Step to the N'th declaration and match it.
  return InnerMatcher.matches(**std::next(Node.decl_begin(), N), Finder,
                              Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch (...) handlers have no exception declaration.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Succeeds on the first constructor initializer that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Initializers with no associated member never match.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the initializer expression of the ctor-initializer, if present.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  // Delegates to CXXCtorInitializer::isWritten().
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  // Delegates to CXXCtorInitializer::isBaseInitializer().
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // Delegates to CXXCtorInitializer::isMemberInitializer().
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    // Match against a copy of the bound nodes, so bindings made while trying
    // a non-matching argument are discarded; only the first match commits.
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // Delegates to CXXConstructExpr::isListInitialization().
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // Delegates to CXXConstructExpr::requiresZeroInitialization().
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Indices past the end of the parameter list never match.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates one result per (argument, parameter) pair that matches both
  // matchers; all accumulated bindings are committed at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // ParamIndex tracks the parameter position; it stays at 0 even when
  // ArgIndex starts at 1, because the method's parameter list does not
  // include the implicit object argument.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    // Try the argument matcher against a copy of the current bindings.
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match the whole node to bind the ParamIndex'th parameter of the
      // callee (constructor or function) against ParamMatcher.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Succeeds on the first parameter that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Compare the declared parameter count (variadic '...' not counted by
  // getNumParams) against N.
  return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
// Delegates to FunctionDecl::isNoReturn().
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match the function's return type against the inner matcher.
  return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // Delegates to the node's isExternC().
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Compare the written storage class against SC_Static.
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  // Delegates to FunctionDecl::isDeleted().
  return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  // Delegates to FunctionDecl::isDefaulted().
  return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Nodes without a function prototype carry no exception specification.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  return FnTy != nullptr && FnTy->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  // Covers noexcept, noexcept(true) and throw() (see doc comment above).
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // Delegates to the node's isConstexpr().
  return Node.isConstexpr();
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A statement may lack a condition (e.g. 'for (;;)'); it then never matches.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  // Match the then-branch against the inner matcher, if present.
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // If-statements without an else-branch never match.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSepcifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Drop every binding set in which the node bound to ID is NOT equal to
  // this node; the match succeeds based on what removeBindings reports.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = ast_type_traits::DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Only if-statements that declare a condition variable can match.
  if (const DeclStmt *CondVar = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVar, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the subscript's index expression, if present.
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on the subscript's base expression, if any.
  const Expr *BaseExpr = Node.getBase();
  return BaseExpr != nullptr &&
         InnerMatcher.matches(*BaseExpr, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher abstracts over how each node kind exposes its body.
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // For a StmtExpr this retrieves its child CompoundStmt; for a CompoundStmt
  // it is the node itself. Succeeds on the first sub-statement that matches.
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0)))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // CompoundStmt::size() is the number of direct child statements.
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  // Wraps Value in a polymorphic ValueEqualsMatcher so one `equals()` spelling
  // serves every supported literal node type.
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher,
    ValueT>(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  // Overload 0: accepts a bool argument, e.g. equals(false).
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  // Overload 1: accepts an unsigned argument, e.g. equals(42).
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  // Overload 2: accepts a double; the only overload supporting FloatingLiteral.
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare against the spelling of the node's opcode (e.g. "||", "!").
  return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; })
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Both node kinds expose isAssignmentOp(); covers plain and compound
  // assignment as well as overloaded operator=.
  return Node.isAssignmentOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on the left-hand operand, if present.
  if (const Expr *LHS = Node.getLHS())
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on the right-hand operand, if present.
  if (const Expr *RHS = Node.getRHS())
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  // A binary operator matches when either of its operands satisfies the
  // inner matcher.
  const auto LHSMatches = hasLHS(InnerMatcher);
  const auto RHSMatches = hasRHS(InnerMatcher);
  return anyOf(LHSMatches, RHSMatches);
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on the operator's sub-expression, if present.
  if (const Expr *SubExpr = Node.getSubExpr())
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher abstracts over how each node kind exposes its
  // underlying source expression.
  if (const Expr *Source =
          internal::GetSourceExpressionMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Source, Finder, Builder);
  return false;
}
/// Matches casts that have a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, the CastKind parameter
/// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Direct comparison against the cast's CastKind enumerator.
  return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The destination is the type as written in the cast expression.
  return InnerMatcher.matches(Node.getTypeAsWritten(), Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // An implicit cast's destination is simply the expression's type.
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches RecordDecl object that are spelled with "struct."
///
/// Example matches S, but not C or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isStruct) {
  // Delegates to RecordDecl::isStruct().
  return Node.isStruct();
}
/// Matches RecordDecl object that are spelled with "union."
///
/// Example matches U, but not C or S.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isUnion) {
  // Delegates to RecordDecl::isUnion().
  return Node.isUnion();
}
/// Matches RecordDecl object that are spelled with "class."
///
/// Example matches C, but not S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isClass) {
  // Delegates to RecordDecl::isClass().
  return Node.isClass();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on the "true" branch, if present.
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on the "false" branch, if present.
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // All supported node kinds expose isThisDeclarationADefinition().
  return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  // Delegates to FunctionDecl::isVariadic() (C-style "..." variadics only).
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // Run the inner matcher on the method's enclosing class, if present.
  if (const CXXRecordDecl *EnclosingClass = Node.getParent())
    return InnerMatcher.matches(*EnclosingClass, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Collect a separate binding set for every directly-overridden method the
  // inner matcher accepts; succeed when at least one matched.
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Overridden, Finder, &Candidate)) {
      AnyMatched = true;
      Matches.addMatch(Candidate);
    }
  }
  // Replace the builder's bindings with the accumulated per-match sets.
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
  // True for implicitly virtual overrides too, not just "virtual" as written.
  return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  // Only matches when the "virtual" keyword appears on this declaration.
  return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // "final" is represented as a FinalAttr on the declaration; "template" is
  // required because NodeType is a dependent type here.
  return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  // Matches pure virtual methods ("= 0").
  return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  // Matches const-qualified member functions.
  return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  // Delegates to CXXMethodDecl::isCopyAssignmentOperator().
  return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  // Delegates to CXXMethodDecl::isMoveAssignmentOperator().
  return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either the semantic fact (overridden_methods non-empty) or the explicit
  // "override" keyword (OverrideAttr) counts.
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  // Excludes implicitly declared, defaulted, and deleted members.
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // All supported member-expression kinds expose isArrow().
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  // operator-> on QualType dereferences to the underlying Type.
  return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  // Delegates to Type::isUnsignedIntegerType().
  return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  // Delegates to Type::isSignedIntegerType().
  return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  // Covers char, wchar_t, char16_t, char32_t, etc.
  return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  // Includes Objective-C object pointers, unlike isPointerType().
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Checks top-level const on the QualType itself, not on pointees.
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  // Checks top-level volatile on the QualType itself, not on pointees.
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  // "Local" means spelled on this type directly, not inherited via a typedef.
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // A MemberExpr always refers to a member declaration; match against it.
  const ValueDecl &MemberDecl = *Node.getMemberDecl();
  return InnerMatcher.matches(MemberDecl, Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Dependent member accesses with an implicit object have no written base
  // expression to inspect, so they cannot match.
  if (const auto *UME = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (UME->isImplicitAccess())
      return false;
  if (const auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (DSME->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds on the first shadow declaration accepted by the inner matcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // A shadow declaration always has a target; match against it directly.
  const NamedDecl &Target = *Node.getTargetDecl();
  return InnerMatcher.matches(Target, Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Any instantiation kind counts — implicit, or explicit instantiation
  // definition/declaration. Explicit specializations do not.
  const TemplateSpecializationKind Kind = Node.getTemplateSpecializationKind();
  return Kind == TSK_ImplicitInstantiation ||
         Kind == TSK_ExplicitInstantiationDefinition ||
         Kind == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies when it is itself an instantiation or lives
  // anywhere inside one.
  auto Instantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                  functionDecl(isTemplateInstantiation())));
  return decl(anyOf(Instantiation, hasAncestor(Instantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement qualifies when some ancestor declaration is a class or
  // function template instantiation.
  auto Instantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                  functionDecl(isTemplateInstantiation())));
  return stmt(hasAncestor(Instantiation));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Only "template<> ..." specializations match, never instantiations.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapts a QualType matcher so it can run on TypeLoc nodes (and be bound).
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
  // Delegates to Type::isBooleanType().
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
  // Delegates to Type::isVoidType().
  return Node.isVoidType();
}
/// Shorthand for the variadic dyn-cast matcher used by all `*Type` matcher
/// variables below (e.g. \c builtinType, \c arrayType).
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  // "Real" excludes _Complex floating types.
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void f() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // HasSizeMatcher abstracts over how each node kind reports its size
  // (array extent vs. string length).
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // A VariableArrayType always carries a size expression; match against it.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Not every ElaboratedType carries a qualifier; an unqualified one never
  // matches.
  const NestedNameSpecifier *NNS = Node.getQualifier();
  if (!NNS)
    return false;
  return InnerMatcher.matches(*NNS, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
InnerMatcher) {
return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher.
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
InnerType) {
return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // A declaration with no enclosing context cannot match.
  if (const DeclContext *Ctx = Node.getDeclContext())
    return InnerMatcher.matches(*Decl::castFromDeclContext(Ctx), Finder,
                                Builder);
  return false;
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapt a NestedNameSpecifier matcher so it can run on the corresponding
  // source-location-carrying node (NestedNameSpecifierLoc).
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type-form specifiers carry a Type; namespace-form specifiers do not
  // and never match.
  const Type *T = Node.getAsType();
  return T != nullptr &&
         InnerMatcher.matches(QualType(T, /*Quals=*/0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Guard against an invalid (null) loc and a non-type specifier before
  // delegating to the TypeLoc matcher.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier has no prefix and therefore never matches.
  if (const NestedNameSpecifier *Prefix = Node.getPrefix())
    return InnerMatcher.matches(*Prefix, Finder, Builder);
  return false;
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An invalid (null) prefix loc means this is the outermost specifier.
  NestedNameSpecifierLoc Prefix = Node.getPrefix();
  return Prefix && InnerMatcher.matches(Prefix, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only namespace-form specifiers resolve to a NamespaceDecl; anything else
  // never matches.
  const NamespaceDecl *NS = Node.getAsNamespace();
  return NS != nullptr && InnerMatcher.matches(*NS, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case != nullptr;
       Case = Case->getNextSwitchCase()) {
    // Give each case its own copy of the current bindings so failed cases do
    // not pollute the result set.
    BoundNodesTreeBuilder PerCase(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &PerCase)) {
      AnyMatched = true;
      Matches.addMatch(PerCase);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const CXXCtorInitializer *Init : Node.inits()) {
    // Each initializer gets an independent copy of the current bindings.
    BoundNodesTreeBuilder PerInit(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &PerInit)) {
      AnyMatched = true;
      Matches.addMatch(PerInit);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // An explicit-specifier without an expression (plain `explicit`, or none at
  // all) cannot match.
  ExplicitSpecifier Spec = ExplicitSpecifier::getFromDecl(&Node);
  const Expr *SpecExpr = Spec.getExpr();
  return SpecExpr != nullptr &&
         InnerMatcher.matches(*SpecExpr, Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  if (const auto *Func = dyn_cast<FunctionDecl>(&Node))
    return Func->isInlineSpecified();
  if (const auto *Namespace = dyn_cast<NamespaceDecl>(&Node))
    return Namespace->isInline();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means the GNU "case lo ... hi:" range extension is in use;
  // such statements are deliberately not matched.
  return Node.getRHS() == nullptr &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Scan the declaration's attribute list for one of the requested kind.
  for (const auto *A : Node.attrs())
    if (A->getKind() == AttrKind)
      return true;
  return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare "return;" has no value expression and therefore never matches.
  const auto *Value = Node.getRetValue();
  return Value != nullptr && InnerMatcher.matches(*Value, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
return Node.isNullPointerConstant(Finder->getASTContext(),
Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk up the parent chain of the statement until a FunctionDecl (or the
  // call operator of an enclosing lambda) is found, and match it against
  // InnerMatcher. An explicit worklist is used because getParents() can
  // return more than one parent for a node.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
                                                            Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A statement inside a lambda body belongs to the lambda's call
      // operator, not to the function that contains the lambda expression.
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Not a function-like node: keep climbing through its parents.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
return Node.isArray();
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // getArraySize() returns an llvm::Optional; dereferencing it while it holds
  // no value is undefined behavior, so check for presence first, then check
  // the stored Expr pointer itself before matching.
  if (!Node.isArray() || !Node.getArraySize())
    return false;
  const Expr *Size = *Node.getArraySize();
  return Size != nullptr && InnerMatcher.matches(*Size, Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types record trailing-return information.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto != nullptr && Proto->hasTrailingReturn();
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  // Skip an elidable constructor call by matching the expression underlying
  // its materialized temporary argument instead of the call itself.
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->GetTemporaryExpr(),
                                    Finder, Builder);
      }
    }
  }
  // Nothing elidable to skip: run InnerMatcher on the original node.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
return Node.isStandaloneDirective();
}
/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock()))`` matches ``{}``.
AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); }
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Standalone directives have no structured blocks and never match.
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
internal::Matcher<OMPClause>, InnerMatcher) {
ArrayRef<OMPClause *> Clauses = Node.clauses();
return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
return Node.getDefaultKind() == OMPC_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
return Node.getDefaultKind() == OMPC_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, the ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Consults the OpenMP directive/clause compatibility tables; this checks
  // only whether CKind *may* appear on the directive, not whether it does.
  return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
dlascl.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlascl.c, normal z -> d, Fri Sep 28 17:38:08 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include <math.h>
/******************************************************************************/
/**
 * Scales a general, upper, or lower matrix by cto/cfrom (LAPACK dlascl
 * equivalent), converting to tile layout, running asynchronously under one
 * OpenMP master region, then converting back.
 *
 * @return PlasmaSuccess on success, a negative value identifying the illegal
 *         argument, or the error code of a failed internal step.
 */
int plasma_dlascl(plasma_enum_t uplo,
                  double cfrom, double cto,
                  int m, int n,
                  double *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (cfrom == 0.0 || isnan(cfrom)) {
        plasma_error("illegal value of cfrom");
        return -2;
    }
    if (isnan(cto)) {
        plasma_error("illegal value of cto");
        return -3;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -4;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -5;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -7;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lascl(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Initialize sequence.
    // Bug fix: the return value was previously ignored, leaking A on failure.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    // Bug fix: same unchecked return value / descriptor leak as above.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        // Call tile async function.
        plasma_omp_dlascl(uplo, cfrom, cto, A, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}
/******************************************************************************/
/**
 * Asynchronous, tile-layout variant of plasma_dlascl(). Errors are reported
 * through the sequence/request pair; the function itself returns void.
 */
void plasma_omp_dlascl(plasma_enum_t uplo,
                       double cfrom, double cto,
                       plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Bug fix: validate sequence and request FIRST. The original code called
    // plasma_request_fail(sequence, request, ...) on earlier error paths and
    // only checked these pointers for NULL afterwards, so a NULL sequence or
    // request combined with any other bad argument dereferenced NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (cfrom == 0.0 || isnan(cfrom)) {
        plasma_error("illegal value of cfrom");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (isnan(cto)) {
        plasma_error("illegal value of cto");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pdlascl(uplo, cfrom, cto, A, sequence, request);
}
|
critical.c | /* Copyright (C) 2005-2014 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU OpenMP Library (libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Copyright 2014 DEI - Universita' di Bologna
author DEI - Universita' di Bologna
Alessandro Capotondi - alessandro.capotondi@unibo.it
info #pragma omp critical-atomic implementation */
#include "libgomp.h"
void
GOMP_atomic_start (void)
{
  /* Enter an atomic region: acquire the current team's shared atomic lock.  */
  gomp_hal_lock (&(CURR_TEAM (get_proc_id ())->atomic_lock));
}
void
GOMP_atomic_end (void)
{
  /* Leave an atomic region: release the current team's shared atomic lock.  */
  gomp_hal_unlock (&(CURR_TEAM (get_proc_id ())->atomic_lock));
}
void
GOMP_critical_start (void)
{
  /* Enter the unnamed critical section for this thread's team.  */
  gomp_hal_lock (&(CURR_TEAM (get_proc_id ())->critical_lock));
}
void
GOMP_critical_end (void)
{
  /* Leave the unnamed critical section for this thread's team.  */
  gomp_hal_unlock (&(CURR_TEAM (get_proc_id ())->critical_lock));
}
|
life3d-omp.c | #include "life3d-omp.h"
/* Driver: parse args, load the initial live-cell set, then run `generations`
 * steps of a 3D game of life over a cube_size^3 toroidal space represented as
 * a 2D array of z-sorted linked lists. Parallelised with OpenMP; per-(x,y)
 * locks serialise concurrent neighbour-count updates to the same list. */
int main(int argc, char* argv[]){
    char* file;              /**< Input data file name */
    int generations = 0;     /**< Number of generations to process */
    int cube_size = 0;       /**< Size of the 3D space */
    GraphNode*** graph;      /**< Graph representation - 2D array of lists */
    /* Lock variables */
    omp_lock_t** graph_lock;
    int g, i, j;
    GraphNode* it;
    int live_neighbours;

    parseArgs(argc, argv, &file, &generations);
    debug_print("ARGS: file: %s generations: %d.", file, generations);
    graph = parseFile(file, &cube_size);

    /* Initialize lock variables: one lock per (x, y) cell list.
     * NOTE(review): malloc results are not checked here — TODO confirm
     * whether allocation failure should abort with a message. */
    graph_lock = (omp_lock_t**)malloc(cube_size * sizeof(omp_lock_t*));
    for(i = 0; i < cube_size; i++){
        graph_lock[i] = (omp_lock_t*) malloc(cube_size * sizeof(omp_lock_t));
        for(j = 0; j < cube_size; j++){
            omp_init_lock(&(graph_lock[i][j]));
        }
    }

    double start = omp_get_wtime(); // Start Timer
    for(g = 1; g <= generations; g++){
        #pragma omp parallel
        {
            /* First passage in the graph - notify neighbours: every ALIVE
             * cell increments the neighbour counter of its 6 face neighbours
             * (locking is done inside visitNeighbours). */
            #pragma omp for private(i, j, it)
            for(i = 0; i < cube_size; i++){
                for(j = 0; j < cube_size; j++){
                    for(it = graph[i][j]; it != NULL; it = it->next){
                        if(it->state == ALIVE)
                            visitNeighbours(graph, graph_lock, cube_size, i, j, it->z);
                    }
                }
            }
            /* Second passage in the graph - decide next state from the
             * counters accumulated above (counter is reset for next round).
             * Rule used here: a live cell survives with 2..4 neighbours; a
             * dead cell becomes alive with exactly 2 or 3. */
            #pragma omp for private(i, j, it, live_neighbours)
            for(i = 0; i < cube_size; i++){
                for(j = 0; j < cube_size; j++){
                    for (it = graph[i][j]; it != NULL; it = it->next){
                        live_neighbours = it->neighbours;
                        it->neighbours = 0;
                        if(it->state == ALIVE){
                            if(live_neighbours < 2 || live_neighbours > 4){
                                it->state = DEAD;
                            }
                        }else{
                            if(live_neighbours == 2 || live_neighbours == 3){
                                it->state = ALIVE;
                            }
                        }
                    }
                }
            }
            /* Remove dead nodes from the graph every REMOVAL_PERIOD
             * generations (amortises list cleanup cost). All threads see the
             * same g, so they agree on whether to run this worksharing loop. */
            if(g % REMOVAL_PERIOD == 0){
                #pragma omp for private(i, j)
                for(i = 0; i < cube_size; i++){
                    for(j = 0; j < cube_size; j++){
                        GraphNode ** list = &graph[i][j];
                        graphListCleanup(list);
                    }
                }
            }
        }/*pragma end*/
    } /*generations loop end*/
    double end = omp_get_wtime(); // Stop Timer

    /* Print the final set of live cells */
    printAndSortActive(graph, cube_size);
    time_print("%f\n", end - start);

    /* Teardown: locks, lock table, graph, and the strdup'ed file name. */
    for(i = 0; i < cube_size; i++){
        for(j = 0; j<cube_size; j++){
            omp_destroy_lock(&(graph_lock[i][j]));
        }
        free(graph_lock[i]);
    }
    free(graph_lock);
    freeGraph(graph, cube_size);
    free(file);
    return(EXIT_SUCCESS);
}
void visitNeighbours(GraphNode*** graph, omp_lock_t** graph_lock, int cube_size, coordinate x, coordinate y, coordinate z){
    /* Increment the neighbour count of the six face neighbours of (x,y,z),
     * wrapping around the cube edges (toroidal topology). The per-(x,y)
     * lock for each target list is taken inside graphNodeAddNeighbour. */
    coordinate x_next = (x + 1) % cube_size;
    coordinate x_prev = (x - 1) < 0 ? (cube_size - 1) : (x - 1);
    coordinate y_next = (y + 1) % cube_size;
    coordinate y_prev = (y - 1) < 0 ? (cube_size - 1) : (y - 1);
    coordinate z_next = (z + 1) % cube_size;
    coordinate z_prev = (z - 1) < 0 ? (cube_size - 1) : (z - 1);

    graphNodeAddNeighbour(&(graph[x_next][y]), z, &(graph_lock[x_next][y]));
    graphNodeAddNeighbour(&(graph[x_prev][y]), z, &(graph_lock[x_prev][y]));
    graphNodeAddNeighbour(&(graph[x][y_next]), z, &(graph_lock[x][y_next]));
    graphNodeAddNeighbour(&(graph[x][y_prev]), z, &(graph_lock[x][y_prev]));
    graphNodeAddNeighbour(&(graph[x][y]), z_next, &(graph_lock[x][y]));
    graphNodeAddNeighbour(&(graph[x][y]), z_prev, &(graph_lock[x][y]));
}
GraphNode*** initGraph(int size){
    /* Allocate a size x size grid of empty (NULL) adjacency lists. */
    int row, col;
    GraphNode*** grid = (GraphNode***) malloc(sizeof(GraphNode**) * size);
    for (row = 0; row < size; row++){
        grid[row] = (GraphNode**) malloc(sizeof(GraphNode*) * size);
        for (col = 0; col < size; col++)
            grid[row][col] = NULL;
    }
    return grid;
}
void freeGraph(GraphNode*** graph, int size){
    /* Release every node list, every row, then the grid itself; NULL-safe. */
    int row, col;
    if (graph == NULL)
        return;
    for (row = 0; row < size; row++){
        for (col = 0; col < size; col++)
            graphNodeDelete(graph[row][col]);
        free(graph[row]);
    }
    free(graph);
}
void printAndSortActive(GraphNode*** graph, int cube_size){
    /* Emit every ALIVE cell as "x y z", with each (x,y) list first sorted by
     * ascending z so output order is deterministic. */
    int row, col;
    GraphNode* node;
    for (row = 0; row < cube_size; ++row){
        for (col = 0; col < cube_size; ++col){
            graphNodeSort(&(graph[row][col]));
            for (node = graph[row][col]; node != NULL; node = node->next){
                if (node->state == ALIVE)
                    out_print("%d %d %d\n", row, col, node->z);
            }
        }
    }
}
/* Parse command-line arguments: argv[1] is the input file name (duplicated
 * into *file, caller frees), argv[2] the positive generation count. On any
 * invalid input prints usage and exits. */
void parseArgs(int argc, char* argv[], char** file, int* generations){
    if (argc == 3){
        char* file_name = malloc(strlen(argv[1]) + 1);
        /* Bug fix: the original strcpy'd into file_name BEFORE checking the
         * malloc result, crashing on allocation failure. */
        if (file_name != NULL){
            strcpy(file_name, argv[1]);
            *file = file_name;
            *generations = atoi(argv[2]);
            if (*generations > 0)
                return;
            free(file_name); /* invalid generation count: clean up, fall through */
        }
    }
    printf("Usage: %s [data_file.in] [number_generations]", argv[0]);
    exit(EXIT_FAILURE);
}
/* Load the initial world: the first parsable line gives the cube size, each
 * subsequent "x y z" line marks a live cell. Exits on unopenable or
 * header-less files. Returns the populated graph; *cube_size is set. */
GraphNode*** parseFile(char* file, int* cube_size){
    int first = 0;
    char line[BUFFER_SIZE];
    int x, y, z;
    /* Bug fix: graph was uninitialized; a file with no valid size header
     * previously returned an indeterminate pointer. */
    GraphNode*** graph = NULL;
    FILE* fp = fopen(file, "r");
    if(fp == NULL){
        err_print("Please input a valid file name");
        exit(EXIT_FAILURE);
    }
    while(fgets(line, sizeof(line), fp)){
        if(!first){
            if(sscanf(line, "%d\n", cube_size) == 1){
                first = 1;
                graph = initGraph(*cube_size);
            }
        }else{
            if(sscanf(line, "%d %d %d\n", &x, &y, &z) == 3){
                /* Insert live nodes in the graph and the update set */
                graph[x][y] = graphNodeInsert(graph[x][y], z, ALIVE);
            }
        }
    }
    fclose(fp);
    if(graph == NULL){
        err_print("Invalid input file: missing cube size header");
        exit(EXIT_FAILURE);
    }
    return graph;
}
|
soxr.c | /* SoX Resampler Library Copyright (c) 2007-18 robs@users.sourceforge.net
* Licence for this file: LGPL v2.1 See LICENCE for details. */
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "soxr.h"
#include "data-io.h"
#include "internal.h"
#if AVUTIL_FOUND
#include <libavutil/cpu.h>
#endif
#if WITH_DEV_TRACE
#include <stdarg.h>
#include <stdio.h>
int _soxr_trace_level;
/* Dev-trace sink: printf-style message written to stderr, newline appended.
 * Only compiled when WITH_DEV_TRACE is enabled. */
void _soxr_trace(char const * fmt, ...)
{
  va_list args;
  va_start(args, fmt);
  vfprintf(stderr, fmt, args);
  fputc('\n', stderr);
  va_end(args);
}
#endif
/* Returns the static library version string (no allocation). */
char const * soxr_version(void)
{
  return "libsoxr-" SOXR_THIS_VERSION_STR;
}
typedef void sample_t; /* float or double */
typedef void (* fn_t)(void);
typedef fn_t control_block_t[10];
#define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0])
#define resampler_process (*(void (*)(void *, size_t))p->control_block[1])
#define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2])
#define resampler_flush (*(void (*)(void *))p->control_block[3])
#define resampler_close (*(void (*)(void *))p->control_block[4])
#define resampler_delay (*(double (*)(void *))p->control_block[5])
#define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6])
#define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7])
#define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8])
#define resampler_id (*(char const * (*)(void))p->control_block[9])
typedef void * resampler_t; /* For one channel. */
typedef void * resampler_shared_t; /* Between channels. */
typedef void (* deinterleave_t)(sample_t * * dest,
soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch);
typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest,
sample_t const * const * src, size_t, unsigned, unsigned long *);
struct soxr {
unsigned num_channels;
double io_ratio;
soxr_error_t error;
soxr_quality_spec_t q_spec;
soxr_io_spec_t io_spec;
soxr_runtime_spec_t runtime_spec;
void * input_fn_state;
soxr_input_fn_t input_fn;
size_t max_ilen;
resampler_shared_t shared;
resampler_t * resamplers;
control_block_t control_block;
deinterleave_t deinterleave;
interleave_t interleave;
void * * channel_ptrs;
size_t clips;
unsigned long seed;
int flushing;
};
#if WITH_CR32 || WITH_CR32S || WITH_CR64 || WITH_CR64S
#include "filter.h"
#else
#define lsx_to_3dB(x) ((x)/(x))
#endif
/* Builds a quality spec from a recipe (low nibble = quality level, bits 4-5 =
 * phase response, SOXR_STEEP_FILTER flag) plus extra flags. On an invalid
 * quality level the returned spec carries an error string in .e. */
soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags)
{
  soxr_quality_spec_t spec, * p = &spec;
  unsigned q = recipe & 0xf; /* TODO: move to soxr-lsr.c: */
  /* Map out-of-range libsamplerate-compat levels back into the native range. */
  unsigned quality = q > SOXR_LSR2Q+2? SOXR_VHQ : q > SOXR_LSR2Q? SOXR_QQ : q;
  double rej;
  memset(p, 0, sizeof(*p));
  if (quality > SOXR_PRECISIONQ) {
    p->e = "invalid quality type";
    return spec;
  }
  flags |= quality < SOXR_LSR0Q ? RESET_ON_CLEAR : 0;
  /* Octal literals "\62\31\144" = {50, 25, 100}: phase response per recipe. */
  p->phase_response = "\62\31\144"[(recipe & 0x30)>>4];
  p->stopband_begin = 1;
  /* Bits of precision per quality level (0 means "don't care" for QQ). */
  p->precision =
    quality == SOXR_QQ ? 0 :
    quality <= SOXR_16_BITQ ? 16 :
    quality <= SOXR_32_BITQ ? 4 + quality * 4 :
    quality <= SOXR_LSR2Q ? 55 - quality * 4 : /* TODO: move to soxr-lsr.c */
    0;
  rej = p->precision * linear_to_dB(2.); /* Stop-band rejection in dB. */
  p->flags = flags;
  if (quality <= SOXR_32_BITQ || quality == SOXR_PRECISIONQ) {
#define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */
    p->passband_end = quality == 1? LOW_Q_BW0 : 1 - .05 / lsx_to_3dB(rej);
    if (quality <= 2)
      p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
  }
  else { /* TODO: move to soxr-lsr.c */
    static float const bw[] = {.931f, .832f, .663f};
    p->passband_end = bw[quality - SOXR_LSR0Q];
    if (quality == SOXR_LSR2Q) {
      p->flags &= ~SOXR_ROLLOFF_NONE;
      p->flags |= SOXR_ROLLOFF_LSR2Q | SOXR_PROMOTE_TO_LQ;
    }
  }
  if (recipe & SOXR_STEEP_FILTER)
    p->passband_end = 1 - .01 / lsx_to_3dB(rej);
  return spec;
}
/* Name of the resampler engine selected for p (resampler_id is a macro
 * dispatching through p->control_block). */
char const * soxr_engine(soxr_t p)
{
  return resampler_id();
}
/* Pointer to the running clip counter (caller may read or reset it). */
size_t * soxr_num_clips(soxr_t p)
{
  return &p->clips;
}
/* Current sticky error (NULL when none). */
soxr_error_t soxr_error(soxr_t p)
{
  return p->error;
}
/* Default runtime spec: DFT-size and coefficient-cache defaults, with the
 * requested thread count. */
soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads)
{
  soxr_runtime_spec_t spec;
  memset(&spec, 0, sizeof(spec));
  spec.log2_min_dft_size = 10;
  spec.log2_large_dft_size = 17;
  spec.coef_size_kbytes = 400;
  spec.num_threads = num_threads;
  return spec;
}
/* Default I/O spec for the given sample datatypes; flags an error string in
 * .e when either datatype is out of range. */
soxr_io_spec_t soxr_io_spec(
  soxr_datatype_t itype,
  soxr_datatype_t otype)
{
  soxr_io_spec_t spec;
  memset(&spec, 0, sizeof(spec));
  if ((itype | otype) < SOXR_SPLIT * 2) {
    spec.itype = itype;
    spec.otype = otype;
    spec.scale = 1;
  }
  else
    spec.e = "invalid io datatype(s)";
  return spec;
}
#if (WITH_CR32S && WITH_CR32) || (WITH_CR64S && WITH_CR64)
#if defined __GNUC__ && defined __x86_64__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"cpuid \n\t" \
: "=a" (eax_), "=b" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined __GNUC__ && defined __i386__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"mov %%ebx, %%edi \n\t" \
"cpuid \n\t" \
"xchg %%edi, %%ebx \n\t" \
: "=a" (eax_), "=D" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined _M_X64 && defined _MSC_VER && _MSC_VER > 1500
void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type, 0); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_X64 && defined _MSC_VER
void __cpuidex(int CPUInfo[4], int info_type);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm pushad \
__asm mov eax, type \
__asm xor ecx, ecx \
__asm cpuid \
__asm mov eax_, eax \
__asm mov ebx_, ebx \
__asm mov ecx_, ecx \
__asm mov edx_, edx \
__asm popad
#endif
#endif
#if WITH_CR32S && WITH_CR32
/* Runtime detection of single-precision SIMD support for this CPU/arch. */
static bool cpu_has_simd32(void)
{
#if defined __x86_64__ || defined _M_X64
  return true; /* SSE/SSE2 are part of the x86-64 baseline. */
#elif defined __i386__ || defined _M_IX86
  enum {SSE = 1 << 25, SSE2 = 1 << 26};
  unsigned eax_, ebx_, ecx_, edx_;
  CPUID(1, eax_, ebx_, ecx_, edx_);
  return (edx_ & (SSE|SSE2)) != 0;
#elif defined AV_CPU_FLAG_NEON
  return !!(av_get_cpu_flags() & AV_CPU_FLAG_NEON);
#else
  return false;
#endif
}
/* Environment overrides (SOXR_USE_SIMD, then SOXR_USE_SIMD32) take
 * precedence over CPU auto-detection. */
static bool should_use_simd32(void)
{
  char const * e;
  return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
    ((e = getenv("SOXR_USE_SIMD32")))? !!atoi(e) : cpu_has_simd32();
}
#else
#define should_use_simd32() true
#endif
#if WITH_CR64S && WITH_CR64
#if defined __GNUC__
#define XGETBV(type, eax_, edx_) \
__asm__ __volatile__ ( \
".byte 0x0f, 0x01, 0xd0\n" \
: "=a"(eax_), "=d"(edx_) : "c" (type));
#elif defined _M_X64 && defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219
#include <immintrin.h>
#define XGETBV(type, eax_, edx_) do { \
union {uint64_t x; uint32_t y[2];} a = {_xgetbv(0)}; \
eax_ = a.y[0], edx_ = a.y[1]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define XGETBV(type, eax_, edx_) \
__asm pushad \
__asm mov ecx, type \
__asm _emit 0x0f \
__asm _emit 0x01 \
__asm _emit 0xd0 \
__asm mov eax_, eax \
__asm mov edx_, edx \
__asm popad
#else
#define XGETBV(type, eax_, edx_) eax_ = edx_ = 0
#endif
/* AVX usability requires both the CPU flag and OS-enabled extended state
 * (checked via XGETBV). */
static bool cpu_has_simd64(void)
{
  enum {OSXSAVE = 1 << 27, AVX = 1 << 28};
  unsigned eax_, ebx_, ecx_, edx_;
  CPUID(1, eax_, ebx_, ecx_, edx_);
  if ((ecx_ & (OSXSAVE|AVX)) == (OSXSAVE|AVX)) {
    XGETBV(0, eax_, edx_);
    return (eax_ & 6) == 6; /* XMM and YMM state enabled by the OS. */
  }
  return false;
}
/* Environment overrides (SOXR_USE_SIMD, then SOXR_USE_SIMD64) take
 * precedence over CPU auto-detection. */
static bool should_use_simd64(void)
{
  char const * e;
  return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
    ((e = getenv("SOXR_USE_SIMD64")))? !!atoi(e) : cpu_has_simd64();
}
#else
#define should_use_simd64() true
#endif
extern control_block_t
_soxr_rate32_cb,
_soxr_rate32s_cb,
_soxr_rate64_cb,
_soxr_rate64s_cb,
_soxr_vr32_cb;
/* Overwrite *field with the integer value of env_name, but only when the
 * variable is set and its value lies in [min, max]. */
static void runtime_num(char const * env_name,
    int min, int max, unsigned * field)
{
  char const * value = getenv(env_name);
  int parsed;
  if (!value)
    return;
  parsed = atoi(value);
  if (parsed >= min && parsed <= max)
    *field = (unsigned)parsed;
}
/* Replace the n_bits-wide bit-field at n_shift within *flags with the value
 * of env_name, when set and within range for the field width. */
static void runtime_flag(char const * env_name,
    unsigned n_bits, unsigned n_shift, unsigned long * flags)
{
  char const * value = getenv(env_name);
  if (value) {
    unsigned long mask = (1UL << n_bits) - 1;
    int parsed = atoi(value);
    if (parsed >= 0 && parsed <= (int)mask) {
      *flags &= ~(mask << n_shift);
      *flags |= (unsigned long)parsed << n_shift;
    }
  }
}
/* Create a resampler instance. Selects an engine (32/64-bit, SIMD, variable-
 * rate) from compile-time options plus runtime detection, applies env-var
 * overrides to the runtime spec, and (when channel count and ratio are known)
 * builds the per-channel engine state via soxr_set_io_ratio(). On failure
 * returns NULL with the error in *error0 (if given). */
soxr_t soxr_create(
  double input_rate, double output_rate,
  unsigned num_channels,
  soxr_error_t * error0,
  soxr_io_spec_t const * io_spec,
  soxr_quality_spec_t const * q_spec,
  soxr_runtime_spec_t const * runtime_spec)
{
  /* -1 flags "unknown ratio for now" when exactly one rate is zero. */
  double io_ratio = output_rate!=0? input_rate!=0?
    input_rate / output_rate : -1 : input_rate!=0? -1 : 0;
  static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768};
  soxr_t p = 0;
  soxr_error_t error = 0;

#if WITH_DEV_TRACE
#define _(x) (char)(sizeof(x)>=10? 'a'+(char)(sizeof(x)-10):'0'+(char)sizeof(x))
  char const * e = getenv("SOXR_TRACE");
  _soxr_trace_level = e? atoi(e) : 0;
  {
    /* Encode basic type sizes / endianness into a debug string. */
    static char const arch[] = {_(char), _(short), _(int), _(long), _(long long)
      , ' ', _(float), _(double), _(long double)
      , ' ', _(int *), _(int (*)(int))
      , ' ', HAVE_BIGENDIAN ? 'B' : 'L'
#if defined _OPENMP
      , ' ', 'O', 'M', 'P'
#endif
      , 0};
#undef _
    lsx_debug("arch: %s", arch);
  }
#endif

  if (q_spec && q_spec->e)  error = q_spec->e;
  else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2)
    error = "invalid io datatype(s)";

  if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed";

  if (p) {
    control_block_t * control_block;

    p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0);

    if (q_spec) { /* Backwards compatibility with original API: */
      if (p->q_spec.passband_end > 2)
        p->q_spec.passband_end /= 100;
      if (p->q_spec.stopband_begin > 2)
        p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100;
    }

    p->io_ratio = io_ratio;
    p->num_channels = num_channels;
    if (io_spec)
      p->io_spec = *io_spec;
    else
      p->io_spec.scale = 1;

    p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1);

    /* Environment-variable overrides for tuning parameters. */
    runtime_num("SOXR_MIN_DFT_SIZE", 8, 15, &p->runtime_spec.log2_min_dft_size);
    runtime_num("SOXR_LARGE_DFT_SIZE", 8, 20, &p->runtime_spec.log2_large_dft_size);
    runtime_num("SOXR_COEFS_SIZE", 100, 800, &p->runtime_spec.coef_size_kbytes);
    runtime_num("SOXR_NUM_THREADS", 0, 64, &p->runtime_spec.num_threads);
    runtime_flag("SOXR_COEF_INTERP", 2, 0, &p->runtime_spec.flags);
    runtime_flag("SOXR_STRICT_BUF", 1, 2, &p->runtime_spec.flags);
    runtime_flag("SOXR_NOSMALLINTOPT", 1, 3, &p->runtime_spec.flags);

    /* Fold the int<->float datatype scaling into the user-supplied scale. */
    p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] /
      datatype_full_scale[p->io_spec.itype & 3];
    p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p; /* dither seed */

    /* Engine selection; depends on the compiled-in rate converters. */
#if WITH_CR32 || WITH_CR32S || WITH_VR32
    if (0
#if WITH_VR32
        || ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))
#endif
#if WITH_CR32 || WITH_CR32S
        || !(WITH_CR64 || WITH_CR64S) || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION))
#endif
        ) {
      p->deinterleave = (deinterleave_t)_soxr_deinterleave_f;
      p->interleave = (interleave_t)_soxr_interleave_f;
      control_block =
#if WITH_VR32
        ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))? &_soxr_vr32_cb :
#endif
#if WITH_CR32S
        !WITH_CR32 || should_use_simd32()? &_soxr_rate32s_cb :
#endif
        &_soxr_rate32_cb;
    }
#if WITH_CR64 || WITH_CR64S
    else
#endif
#endif
#if WITH_CR64 || WITH_CR64S
    {
      p->deinterleave = (deinterleave_t)_soxr_deinterleave;
      p->interleave = (interleave_t)_soxr_interleave;
      control_block =
#if WITH_CR64S
        !WITH_CR64 || should_use_simd64()? &_soxr_rate64s_cb :
#endif
        &_soxr_rate64_cb;
    }
#endif
    memcpy(&p->control_block, control_block, sizeof(p->control_block));

    if (p->num_channels && io_ratio!=0)
      error = soxr_set_io_ratio(p, io_ratio, 0);
  }
  if (error)
    soxr_delete(p), p = 0;
  if (error0)
    *error0 = error;
  return p;
}
/* Register a pull-mode input callback; max_ilen of 0 means "no limit" on the
 * per-call request size. */
soxr_error_t soxr_set_input_fn(soxr_t p,
    soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen)
{
  p->input_fn = input_fn;
  p->input_fn_state = input_fn_state;
  p->max_ilen = max_ilen ? max_ilen : (size_t)-1;
  return 0;
}
/* Free per-channel engines and all owned buffers, then zero the struct; does
 * NOT free p itself (see soxr_delete / soxr_clear). */
static void soxr_delete0(soxr_t p)
{
  unsigned i;
  if (p->resamplers) for (i = 0; i < p->num_channels; ++i) {
    if (p->resamplers[i])
      resampler_close(p->resamplers[i]);
    free(p->resamplers[i]);
  }
  free(p->resamplers);
  free(p->channel_ptrs);
  free(p->shared);
  memset(p, 0, sizeof(*p));
}
/* Pipeline delay as reported by the engine (channel 0); 0 when not yet
 * initialised or in an error state. */
double soxr_delay(soxr_t p)
{
  return
    (p && !p->error && p->resamplers)? resampler_delay(p->resamplers[0]) : 0;
}
/* Tear down partially-built state, then record and return the error. Note
 * the order matters: soxr_delete0() zeroes *p, so p->error is set AFTER. */
static soxr_error_t fatal_error(soxr_t p, soxr_error_t error)
{
  soxr_delete0(p);
  return p->error = error;
}
/* Allocate shared state plus one engine per channel and create each engine;
 * any allocation or create failure tears everything down via fatal_error(). */
static soxr_error_t initialise(soxr_t p)
{
  unsigned i;
  size_t shared_size, channel_size;
  resampler_sizes(&shared_size, &channel_size);
  p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels);
  p->shared = calloc(shared_size, 1);
  p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels);
  if (!p->shared || !p->channel_ptrs || !p->resamplers)
    return fatal_error(p, "malloc failed");
  for (i = 0; i < p->num_channels; ++i) {
    soxr_error_t error;
    if (!(p->resamplers[i] = calloc(channel_size, 1)))
      return fatal_error(p, "malloc failed");
    error = resampler_create(
        p->resamplers[i],
        p->shared,
        p->io_ratio,
        &p->q_spec,
        &p->runtime_spec,
        p->io_spec.scale);
    if (error)
      return fatal_error(p, error);
  }
  return 0;
}
/* Change the channel count; only legal before the per-channel engines have
 * been created. Triggers (re-)initialisation via soxr_set_io_ratio(). */
soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels)
{
  if (!p) return "invalid soxr_t pointer";
  if (num_channels == p->num_channels) return p->error;
  if (!num_channels) return "invalid # of channels";
  if (p->resamplers) return "# of channels can't be changed";
  p->num_channels = num_channels;
  return soxr_set_io_ratio(p, p->io_ratio, 0);
}
/* Set (or, for variable-rate engines, change) the input/output sample-rate
 * ratio. First call performs lazy initialisation of the engines. */
soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len)
{
  unsigned i;
  soxr_error_t error;
  if (!p) return "invalid soxr_t pointer";
  if ((error = p->error)) return error;
  if (!p->num_channels) return "must set # channels before O/I ratio";
  if (io_ratio <= 0) return "I/O ratio out-of-range";
  if (!p->channel_ptrs) {   /* Not yet initialised (allocated in initialise()). */
    p->io_ratio = io_ratio;
    return initialise(p);
  }
  if (p->control_block[8]) { /* Slot 8 = resampler_set_io_ratio: engine supports it. */
    for (i = 0; !error && i < p->num_channels; ++i)
      resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len);
    return error;
  }
  /* Fixed-ratio engine: accept only a no-op change (within FP tolerance). */
  return fabs(p->io_ratio - io_ratio) < 1e-15? 0 :
    "varying O/I ratio is not supported with this quality level";
}
/* NULL-safe destructor: release internals, then the struct itself. */
void soxr_delete(soxr_t p)
{
  if (!p)
    return;
  soxr_delete0(p);
  free(p);
}
/* Reset stream state while preserving configuration (specs, callback, channel
 * count, engine selection); re-creates the engines immediately only when
 * RESET_ON_CLEAR is set, otherwise lazily on the next soxr_set_io_ratio(). */
soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */
{
  if (p) {
    struct soxr tmp = *p; /* Save config before delete0 zeroes the struct. */
    soxr_delete0(p);
    memset(p, 0, sizeof(*p));
    p->input_fn = tmp.input_fn;
    p->runtime_spec = tmp.runtime_spec;
    p->q_spec = tmp.q_spec;
    p->io_spec = tmp.io_spec;
    p->num_channels = tmp.num_channels;
    p->input_fn_state = tmp.input_fn_state;
    memcpy(p->control_block, tmp.control_block, sizeof(p->control_block));
    p->deinterleave = tmp.deinterleave;
    p->interleave = tmp.interleave;
    return (p->q_spec.flags & RESET_ON_CLEAR)?
      soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0;
  }
  return "invalid soxr_t pointer";
}
/* Convert/deinterleave len samples of channel i from src into that channel's
 * engine input buffer. */
static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len)
{
  sample_t * dest = resampler_input(p->resamplers[i], NULL, len);
  (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1);
}
/* Feed len samples (interleaved or split per SOXR_SPLIT) into the engines.
 * A zero len marks end-of-input and switches to flushing. Returns the number
 * of samples consumed (0 on error). */
static size_t soxr_input(soxr_t p, void const * in, size_t len)
{
  bool separated;
  unsigned i;
  if (!p || p->error) return 0;
  /* Bug fix: `separated` was previously computed by dereferencing p BEFORE
   * the NULL check above. */
  separated = !!(p->io_spec.itype & SOXR_SPLIT);
  if (!in && len) {p->error = "null input buffer pointer"; return 0;}
  if (!len) {
    p->flushing = true; /* End of input stream: drain remaining samples. */
    return 0;
  }
  if (separated)
    for (i = 0; i < p->num_channels; ++i)
      soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len);
  else {
    /* Interleaved input: fetch each channel's engine buffer, then
     * deinterleave all channels in one call. */
    for (i = 0; i < p->num_channels; ++i)
      p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len);
    (*p->deinterleave)(
        (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels);
  }
  return len;
}
/* Run one channel's engine and fetch up to len output samples. In split mode
 * the samples are converted straight into dest (clips counted); otherwise the
 * engine buffer pointer is stashed for the caller to interleave later. */
static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated)
{
  sample_t const * src;
  if (p->flushing)
    resampler_flush(p->resamplers[i]);
  resampler_process(p->resamplers[i], len);
  src = resampler_output(p->resamplers[i], NULL, &len); /* len updated to actual count */
  if (separated)
    p->clips += (p->interleave)(p->io_spec.otype, &dest, &src,
        len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
  else p->channel_ptrs[i] = (void /* const */ *)src;
  return len;
}
/* Produce up to len output samples across all channels. num_threads == 0
 * selects the OpenMP parallel-per-channel path; otherwise channels are
 * processed serially. Interleaved output is merged after all channels run. */
static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len)
{
  unsigned u;
  size_t done = 0;
  bool separated = !!(p->io_spec.otype & SOXR_SPLIT);
#if defined _OPENMP
  int i;
  if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
  for (i = 0; i < (int)p->num_channels; ++i) {
    size_t done1;
    done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated);
    if (!i)
      done = done1; /* All channels produce the same count; keep channel 0's. */
  } else
#endif
  for (u = 0; u < p->num_channels; ++u)
    done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated);

  if (!separated)
    p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs,
        done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
  return done;
}
/* Pull-mode output: repeatedly produce output, pulling more input through the
 * registered input_fn as needed, until len0 samples are produced or input is
 * exhausted. Returns the number of output samples written. */
size_t soxr_output(soxr_t p, void * out, size_t len0)
{
  size_t odone, odone0 = 0, olen = len0, osize, idone = 0;
  size_t ilen;
  void const * in = out; /* Set to !=0, so that caller may leave unset. */
  bool was_flushing;
  if (!p || p->error) return 0;
  if (!out && len0) {p->error = "null output buffer pointer"; return 0;}
  /* Bug fix: ilen was previously computed by dereferencing p BEFORE the NULL
   * check above. */
  ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio));
  do {
    odone = soxr_output_no_callback(p, out, olen);
    odone0 += odone;
    if (odone0 == len0 || !p->input_fn || p->flushing)
      break;
    /* Advance the output cursor, then pull more input from the callback. */
    osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels;
    out = (char *)out + osize * odone;
    olen -= odone;
    idone = p->input_fn(p->input_fn_state, &in, ilen);
    was_flushing = p->flushing;
    if (!in)
      p->error = "input function reported failure";
    else soxr_input(p, in, idone);
  } while (odone || idone || (!was_flushing && p->flushing));
  return odone0;
}
/* How many input samples to consume in order to produce olen outputs, capped
 * at the ilen actually available. */
static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen)
{
  size_t result;
#if 0
  if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING)
    result = rate_i_for_o(p->resamplers[0], olen);
  else
#endif
  result = (size_t)ceil((double)olen * p->io_ratio);
  return min(result, ilen);
}
#if 0
static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen)
{
size_t result = (size_t)ceil((double)ilen / p->io_ratio);
return min(result, olen);
}
#endif
/* Push-mode processing: consume up to ilen0 input samples and produce up to
 * olen output samples. Flush conventions: in == NULL, or a bitwise-NOT'ed
 * ilen0 (i.e. (size_t)~len with the top bit set), requests end-of-input. */
soxr_error_t soxr_process(soxr_t p,
    void const * in , size_t ilen0, size_t * idone0,
    void * out, size_t olen , size_t * odone0)
{
  size_t ilen, idone, odone = 0;
  unsigned u;
  bool flush_requested = false;

  if (!p) return "null pointer";

  if (!in)
    flush_requested = true, ilen = ilen0 = 0;
  else {
    if ((ptrdiff_t)ilen0 < 0) /* ~len encoding: flush after this block. */
      flush_requested = true, ilen0 = ~ilen0;
    if (idone0 && (1 || flush_requested))
      ilen = soxr_i_for_o(p, olen, ilen0);
    else
      ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/;
  }
  /* Only enter flushing once ALL requested input has been consumed. */
  p->flushing |= ilen == ilen0 && flush_requested;

  if (!out || !in)
    idone = ilen;
  else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */
#if defined _OPENMP
    int i;
    if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
    for (i = 0; i < (int)p->num_channels; ++i) {
      size_t done;
      if (in)
        soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen);
      done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], olen, true);
      if (!i)
        odone = done; /* All channels produce the same count. */
    } else
#endif
    for (u = 0; u < p->num_channels; ++u) {
      if (in)
        soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen);
      odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true);
    }
    idone = ilen;
  }
  else {
    idone = ilen? soxr_input (p, in , ilen) : 0;
    odone = soxr_output(p, out, olen);
  }
  if (idone0) *idone0 = idone;
  if (odone0) *odone0 = odone;
  return p->error;
}
/* Convenience one-call resample: create, process the whole buffer (with flush
 * requested via the ~ilen encoding), and delete. Defaults to SOXR_LQ when no
 * quality spec is supplied. */
soxr_error_t soxr_oneshot(
    double irate, double orate,
    unsigned num_channels,
    void const * in , size_t ilen, size_t * idone,
    void * out, size_t olen, size_t * odone,
    soxr_io_spec_t const * io_spec,
    soxr_quality_spec_t const * q_spec,
    soxr_runtime_spec_t const * runtime_spec)
{
  soxr_t resampler = NULL;
  soxr_error_t error = q_spec? q_spec->e : 0;
  if (!error) {
    soxr_quality_spec_t q_spec1;
    if (!q_spec)
      q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1;
    resampler = soxr_create(irate, orate, num_channels,
        &error, io_spec, q_spec, runtime_spec);
  }
  if (!error) {
    /* ~ilen marks the input as final, so everything is flushed in one call. */
    error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone);
    soxr_delete(resampler);
  }
  return error;
}
/* Set or clear the sticky error.
 * NOTE(review): the guard `!p->error && p->error != error` reduces to
 * "no error pending and new error non-null", which DISCARDS a freshly raised
 * error and returns NULL. `p->error && ...` (keep the first error) looks like
 * the intent — verify against upstream libsoxr before changing. */
soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error)
{
  if (!p) return "null pointer";
  if (!p->error && p->error != error) return p->error;
  p->error = error;
  return 0;
}
|
residual.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// calculate res_id = rhs_id - A(x_id)
/* Compute res_id = rhs_id - A(x_id) over every box owned by this rank.
 * a and b are the operator coefficients (helmholtz: a*alpha*x - b*div(beta*grad x));
 * they, together with the locals below (i, j, k, ijk, jStride, kStride,
 * h2inv, alpha, beta_i/j/k, valid), are consumed by the apply_op_ijk macro
 * by name — do not rename them. */
void residual(level_type * level, int res_id, int x_id, int rhs_id, double a, double b){
// exchange the boundary for x in prep for Ax...
exchange_boundary(level,x_id,STENCIL_IS_STAR_SHAPED);
apply_BCs(level,x_id);
// now do residual/restriction proper...
uint64_t _timeStart = CycleTime();
int box;
// Two-level OpenMP: the outer loop is parallelized across boxes; the
// OMP_THREAD_ACROSS_BOXES / OMP_THREAD_WITHIN_A_BOX macros presumably
// expand to num_threads()/if() clauses controlling nested parallelism
// (defined elsewhere — confirm in the build headers).
#pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
for(box=0;box<level->num_my_boxes;box++){
int i,j,k;
int jStride = level->my_boxes[box].jStride;
int kStride = level->my_boxes[box].kStride;
int ghosts = level->my_boxes[box].ghosts;
int dim = level->my_boxes[box].dim;
double h2inv = 1.0/(level->h*level->h);
// Base pointers are offset past the ghost-zone halo so index 0 is the
// first interior (non-ghost) point of each vector.
const double * __restrict__ x = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);
const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
const double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain
double * __restrict__ res = level->my_boxes[box].vectors[ res_id] + ghosts*(1+jStride+kStride);
#pragma omp parallel for private(k,j,i) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
for(k=0;k<dim;k++){
for(j=0;j<dim;j++){
for(i=0;i<dim;i++){
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(x); // stencil application; macro reads the locals above
res[ijk] = rhs[ijk]-Ax;
}}}
}
level->cycles.residual += (uint64_t)(CycleTime()-_timeStart);
}
|
mandelbrot_mpi.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include "mpi.h"
#include <string.h>
#include <math.h>
// USAGE: mandelbrot_mpi <cols> <rows> <task_size> <x0> <y0> <dx> <dy>
// OUTPUT: <time_spent_in_ms>
#define TRIALS 50
#define ISTR_SIZE 1
#define BUFFER_SIZE (ISTR_SIZE + MAX_DATA_SIZE)
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define TAG_KILL 1
/* Return the current wall-clock time (CLOCK_REALTIME) in nanoseconds,
 * as a double. */
double get_time()
{
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
return (double)now.tv_sec * 1.0e9 + (double)now.tv_nsec;
}
/* Master/worker Mandelbrot renderer.
 * Rank 0 hands out tasks of task_size grid cells, collects results, and
 * keeps the minimum elapsed time over TRIALS runs; workers compute their
 * chunk with an OpenMP-parallel loop and send it back.  Output: best time
 * in ms on stdout, plus the grid in results/mandelbrot_mpi.dat.
 *
 * Fixes vs. previous version:
 *  - tmin was re-initialized inside the trial loop, so the minimum over
 *    trials was never actually taken; it is now initialized once.
 *  - ns -> ms conversion used 10e6 (=1e7); correct divisor is 1.0e6.
 *  - early error exits now call MPI_Finalize() before returning.
 *  - grid is NULL-initialized (it is only allocated on rank 0). */
int main(int argc, char **argv)
{
    FILE *fp;
    int rows, cols, size, max_iteration, task_size, iMin, iMax;
    int *grid = NULL; /* allocated on rank 0 only */
    double ttot, tstart, tend, tmin;
    char filename[] = "results/mandelbrot_mpi.dat";
    MPI_Status status;
    int *buffer;
    int me, numinstances;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    MPI_Comm_size(MPI_COMM_WORLD, &numinstances);
    if (argc < 4)
    {
        if (me == 0)
        {
            printf("Usage: mandelbrot_mpi cols rows task_size x0=-2.5 y0=-1 dx=-1 dy=1\n");
        }
        MPI_Finalize(); /* FIX: MPI requires Finalize before process exit */
        return 1;
    }
    cols = atoi(argv[1]);
    rows = atoi(argv[2]);
    task_size = atoi(argv[3]);
    size = rows * cols;
    max_iteration = 100;
    double xmin = argc > 4 ? atof(argv[4]) : -2.5;
    double ymin = argc > 5 ? atof(argv[5]) : -1;
    double xmax = argc > 6 ? xmin + atof(argv[6]) : 1;
    double ymax = argc > 7 ? ymin + atof(argv[7]) : 1;
    if (xmin >= xmax || ymin >= ymax)
    {
        if (me == 0)
        {
            printf("Usage: mandelbrot_mpi cols rows task_size x0=-2.5 y0=-1 dx=-1 dy=1\n");
        }
        MPI_Finalize(); /* FIX: as above */
        return 1;
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (me == 0)
    {
        grid = (int *)malloc(size * sizeof(int));
    }
    buffer = (int *)malloc((ISTR_SIZE + task_size) * sizeof(int));
    /* FIX: initialize the running minimum ONCE, before the trial loop;
     * previously it was reset every trial so MIN(tmin, ttot) was a no-op. */
    tmin = 10e10;
    for (int k = 0; k < TRIALS; k++)
    {
        if (me == 0)
        {
            int tasks_sent = 0, init_task_num = 2;
            /* index of the next task to hand out; advanced on every send */
            int task_idx = 0;
            tstart = get_time();
            /* Prime each worker with init_task_num tasks so it always has
             * one queued while computing another. */
            for (int other = 1; other < numinstances; other++)
            {
                for (int t = 0; t < init_task_num; t++)
                {
                    /* Stop early if the whole grid is already assigned. */
                    if (task_idx >= size)
                    {
                        other = numinstances;
                        break;
                    }
                    buffer[0] = task_idx;
                    MPI_Send(buffer, ISTR_SIZE, MPI_INT, other, 0, MPI_COMM_WORLD);
                    task_idx += task_size;
                    tasks_sent++;
                }
            }
            /* Collect results and refill workers until all tasks complete. */
            while (tasks_sent)
            {
                MPI_Recv(buffer, ISTR_SIZE + task_size, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
                tasks_sent--;
                iMin = buffer[0];
                /* The last task may be shorter than task_size. */
                iMax = MIN(iMin + task_size, size);
                /* Copy this task's result into the final grid. */
                memcpy(grid + iMin, buffer + ISTR_SIZE, (iMax - iMin) * sizeof(int));
                /* If there is nothing left to hand out, just keep draining. */
                if (task_idx >= size)
                    continue;
                buffer[0] = task_idx;
                task_idx += task_size;
                MPI_Send(buffer, ISTR_SIZE, MPI_INT, status.MPI_SOURCE, 0, MPI_COMM_WORLD);
                tasks_sent++;
            }
            /* All work done for this trial: tell every worker to stop waiting. */
            for (int other = 1; other < numinstances; other++)
            {
                MPI_Send(NULL, 0, MPI_INT, other, TAG_KILL, MPI_COMM_WORLD);
            }
            tend = get_time();
            ttot = tend - tstart;
            tmin = MIN(tmin, ttot);
        }
        else
        {
            while (1)
            {
                MPI_Recv(buffer, ISTR_SIZE, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
                /* TAG_KILL marks the end of this trial for the workers. */
                if (status.MPI_TAG == TAG_KILL)
                    break;
                iMin = buffer[0];
                /* Edge case: final (partial) task at the end of the grid. */
                iMax = MIN(iMin + task_size, size);
                /* Mandelbrot escape-time iteration, parallelized with OpenMP. */
                #pragma omp parallel for schedule(static)
                for (int i = iMin; i < iMax; i++)
                {
                    int px = i % rows;
                    int py = i / rows;
                    double x0 = (double)px / (rows - 1) * (xmax - xmin) + xmin;
                    double y0 = (double)py / (cols - 1) * (ymax - ymin) + ymin;
                    double x = 0;
                    double y = 0;
                    int iteration = 0;
                    while (x * x + y * y < 2 * 2 && iteration < max_iteration)
                    {
                        double xtemp = x * x - y * y + x0;
                        y = 2 * x * y + y0;
                        x = xtemp;
                        iteration++;
                    }
                    buffer[i - iMin + ISTR_SIZE] = iteration;
                }
                MPI_Send(buffer, ISTR_SIZE + task_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
            }
        }
    }
    free(buffer);
    if (me == 0)
    {
        /* FIX: get_time() is in ns, so ms = ns / 1.0e6 (was 10e6 == 1e7). */
        printf("%.2lf\n", tmin / 1.0e6);
        fp = fopen(filename, "w");
        fprintf(fp, "%.2lf %.2lf %.2lf %.2lf\n", xmin, ymin, xmax - xmin, ymax - ymin);
        for (int i = 0; i < cols; i++)
        {
            for (int j = 0; j < rows; j++)
            {
                fprintf(fp, "%i ", grid[rows * i + j]);
            }
            fprintf(fp, "\n");
        }
        fclose(fp);
        free(grid);
    }
    MPI_Finalize();
    return 0;
}
|
nn_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef FLANN_NNINDEX_H
#define FLANN_NNINDEX_H
#include <vector>
#include "flann/general.h"
#include "flann/util/matrix.h"
#include "flann/util/params.h"
#include "flann/util/result_set.h"
#include "flann/util/dynamic_bitset.h"
#include "flann/util/saving.h"
namespace flann
{
#define KNN_HEAP_THRESHOLD 250
/**
 * Abstract, non-templated base interface shared by all index types,
 * exposing the distance-agnostic queries (size, vector length, index
 * type, memory use, parameters, and optional (de)serialization).
 */
class IndexBase
{
public:
virtual ~IndexBase() {};
/** Length (dimensionality) of the vectors stored in the index. */
virtual size_t veclen() const = 0;
/** Number of points currently in the index. */
virtual size_t size() const = 0;
/** Which algorithm this index implements. */
virtual flann_algorithm_t getType() const = 0;
/** Approximate memory used by the index, in bytes. */
virtual int usedMemory() const = 0;
/** Parameters the index was created with. */
virtual IndexParams getParameters() const = 0;
#ifdef FLANN_SERIALIZATION_LZ4
virtual void loadIndex(FILE* stream) = 0;
virtual void saveIndex(FILE* stream) = 0;
#endif
};
/**
* Nearest-neighbour index base class
*/
template <typename Distance>
class NNIndex : public IndexBase
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
// Construct an empty index using the given distance functor.
NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
removed_(false), removed_count_(0), data_ptr_(NULL)
{
}
// Construct an empty index with explicit index parameters.
NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL)
{
}
// Copy constructor: deep-copies the dataset only when `other` owns its
// storage (data_ptr_ != NULL); otherwise points_ keep referencing the
// caller-owned data.
NNIndex(const NNIndex& other) :
distance_(other.distance_),
last_id_(other.last_id_),
size_(other.size_),
size_at_build_(other.size_at_build_),
veclen_(other.veclen_),
index_params_(other.index_params_),
removed_(other.removed_),
removed_points_(other.removed_points_),
removed_count_(other.removed_count_),
ids_(other.ids_),
points_(other.points_),
data_ptr_(NULL)
{
if (other.data_ptr_) {
data_ptr_ = new ElementType[size_*veclen_];
std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_);
for (size_t i=0;i<size_;++i) {
points_[i] = data_ptr_ + i*veclen_;
}
}
}
virtual ~NNIndex()
{
if (data_ptr_) {
delete[] data_ptr_;
}
}
// Polymorphic copy.
virtual NNIndex* clone() const = 0;
/**
 * Builds the index
 */
virtual void buildIndex()
{
freeIndex();
cleanRemovedPoints();
// building index
buildIndexImpl();
size_at_build_ = size_;
}
/**
 * Builds the index using the specified dataset
 * @param dataset the dataset to use
 */
virtual void buildIndex(const Matrix<ElementType>& dataset)
{
setDataset(dataset);
this->buildIndex();
}
/**
 * @brief Incrementally add points to the index.
 * @param points Matrix with points to be added
 * @param rebuild_threshold
 */
virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
{
throw FLANNException("Functionality not supported by this index");
}
/**
 * Remove point from the index
 * @param index Index of point to be removed
 */
virtual void removePoint(size_t id)
{
// Removal is lazy: on first removal, set up the id<->index mapping and
// the removed-points bitset; points are only compacted at rebuild time
// (see cleanRemovedPoints()).
if (!removed_) {
ids_.resize(size_);
for (size_t i=0;i<size_;++i) {
ids_[i] = i;
}
removed_points_.resize(size_);
removed_points_.reset();
last_id_ = size_;
removed_ = true;
}
size_t point_index = id_to_index(id);
if (point_index!=size_t(-1) && !removed_points_.test(point_index)) {
removed_points_.set(point_index);
removed_count_++;
}
}
/**
 * Get point with specific id
 * @param id
 * @return pointer to the point's data, or NULL if the id is unknown
 */
virtual ElementType* getPoint(size_t id)
{
size_t index = id_to_index(id);
if (index!=size_t(-1)) {
return points_[index];
}
else {
return NULL;
}
}
/**
 * @return number of features in this index.
 */
inline size_t size() const
{
return size_ - removed_count_;
}
/**
 * @return The dimensionality of the features in this index.
 */
inline size_t veclen() const
{
return veclen_;
}
/**
 * Returns the parameters used by the index.
 *
 * @return The index parameters
 */
IndexParams getParameters() const
{
return index_params_;
}
// Boost-style bidirectional serialization: the same function both saves
// and loads, dispatching on Archive::is_saving / is_loading.
template<typename Archive>
void serialize(Archive& ar)
{
IndexHeader header;
if (Archive::is_saving::value) {
header.h.data_type = flann_datatype_value<ElementType>::value;
header.h.index_type = getType();
header.h.rows = size_;
header.h.cols = veclen_;
}
ar & header;
// sanity checks
if (Archive::is_loading::value) {
// Compare signatures ignoring the trailing version suffix ("v0.0").
if (strncmp(header.h.signature,
FLANN_SIGNATURE_,
strlen(FLANN_SIGNATURE_) - strlen("v0.0")) != 0) {
throw FLANNException("Invalid index file, wrong signature");
}
if (header.h.data_type != flann_datatype_value<ElementType>::value) {
throw FLANNException("Datatype of saved index is different than of the one to be created.");
}
if (header.h.index_type != getType()) {
throw FLANNException("Saved index type is different then the current index type.");
}
// TODO: check for distance type
}
ar & size_;
ar & veclen_;
ar & size_at_build_;
bool save_dataset;
if (Archive::is_saving::value) {
save_dataset = get_param(index_params_,"save_dataset", false);
}
ar & save_dataset;
if (save_dataset) {
if (Archive::is_loading::value) {
// Loading the embedded dataset: (re)allocate owned storage and
// rebuild the per-point pointers into it.
if (data_ptr_) {
delete[] data_ptr_;
}
data_ptr_ = new ElementType[size_*veclen_];
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = data_ptr_ + i*veclen_;
}
}
for (size_t i=0;i<size_;++i) {
ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType));
}
} else {
if (points_.size()!=size_) {
throw FLANNException("Saved index does not contain the dataset and no dataset was provided.");
}
}
ar & last_id_;
ar & ids_;
ar & removed_;
if (removed_) {
ar & removed_points_;
}
ar & removed_count_;
}
/**
 * @brief Perform k-nearest neighbor search
 * @param[in] queries The query points for which to find the nearest neighbors
 * @param[out] indices The indices of the nearest neighbors found
 * @param[out] dists Distances to the nearest neighbors found
 * @param[in] knn Number of nearest neighbors to return
 * @param[in] params Search parameters
 */
virtual int knnSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(indices.cols >= knn);
assert(dists.cols >= knn);
// Heap-backed result set pays off only for large knn; honor an explicit
// user choice, otherwise switch on KNN_HEAP_THRESHOLD.
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
int count = 0;
if (use_heap) {
// One result set per thread; queries are distributed statically.
#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
return count;
}
/**
 * knnSearch convenience overload for int index matrices: delegates to the
 * size_t version through a temporary and copies the results back.
 * @param queries
 * @param indices
 * @param dists
 * @param knn
 * @param params
 * @return number of neighbors found
 */
int knnSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = knnSearch(queries, indices_, dists, knn, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}
/**
 * @brief Perform k-nearest neighbor search (vector-of-vectors output,
 * resized per query to the number of neighbors actually found)
 * @param[in] queries The query points for which to find the nearest neighbors
 * @param[out] indices The indices of the nearest neighbors found
 * @param[out] dists Distances to the nearest neighbors found
 * @param[in] knn Number of nearest neighbors to return
 * @param[in] params Search parameters
 */
int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
int count = 0;
if (use_heap) {
#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
return count;
}
/**
 * knnSearch convenience overload for vector<vector<int>> output: delegates
 * to the size_t version and converts element-wise.
 * @param queries
 * @param indices
 * @param dists
 * @param knn
 * @param params
 * @return number of neighbors found
 */
int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = knnSearch(queries, indices_, dists, knn, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
/**
 * @brief Perform radius search
 * @param[in] query The query point
 * @param[out] indices The indices of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found
 */
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
if (max_neighbors==0) {
// Count-only search: no indices/dists are written.
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
return count;
}
/**
 * radiusSearch convenience overload for int index matrices: delegates to
 * the size_t version through a temporary and copies the results back.
 * @param queries
 * @param indices
 * @param dists
 * @param radius
 * @param params
 * @return number of neighbors found
 */
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = radiusSearch(queries, indices_, dists, radius, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}
/**
 * @brief Perform radius search (vector-of-vectors output, resized per
 * query to the number of neighbors actually found)
 * @param[in] query The query point
 * @param[out] indices The indices of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found
 */
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// just count neighbors
if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
return count;
}
/**
 * radiusSearch convenience overload for vector<vector<int>> output:
 * delegates to the size_t version and converts element-wise.
 * @param queries
 * @param indices
 * @param dists
 * @param radius
 * @param params
 * @return number of neighbors found
 */
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = radiusSearch(queries, indices_, dists, radius, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
// Core search primitive implemented by each concrete index type.
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;
protected:
virtual void freeIndex() = 0;
virtual void buildIndexImpl() = 0;
// Map an external point id to its current internal index; returns
// size_t(-1) if the id is unknown.  Fast path: before any removal the
// mapping is the identity; otherwise binary-search ids_ (kept sorted).
size_t id_to_index(size_t id)
{
if (ids_.size()==0) {
return id;
}
size_t point_index = size_t(-1);
if (id < ids_.size() && ids_[id]==id) {
return id;
}
else {
// binary search
size_t start = 0;
size_t end = ids_.size();
while (start<end) {
size_t mid = (start+end)/2;
if (ids_[mid]==id) {
point_index = mid;
break;
}
else if (ids_[mid]<id) {
start = mid + 1;
}
else {
end = mid;
}
}
}
return point_index;
}
// Translate internal indices to external ids, in place if in==out.
// No-op unless points have been removed (ids_ is only populated then).
void indices_to_ids(const size_t* in, size_t* out, size_t size) const
{
if (removed_) {
for (size_t i=0;i<size;++i) {
out[i] = ids_[in[i]];
}
}
}
// Adopt a caller-owned dataset (no copy) and reset all removal state.
void setDataset(const Matrix<ElementType>& dataset)
{
size_ = dataset.rows;
veclen_ = dataset.cols;
last_id_ = 0;
ids_.clear();
removed_points_.clear();
removed_ = false;
removed_count_ = 0;
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = dataset[i];
}
}
// Append caller-owned points; new points get fresh ids when the id
// mapping is active.
void extendDataset(const Matrix<ElementType>& new_points)
{
size_t new_size = size_ + new_points.rows;
if (removed_) {
removed_points_.resize(new_size);
ids_.resize(new_size);
}
points_.resize(new_size);
for (size_t i=size_;i<new_size;++i) {
points_[i] = new_points[i-size_];
if (removed_) {
ids_[i] = last_id_++;
removed_points_.reset(i);
}
}
size_ = new_size;
}
// Compact points_/ids_ by dropping entries flagged in removed_points_;
// called before (re)building the index.
void cleanRemovedPoints()
{
if (!removed_) return;
size_t last_idx = 0;
for (size_t i=0;i<size_;++i) {
if (!removed_points_.test(i)) {
points_[last_idx] = points_[i];
ids_[last_idx] = ids_[i];
removed_points_.reset(last_idx);
++last_idx;
}
}
points_.resize(last_idx);
ids_.resize(last_idx);
removed_points_.resize(last_idx);
size_ = last_idx;
removed_count_ = 0;
}
// Member-wise swap; used by derived classes to implement assignment.
void swap(NNIndex& other)
{
std::swap(distance_, other.distance_);
std::swap(last_id_, other.last_id_);
std::swap(size_, other.size_);
std::swap(size_at_build_, other.size_at_build_);
std::swap(veclen_, other.veclen_);
std::swap(index_params_, other.index_params_);
std::swap(removed_, other.removed_);
std::swap(removed_points_, other.removed_points_);
std::swap(removed_count_, other.removed_count_);
std::swap(ids_, other.ids_);
std::swap(points_, other.points_);
std::swap(data_ptr_, other.data_ptr_);
}
protected:
/**
 * The distance functor
 */
Distance distance_;
/**
 * Each index point has an associated ID. IDs are assigned sequentially in
 * increasing order. This indicates the ID assigned to the last point added to the
 * index.
 */
size_t last_id_;
/**
 * Number of points in the index (and database)
 */
size_t size_;
/**
 * Number of features in the dataset when the index was last built.
 */
size_t size_at_build_;
/**
 * Size of one point in the index (and database)
 */
size_t veclen_;
/**
 * Parameters of the index.
 */
IndexParams index_params_;
/**
 * Flag indicating if at least a point was removed from the index
 */
bool removed_;
/**
 * Array used to mark points removed from the index
 */
DynamicBitset removed_points_;
/**
 * Number of points removed from the index
 */
size_t removed_count_;
/**
 * Array of point IDs, returned by nearest-neighbour operations
 */
std::vector<size_t> ids_;
/**
 * Point data
 */
std::vector<ElementType*> points_;
/**
 * Pointer to dataset memory if allocated by this index, otherwise NULL
 */
ElementType* data_ptr_;
};
// Convenience macro for derived index templates: re-exposes the dependent
// NNIndex<Distance> base-class members so they can be used unqualified
// (two-phase lookup would otherwise require this-> or explicit using-decls).
#define USING_BASECLASS_SYMBOLS \
using NNIndex<Distance>::distance_;\
using NNIndex<Distance>::size_;\
using NNIndex<Distance>::size_at_build_;\
using NNIndex<Distance>::veclen_;\
using NNIndex<Distance>::index_params_;\
using NNIndex<Distance>::removed_points_;\
using NNIndex<Distance>::ids_;\
using NNIndex<Distance>::removed_;\
using NNIndex<Distance>::points_;\
using NNIndex<Distance>::extendDataset;\
using NNIndex<Distance>::setDataset;\
using NNIndex<Distance>::cleanRemovedPoints;\
using NNIndex<Distance>::indices_to_ids;
}
#endif //FLANN_NNINDEX_H
|
critical-2.c | /* { dg-do compile } */
/* Negative parse tests for malformed "#pragma omp critical" name clauses.
 * The dg-error comments are the DejaGnu test oracle (each matches the
 * diagnostic GCC must emit on its own line) — do not edit or move them. */
void f1(void)
{
#pragma omp critical a /* { dg-error "expected" } */
;
#pragma omp critical ( /* { dg-error "expected identifier" } */
;
#pragma omp critical (a /* { dg-error "expected .\\)." } */
;
#pragma omp critical (a b) /* { dg-error "expected .\\)." } */
} /* { dg-error "expected expression" } */
|
Dropout.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018-2019 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include "bb/Manager.h"
#include "bb/Activation.h"
namespace bb {
// Dropout
template <typename FT = float, typename BT = float>
class Dropout : public Activation
{
using _super = Activation;
public:
static inline std::string ModelName(void) { return "Dropout"; }
static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<FT>::Name() + "_" + DataType<BT>::Name(); }
std::string GetModelName(void) const override { return ModelName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false;
double m_rate = 0.5;
std::mt19937_64 m_mt;
Tensor_<std::int8_t> m_mask;
FrameBuffer m_y_buf;
FrameBuffer m_dx_buf;
struct create_t
{
double rate = 0.5;
std::uint64_t seed = 1;
};
protected:
Dropout(create_t const &create)
{
m_rate = create.rate;
m_mt.seed(create.seed);
}
/**
* @brief コマンド処理
* @detail コマンド処理
* @param args コマンド
*/
void CommandProc(std::vector<std::string> args) override
{
// HostOnlyモード設定
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
}
public:
~Dropout() {}
static std::shared_ptr<Dropout> Create(create_t const &create)
{
return std::shared_ptr<Dropout>(new Dropout(create));
}
static std::shared_ptr<Dropout> Create(double rate=0.5, std::uint64_t seed=1)
{
create_t create;
create.rate = rate;
create.seed = seed;
return Create(create);
}
#ifdef BB_PYBIND11
static std::shared_ptr<Dropout> CreatePy(double rate=0.5, std::uint64_t seed=1)
{
create_t create;
create.rate = rate;
create.seed = seed;
return Create(create);
}
#endif
// ノード単位でのForward計算
std::vector<double> ForwardNode(index_t node, std::vector<double> x_vec) const override
{
return x_vec;
}
/**
* @brief forward演算
* @detail forward演算を行う
* @param x 入力データ
* @param train 学習時にtrueを指定
* @return forward演算結果
*/
inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
BB_ASSERT(x_buf.GetType() == DataType<FT>::type);
// 戻り値のサイズ設定
m_y_buf.ResizeLike(x_buf);
m_mask.Resize(x_buf.GetNodeSize());
{
index_t frame_size = x_buf.GetFrameSize();
index_t node_size = x_buf.GetNodeSize();
auto x_ptr = x_buf.LockConst<FT>();
auto y_ptr = m_y_buf.Lock<FT>(true);
if (train) {
// generate mask
auto mask_ptr = m_mask.Lock(true);
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (index_t node = 0; node < node_size; ++node) {
mask_ptr[node] = (dist(m_mt) > m_rate) ? 0xff : 0;
}
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
if (mask_ptr[node] != 0) {
for (index_t frame = 0; frame < frame_size; ++frame) {
y_ptr.Set(frame, node, x_ptr.Get(frame, node));
}
}
else {
for (index_t frame = 0; frame < frame_size; ++frame) {
y_ptr.Set(frame, node, (FT)0);
}
}
}
}
else {
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
for (index_t frame = 0; frame < frame_size; ++frame) {
y_ptr.Set(frame, node, x_ptr.Get(frame, node) * (FT)(1.0 - m_rate));
}
}
}
}
return m_y_buf;
}
/**
 * @brief  Backward pass of Dropout.
 * @detail Routes gradients through the keep/drop mask recorded by the
 *         most recent training-mode Forward(): kept nodes pass dy
 *         through unchanged, dropped nodes receive zero gradient.
 * @param  dy_buf incoming gradient buffer
 * @return backward result (dx) buffer
 */
inline FrameBuffer Backward(FrameBuffer dy_buf) override
{
if (dy_buf.Empty()) {
return dy_buf;
}
BB_ASSERT(dy_buf.GetType() == DataType<BT>::type);
// Size the gradient output like the incoming gradient.
m_dx_buf.ResizeLike(dy_buf);
{
index_t frame_size = m_dx_buf.GetFrameSize();
index_t node_size = m_dx_buf.GetNodeSize();
auto dy_ptr = dy_buf.LockConst<BT>();
auto dx_ptr = m_dx_buf.Lock<BT>(true);
auto mask_ptr = m_mask.LockConst();
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
if ( mask_ptr[node] != 0 ) {
// kept node: gradient passes through
for (index_t frame = 0; frame < frame_size; ++frame) {
dx_ptr.Set(frame, node, dy_ptr.Get(frame, node));
}
}
else {
// dropped node: gradient is zero
for (index_t frame = 0; frame < frame_size; ++frame) {
dx_ptr.Set(frame, node, 0);
}
}
}
return m_dx_buf;
}
}
// Serialization
protected:
// Serialize this layer's state. Field order is the on-disk format and
// must stay in lock-step with LoadObjectData().
void DumpObjectData(std::ostream &os) const override
{
// format version
std::int64_t ver = 1;
bb::SaveValue(os, ver);
// parent-class state first
_super::DumpObjectData(os);
// own members, in fixed order
bb::SaveValue(os, m_host_only);
bb::SaveValue(os, m_rate);
m_mask.DumpObject(os);
}
// Deserialize this layer's state; the read order mirrors DumpObjectData().
void LoadObjectData(std::istream &is) override
{
// format version (only version 1 is understood)
std::int64_t ver;
bb::LoadValue(is, ver);
BB_ASSERT(ver == 1);
// parent-class state first
_super::LoadObjectData(is);
// own members, in the order they were saved
bb::LoadValue(is, m_host_only);
bb::LoadValue(is, m_rate);
m_mask.LoadObject(is);
// rebuild derived state (currently nothing to rebuild)
}
};
}
|
mergeSortv8.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#define SEED 0
#define MIN_BLOCK_SIZE 30 //SIZE 1 --> R - L = 1 --> 2 array positions
/* Exchange the two ints pointed to by a and b.
 * static added: in C99/C11, plain 'inline' provides no external definition,
 * so an un-inlined call (e.g. at -O0) fails to link. 'static inline' is the
 * correct single-file idiom and is what every caller here needs. */
static inline void swapValues(int* a, int* b) { int c = *a; *a = *b; *b = c; }
/* Exchange the two int* values pointed to by a and b (used to ping-pong
 * the sort buffers). static added for the same C99 'inline' linkage
 * reason as swapValues: plain 'inline' emits no callable definition. */
static inline void swapPointers(int** a, int** b) { int* c = *a; *a = *b; *b = c; }
/*inline void manualCopy(int* a, int* b, int l, int r){ //UTILTZADA PER DEBUGAR
int i;
for(i=0; i<r+1-l; i++)
b[l+i] = a[l+i];
}*/
// merge() (defined below) merges two sorted runs held in arr_aux:
// arr_aux[l..m] and arr_aux[m+1..r], writing the result into arr[l..r].
/* In-place bubble sort of the inclusive range arr[l..r].
 * static added: plain C99 'inline' emits no external definition, which
 * breaks linking whenever the compiler declines to inline (e.g. -O0).
 * The element swap is done inline so the function is self-contained. */
static inline void bubbleSort(int* arr, int l, int r){
    int n = r - l + 1;
    for (int i = 1; i < n; i++) {
        /* after pass i, the last i positions hold their final values */
        for (int j = 0; j < n - i; j++) {
            if (arr[l + j] > arr[l + j + 1]) {
                int tmp = arr[l + j];
                arr[l + j] = arr[l + j + 1];
                arr[l + j + 1] = tmp;
            }
        }
    }
}
/* Sort the inclusive range arr[l..r] in place with insertion sort
 * (shift-based variant: the key is held aside while larger elements
 * slide right, instead of repeated adjacent swaps). */
void __attribute__ ((noinline)) insertionSort(int* arr, int l, int r){
    int len = r - l + 1;
    for (int i = 1; i < len; i++) {
        int key = arr[l + i];
        int k = i - 1;
        while (k >= 0 && arr[l + k] > key) {
            arr[l + k + 1] = arr[l + k];
            k--;
        }
        arr[l + k + 1] = key;
    }
}
/* Merge the two sorted runs arr_aux[l..m] and arr_aux[m+1..r] into
 * arr[l..r]. The merge is stable: ties are taken from the left run. */
void __attribute__ ((noinline)) merge(int* arr, int* arr_aux, int l, int m, int r)
{
    int* left  = arr_aux + l;       /* left run, length nl  */
    int* right = arr_aux + m + 1;   /* right run, length nr */
    int nl = m - l + 1;
    int nr = r - m;

    int li = 0, ri = 0, out = l;
    while (li < nl && ri < nr)
        arr[out++] = (left[li] <= right[ri]) ? left[li++] : right[ri++];

    /* Exactly one run may still have a tail; bulk-copy it. */
    if (li < nl)
        memcpy(arr + out, left + li, (nl - li) * sizeof(int));
    if (ri < nr)
        memcpy(arr + out, right + ri, (nr - ri) * sizeof(int));
}
/* Recursively sort the inclusive range [l..r].
 * arr and arr_aux swap roles at every level (pointer ping-pong) so that
 * merge() never needs an extra copy pass; 'depth' records the parity so
 * that leaf runs are written into BOTH buffers, keeping them in sync. */
void mergeSort(int* arr, int* arr_aux, int l, int r, int depth)
{
    if (r - l <= MIN_BLOCK_SIZE) {
        /* Small leaf: insertion-sort the buffer that is "primary" at
         * this parity, then mirror the run into the other buffer. */
        int* primary = (depth % 2 == 0) ? arr : arr_aux;
        int* shadow  = (depth % 2 == 0) ? arr_aux : arr;
        insertionSort(primary, l, r);
        memcpy(shadow + l, primary + l, sizeof(int) * (r - l + 1));
        return;
    }
    swapPointers(&arr, &arr_aux);
    int mid = l + (r - l) / 2;
    mergeSort(arr, arr_aux, l, mid, depth + 1);
    mergeSort(arr, arr_aux, mid + 1, r, depth + 1);
    merge(arr, arr_aux, l, mid, r);
}
/* Task-parallel variant of mergeSort: each recursion level forks the two
 * halves as OpenMP tasks and gives each child half of the thread budget.
 * Once the budget is exhausted the subtree falls back to serial mergeSort.
 * Must be entered from inside an active parallel region (see main). */
void mergeSortParallel(int* arr, int* arr_aux, int l, int r, int depth, int n_threads)
{
    n_threads /= 2;  /* half the budget for each child task */

    if (r - l <= MIN_BLOCK_SIZE) {
        /* Leaf: identical to the serial version — sort the parity-primary
         * buffer and mirror it into the other one. */
        int* primary = (depth % 2 == 0) ? arr : arr_aux;
        int* shadow  = (depth % 2 == 0) ? arr_aux : arr;
        insertionSort(primary, l, r);
        memcpy(shadow + l, primary + l, sizeof(int) * (r - l + 1));
        return;
    }

    if (n_threads <= 0) {
        /* No threads left for this subtree: go serial. */
        mergeSort(arr, arr_aux, l, r, depth);
        return;
    }

    swapPointers(&arr, &arr_aux);
    int mid = l + (r - l) / 2;
    #pragma omp task
    mergeSortParallel(arr, arr_aux, l, mid, depth + 1, n_threads);
    #pragma omp task
    mergeSortParallel(arr, arr_aux, mid + 1, r, depth + 1, n_threads);
    #pragma omp taskwait
    merge(arr, arr_aux, l, mid, r);
}
/* UTILITY FUNCTIONS */
/* Print up to ~11 (possibly non-consecutive) entries of A, sampling
 * every size/10-th element for large arrays. */
void printArray(int* A, int size)
{
    int step = 1;
    if (size > 10) {
        step = size / 10;
        if (step == 1)
            step = 2;  /* avoid printing every position when 10 < size < 20 */
    }
    int i;
    for (i = 0; i < size; i += step)
        printf("[%d] %d\n", i, A[i]);
    /* NOTE(review): the size%2==0 guard looks suspicious — the last element
     * is echoed only for even sizes; confirm whether the intent was simply
     * "print the last element if the sampled loop skipped it". Behavior
     * preserved here. */
    if (step > 1 && size % 2 == 0)
        printf("[%d] %d\n", size - 1, A[size - 1]);
}
/* Fill A[0..size-1] with pseudo-random values. Reseeds with the fixed
 * SEED on every call, so repeated calls produce identical sequences
 * (deterministic benchmarking input).
 * static added: plain C99 'inline' emits no external definition and can
 * fail to link in unoptimized builds. */
static inline void generateRandomValues(int* A, int size)
{
    srand(SEED);
    for (int i = 0; i < size; i++)
        A[i] = rand();
}
/* Report whether arr[0..size-1] is in non-decreasing order; stops
 * scanning at the first out-of-order pair. Prints 1 (sorted) or 0. */
void checkOrder (int *arr, int size){
    int sorted = 1;
    for (int i = 1; sorted && i < size; i++) {
        sorted = (arr[i-1] <= arr[i]);
    }
    printf("Well Sorted? %d\n", sorted);
}
/* Driver program to test above functions */
int main(int argc, char** argv)
{
int arr_size = 100000000;
int check_merge_sort = 1;
int n_threads = 1;
int printing_array = 0;
if(argc>1) { arr_size = atoi(argv[1]);} printf("[1] Array Size = %d\n", arr_size);
if(argc>2) { check_merge_sort = atoi(argv[2]);} printf("[2] Checking? = %d\n", check_merge_sort);
if(argc>3) { n_threads = atoi(argv[3]);} printf("[3] N_THREADS = %d\n", n_threads);
if(argc>4) { printing_array = atoi(argv[4]);} printf("[4] Printing array? = %d\n (up to 11 (non consecutive) positions)\n\n", printing_array);
int *arr = (int*)malloc(sizeof(int)*(arr_size));
int *arr_aux = (int*)malloc(sizeof(int)*(arr_size));
generateRandomValues(arr, arr_size);
if(printing_array){
printf("Given array is \n");
printArray(arr, arr_size);
}
if(n_threads <2){ //Avoid pragma overhead
mergeSort(arr, arr_aux, 0, arr_size - 1, 0);
}else{
#pragma omp parallel num_threads(n_threads)
#pragma omp single
mergeSortParallel(arr, arr_aux, 0, arr_size - 1, 0, n_threads);
}
swapPointers(&arr, &arr_aux);
if(printing_array){
printf("\nSorted array is \n");
printArray(arr, arr_size);
}
if(check_merge_sort)
checkOrder(arr, arr_size);
if(printing_array){
printf("\nAux array is \n");
printArray(arr_aux, arr_size);
}
if(check_merge_sort)
checkOrder(arr_aux, arr_size);
return 0;
}
|
GB_unop__lnot_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_int8_int8
// op(A') function: GB_unop_tran__lnot_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = !(aij != 0) elementwise over the anz entries of Ax, writing Cx.
// (Auto-generated kernel; logic mirrors the GB_* macros defined above.)
GrB_Info GB_unop_apply__lnot_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
// Cx [p] = LNOT (z)
Cx [p] = !(z != 0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + typecast + apply, for this operator.
// The implementation is the shared template GB_unop_transpose.c, textually
// included below; it expands via the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__lnot_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 10000000
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
/* STREAM driver: prints the configuration, calibrates the timer, runs the
 * four kernels (Copy/Scale/Add/Triad) NTIMES times, reports best-rate
 * bandwidth, validates results, and dumps per-iteration times to disk. */
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
/* count the threads that actually run, one atomic increment each */
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Get initial value for system clock. */
/* First-touch initialization in parallel, so pages land on the NUMA
 * node of the thread that will use them. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
/* One timed pass over a[] to estimate per-test duration in ticks. */
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
/* Copy: c = a */
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
/* Scale: b = scalar*c */
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
/* Add: c = a + b */
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
/* Triad: a = b + scalar*c */
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
/* Best rate uses the minimum time, per the STREAM run rules. */
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
/* --- Print all times to disk */
FILE *f = fopen("c.results", "w");
if (f == NULL) {
fprintf(stderr, "Could not open file 'c.results'\n");
return 1;
}
fprintf(f, "CCopy;CScale;CAdd;CTriad\n");
for (k=1; k<NTIMES; k++) {
fprintf(f, "%11.6f;%11.6f;%11.6f;%11.6f\n", times[0][k], times[1][k], times[2][k], times[3][k]);
}
fclose(f);
return 0;
}
# define M 20
/* Estimate the system timer granularity in microseconds: collect M
 * timestamps separated by at least the timer's resolution, then return
 * the smallest positive gap between consecutive samples. */
int
checktick()
{
    double samples[M];

    for (int i = 0; i < M; i++) {
        double start = mysecond();
        double now;
        /* spin until the clock visibly advances */
        while (((now = mysecond()) - start) < 1.0E-6)
            ;
        samples[i] = now;
    }

    int minDelta = 1000000;
    for (int i = 1; i < M; i++) {
        int delta = (int)(1.0E6 * (samples[i] - samples[i-1]));
        minDelta = MIN(minDelta, MAX(delta, 0));
    }
    return (minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
/* Wall-clock time in seconds (microsecond resolution) via gettimeofday.
 * Fixes: the return value was captured into an 'i' that was never
 * checked or used — dropped; the obsolete timezone argument is now NULL
 * as POSIX recommends. */
double mysecond()
{
    struct timeval tp;
    gettimeofday(&tp, (void *) 0);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
/* Validate a[], b[], c[] against the analytically-replayed kernel
 * sequence: re-derive the scalar values aj/bj/cj each element should
 * hold after NTIMES iterations, then require the average relative error
 * per array to be below a type-dependent epsilon. Prints a per-array
 * error count on failure. Reads the global arrays and NTIMES only. */
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
printf("aj = %lf, bj = %lf, cj = %lf\n", aj, bj, cj);
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
/* tolerance scales with the element type's precision */
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
/* Untuned reference implementation of the Copy kernel: c[i] = a[i]. */
void tuned_STREAM_Copy()
{
    #pragma omp parallel for
    for (ssize_t idx = 0; idx < STREAM_ARRAY_SIZE; idx++) {
        c[idx] = a[idx];
    }
}
/* Untuned reference implementation of the Scale kernel: b[i] = scalar*c[i]. */
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
    #pragma omp parallel for
    for (ssize_t idx = 0; idx < STREAM_ARRAY_SIZE; idx++) {
        b[idx] = scalar*c[idx];
    }
}
/* Untuned reference implementation of the Add kernel: c[i] = a[i]+b[i]. */
void tuned_STREAM_Add()
{
    #pragma omp parallel for
    for (ssize_t idx = 0; idx < STREAM_ARRAY_SIZE; idx++) {
        c[idx] = a[idx]+b[idx];
    }
}
/* Untuned reference implementation of the Triad kernel: a[i] = b[i]+scalar*c[i]. */
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
    #pragma omp parallel for
    for (ssize_t idx = 0; idx < STREAM_ARRAY_SIZE; idx++) {
        a[idx] = b[idx]+scalar*c[idx];
    }
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
zgels.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gels
*
* Solves overdetermined or underdetermined linear systems
* involving an m-by-n matrix A using a QR or LQ factorization of A. It
* is assumed that A has full rank. The following options are provided:
*
* # trans = PlasmaNoTrans and m >= n: find the least squares solution of an
* overdetermined system, i.e., solve the least squares problem:
* minimize || B - A*X ||.
*
* # trans = PlasmaNoTrans and m < n: find the minimum norm solution of an
* underdetermined system A * X = B.
*
* Several right-hand side vectors B and solution vectors X can be handled in a
* single call; they are stored as the columns of the m-by-nrhs right-hand side
* matrix B and the n-by-nrhs solution matrix X.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: the linear system involves A
* (the only supported option for now).
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns of the
* matrices B and X. nrhs >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit,
* if m >= n, A is overwritten by details of its QR factorization as
* returned by plasma_zgeqrf;
* if m < n, A is overwritten by details of its LQ factorization as
* returned by plasma_zgelqf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data.
* Matrix of T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
* @param[in,out] pB
* On entry, pointer to the m-by-nrhs matrix B of right-hand side
* vectors, stored columnwise;
* On exit, if return value = 0, B is overwritten by the solution
* vectors, stored columnwise:
* if m >= n, rows 1 to N of B contain the least squares solution
* vectors; the residual sum of squares for the solution in each column
* is given by the sum of squares of the modulus of elements n+1 to m
* in that column;
* if m < n, rows 1 to n of B contain the minimum norm solution
* vectors;
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zgels
* @sa plasma_cgels
* @sa plasma_dgels
* @sa plasma_sgels
* @sa plasma_zgeqrf
* @sa plasma_zgeqrs
*
******************************************************************************/
/* Solve an overdetermined/underdetermined system via tile QR (m >= n) or
 * LQ (m < n) factorization. Validates arguments, converts pA/pB to tile
 * layout, runs the asynchronous tile algorithm, and converts back.
 * Fixes: descriptors A and B were leaked when plasma_descT_create failed,
 * and A, B and T were leaked when plasma_workspace_create failed (compare
 * the B-creation failure path, which already destroyed A). */
int plasma_zgels(plasma_enum_t trans,
                 int m, int n, int nrhs,
                 plasma_complex64_t *pA, int lda,
                 plasma_desc_t *T,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        return PlasmaErrorNotSupported;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -6;
    }
    if (ldb < imax(1, imax(m, n))) {
        plasma_error("illegal value of ldb");
        return -9;
    }
    // quick return: an empty problem has the all-zero solution
    if (imin(m, imin(n, nrhs)) == 0) {
        for (int i = 0; i < imax(m, n); i++)
            for (int j = 0; j < nrhs; j++)
                pB[j*ldb+i] = 0.0;
        return PlasmaSuccess;
    }
    // Tune parameters.
    if (plasma->tuning) {
        if (m < n)
            plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n);
        else
            plasma_tune_geqrf(plasma, PlasmaComplexDouble, m, n);
    }
    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    plasma_enum_t householder_mode = plasma->householder_mode;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // B must be tall enough for both the RHS (m rows) and the solution
    // (n rows), hence imax(m, n).
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        imax(m, n), nrhs, 0, 0, imax(m, n),
                                        nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // Fix: release the tile matrices created above (previously leaked).
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // geqrt/gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Fix: release T, A and B created above (previously leaked).
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);
        // Call the tile async function.
        plasma_omp_zgels(PlasmaNoTrans,
                         A, *T,
                         B, work,
                         &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization
    plasma_workspace_destroy(&work);
    // Free matrices in tile layout (T is the caller's to destroy).
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gels
*
* Solves overdetermined or underdetermined linear
* system of equations using the tile QR or the tile LQ factorization.
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: the linear system involves A
* (the only supported option for now).
*
* @param[in,out] A
* Descriptor of matrix A stored in the tile layout.
* On exit,
* if m >= n, A is overwritten by details of its QR factorization
* as returned by plasma_zgeqrf;
* if m < n, A is overwritten by details of its LQ factorization
* as returned by plasma_zgelqf.
*
* @param[out] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by
* plasma_zgeqrf or plasma_zgelqf.
*
* @param[in,out] B
* Descriptor of matrix B.
* On entry, right-hand side matrix B in the tile layout.
* On exit, solution matrix X in the tile layout.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For QR/LQ factorizations used in GELS, it contains preallocated
* space for tau and work arrays.
* Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zgels
* @sa plasma_omp_cgels
* @sa plasma_omp_dgels
* @sa plasma_omp_sgels
*
******************************************************************************/
void plasma_omp_zgels(plasma_enum_t trans,
                      plasma_desc_t A, plasma_desc_t T,
                      plasma_desc_t B, plasma_workspace_t work,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request before anything else: every other error
    // path below reports through plasma_request_fail(), which writes into
    // both structures, so they must be known non-NULL first.  (Previously
    // these checks came last and the NULL branches themselves passed the
    // NULL pointer to plasma_request_fail().)
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        plasma_request_fail(sequence, request, PlasmaErrorNotSupported);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid descriptor T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid descriptor B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (A.m == 0 || A.n == 0 || B.n == 0) {
        // Zero matrix B.
        plasma_pzlaset(PlasmaGeneral, 0.0, 0.0, B, sequence, request);
        return;
    }
    //===============================
    // Solve using QR factorization.
    // (m >= n: overdetermined system, least-squares solution)
    //===============================
    if (A.m >= A.n) {
        // Factor A = Q*R and apply Q^H to B; the flat vs. communication-
        // avoiding tree kernels are selected once since both calls always
        // use the same householder mode.
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzgeqrf_tree(A, T, work, sequence, request);
            plasma_pzunmqr_tree(PlasmaLeft, Plasma_ConjTrans,
                                A, T, B,
                                work, sequence, request);
        }
        else {
            plasma_pzgeqrf(A, T, work, sequence, request);
            plasma_pzunmqr(PlasmaLeft, Plasma_ConjTrans,
                           A, T, B,
                           work, sequence, request);
        }
        // Solve R * X = Q^H * B on the leading n-by-n triangle.
        plasma_pztrsm(PlasmaLeft, PlasmaUpper,
                      PlasmaNoTrans, PlasmaNonUnit,
                      1.0,
                      plasma_desc_view(A, 0, 0, A.n, A.n),
                      plasma_desc_view(B, 0, 0, A.n, B.n),
                      sequence, request);
    }
    //===============================
    // Solve using LQ factorization.
    // (m < n: underdetermined system, minimum-norm solution)
    //===============================
    else {
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzgelqf_tree(A, T, work, sequence, request);
        }
        else {
            plasma_pzgelqf(A, T, work, sequence, request);
        }
        // Zero the trailing block of the right-hand-side matrix.
        // B has less rows than X.
        plasma_pzlaset(PlasmaGeneral, 0.0, 0.0,
                       plasma_desc_view(B, A.m, 0, A.n-A.m, B.n),
                       sequence, request);
        // Solve L * Y = B.
        plasma_pztrsm(
            PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit,
            1.0, plasma_desc_view(A, 0, 0, A.m, A.m),
            plasma_desc_view(B, 0, 0, A.m, B.n),
            sequence, request);
        // Find X = Q^H * Y.
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzunmlq_tree(PlasmaLeft, Plasma_ConjTrans,
                                A, T, B,
                                work, sequence, request);
        }
        else {
            plasma_pzunmlq(PlasmaLeft, Plasma_ConjTrans,
                           A, T, B,
                           work, sequence, request);
        }
    }
}
|
ParallelOpenMP.h | #pragma once
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif
namespace at {
// Run f(chunk_begin, chunk_end) over [begin, end) on the OpenMP thread pool,
// splitting the range into per-thread chunks of at least grain_size elements.
// Runs f(begin, end) serially when OpenMP is unavailable, the range is small,
// or we are already inside a parallel region.  The first exception thrown by
// any chunk is rethrown on the calling thread.
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
  TORCH_CHECK(grain_size >= 0);
  at::internal::lazy_init_num_threads();
  // Empty range: nothing to do.
  if (begin >= end) {
    return;
  }
#ifdef _OPENMP
  // err_flag serializes the claim on eptr so only the first thrown
  // exception is captured; later ones are dropped.
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
  // Work around memory leak when using 1 thread in nested "omp parallel"
  // caused by some buggy OpenMP versions and the fact that omp_in_parallel()
  // returns false when omp_get_max_threads() == 1 inside nested "omp parallel"
  // See issue gh-32284
#pragma omp parallel if (omp_get_max_threads() > 1 && !omp_in_parallel() && ((end - begin) > grain_size))
  {
    // choose number of tasks based on grain size and number of threads
    // can't use num_threads clause due to bugs in GOMP's thread pool (See #32008)
    int64_t num_threads = omp_get_num_threads();
    if (grain_size > 0) {
      // Cap threads so each chunk has at least grain_size elements.
      num_threads = std::min(num_threads, divup((end - begin), grain_size));
    }
    int64_t tid = omp_get_thread_num();
    int64_t chunk_size = divup((end - begin), num_threads);
    int64_t begin_tid = begin + tid * chunk_size;
    // Threads whose chunk starts past the end have no work.
    if (begin_tid < end) {
      try {
        f(begin_tid, std::min(end, chunk_size + begin_tid));
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
  }
  // Rethrow after the implicit barrier joins all workers.
  if (eptr) {
    std::rethrow_exception(eptr);
  }
#else
  // No OpenMP: process the whole range on the calling thread.
  f(begin, end);
#endif
}
// Parallel reduction over [begin, end): f(chunk_begin, chunk_end, ident)
// produces a partial result per chunk of grain_size elements, and sf folds
// the partials together sequentially, in chunk order, starting from ident.
// Falls back to a single serial f(begin, end, ident) call when nested inside
// a parallel region, when only one thread is available, or when grain_size
// is 0.
//
// Fix: grain_size == 0 passes the TORCH_CHECK(grain_size >= 0) guard but
// previously reached divup((end - begin), grain_size) — a division by zero.
// A zero grain gives no chunking information, so it now takes the serial
// path instead.
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const scalar_t ident,
    const F& f,
    const SF& sf) {
  TORCH_CHECK(grain_size >= 0);
  at::internal::lazy_init_num_threads();
  if (begin >= end) {
    return ident;
  } else if (grain_size == 0 || in_parallel_region() || get_num_threads() == 1) {
    // Serial path; grain_size == 0 would divide by zero below.
    return f(begin, end, ident);
  } else {
    const int64_t num_results = divup((end - begin), grain_size);
    std::vector<scalar_t> results(num_results);
    scalar_t* results_data = results.data();
    // err_flag serializes the claim on eptr: only the first exception
    // thrown by any chunk is kept and rethrown after the loop.
    std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
    std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
    for (int64_t id = 0; id < num_results; id++) {
      int64_t i = begin + id * grain_size;
      try {
        // Last chunk may be shorter than grain_size.
        results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
    if (eptr) {
      std::rethrow_exception(eptr);
    }
    // Combine the partial results sequentially so sf need not be
    // thread-safe; order is deterministic (chunk 0, 1, ...).
    scalar_t result = ident;
    for (auto partial_result : results) {
      result = sf(result, partial_result);
    }
    return result;
  }
}
} // namespace at
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

/* omp_get_max_threads() is called under _OPENMP below; declare it properly
 * instead of relying on an implicit declaration. */
#if defined(_OPENMP)
#include <omp.h>
#endif

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store *x - *y into *result, field by field.
 *
 * NOTE: *y is normalized in place as a side effect (same contract as the
 * GNU libc manual example this is adapted from); callers must not rely on
 * y being unchanged.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y's microsecond field so that
     * x->tv_usec - y->tv_usec cannot go negative. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Move any whole-second surplus of microseconds back into
     * y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* With y normalized, the two fields subtract independently and
     * tv_usec is guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the two time levels of A and the 7 coefficient arrays,
 * runs the order-1 3D 7-point variable-coefficient stencil TESTS times,
 * and reports the best wall-clock time.
 *
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when not enough command-line
 *    arguments were given (undefined behavior); sensible defaults added.
 *  - min() was used although only the MIN macro is defined in this file.
 *  - initialization loops started at index 1, leaving plane/row/column 0
 *    (which the stencil reads through i-1/j-1/k-1) indeterminate; they now
 *    cover the full arrays, including both time levels of A.
 *  - the top-level A, coef and tile_size allocations are now freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults: 32^3 interior (+2 halo per dimension), 8 time steps;
   * command-line arguments override: prog NX NY NZ [NT]. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 8;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables: fill every cell (including the halo read by the
  // stencil and both time levels of A) with reproducible pseudo-random data
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);  /* was min(), which is not defined */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  (void) ts_return;
  (void) num_threads;  /* may be consumed by PRINT_RESULTS; silence -Wunused otherwise */
  return 0;
}
|
matrixstrassen.h | /**
* @file matrixstrassen.h matrix strassen operations.
* @author TPOC: contact@palisade-crypto.org
*
* @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution. THIS SOFTWARE IS
* PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_MATRIXSTRASSEN_H
#define LBCRYPTO_MATH_MATRIXSTRASSEN_H
#include <assert.h>
#include "matrix.h"
namespace lbcrypto {
// Dense rows-by-cols matrix of Element with Strassen-style (CAPS)
// multiplication.  Elements are produced by a user-supplied zero allocator
// so the class works for ring elements as well as scalars.
//
// NOTE(review): ScalarMult/Add/Sub dereference entries (*data[row][col])
// while Equal/operator()/ExtractRow's result use them directly — this looks
// like a leftover from a data_t of smart pointers; confirm which Element
// contract is intended before relying on these members.
template <class Element>
class MatrixStrassen { // FIXME : public Serializable {
 public:
  typedef vector<vector<Element>> data_t;
  typedef vector<Element> lineardata_t;
  typedef typename vector<Element>::iterator it_lineardata_t;
  typedef std::function<Element(void)> alloc_func;

  /**
   * Constructor that initializes matrix values using a zero allocator
   *
   * @param &allocZero lambda function for zero initialization.
   * @param &rows number of rows.
   * @param &cols number of columns.
   */
  MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols)
      : data(), rows(rows), cols(cols), allocZero(allocZero) {
    data.resize(rows);
    for (auto row = data.begin(); row != data.end(); ++row) {
      for (size_t col = 0; col < cols; ++col) {
        row->push_back(allocZero());
      }
    }
  }

  /**
   * Constructor that initializes matrix values using a distribution generation
   * allocator
   *
   * @param &allocZero lambda function for zero initialization (used for
   * initializing derived matrix objects)
   * @param &rows number of rows.
   * @param &cols number of columns.
   * @param &allocGen lambda function for intialization using a distribution
   * generator.
   */
  MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols,
                 alloc_func allocGen);

  /**
   * Constructor of an empty matrix; SetSize must be called on this matrix to
   * use it Basically this exists to support deserializing
   *
   * @param &allocZero lambda function for zero initialization.
   */
  MatrixStrassen(alloc_func allocZero)
      : data(), rows(0), cols(0), allocZero(allocZero) {}

  // Resize an empty matrix (rows == cols == 0) and fill it with zeros.
  // Throws if the matrix already has a size.
  void SetSize(size_t rows, size_t cols) {
    if (this->rows != 0 || this->cols != 0) {
      PALISADE_THROW(not_available_error,
                     "You cannot SetSize on a non-empty matrix");
    }
    this->rows = rows;
    this->cols = cols;
    data.resize(rows);
    for (auto row = data.begin(); row != data.end(); ++row) {
      for (size_t col = 0; col < cols; ++col) {
        row->push_back(allocZero());
      }
    }
  }

  /**
   * Copy constructor
   *
   * @param &other the matrix object to be copied
   */
  MatrixStrassen(const MatrixStrassen<Element>& other)
      : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
    deepCopyData(other.data);
  }

  /**
   * Assignment operator
   *
   * @param &other the matrix object whose values are to be copied
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element>& operator=(
      const MatrixStrassen<Element>& other);

  /**
   * In-place change of the current matrix to a matrix of all ones
   *
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element>& Ones();

  /**
   * Fill matrix using the same element
   *
   * @param &val the element the matrix is filled by
   *
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element>& Fill(const Element& val);

  /**
   * In-place change of the current matrix to Identity matrix
   *
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element>& Identity();

  /**
   * Sets the first row to be powers of two
   *
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element> GadgetVector(int32_t base = 2) const;

  /**
   * Computes the infinity norm
   *
   * @return the norm in double format
   */
  inline double Norm() const;

  /**
   * Operator for matrix multiplication
   *
   * @param &other the multiplier matrix
   * @return the result of multiplication
   */
  inline MatrixStrassen<Element> operator*(
      MatrixStrassen<Element> const& other) const {
    return Mult(other);
  }

  /**
   * Multiplication of matrix by a scalar
   *
   * @param &other the multiplier element
   * @return the result of multiplication
   */
  inline MatrixStrassen<Element> ScalarMult(Element const& other) const {
    MatrixStrassen<Element> result(*this);
    // NOTE(review): int32_t counters are compared against the size_t
    // rows/cols members (signed/unsigned mismatch); also the OpenMP
    // parallelization is over columns of a row-major data layout.
#pragma omp parallel for
    for (int32_t col = 0; col < result.cols; ++col) {
      for (int32_t row = 0; row < result.rows; ++row) {
        *result.data[row][col] = *result.data[row][col] * other;
      }
    }
    return result;
  }

  /**
   * Operator for scalar multiplication
   *
   * @param &other the multiplier element
   * @return the result of multiplication
   */
  inline MatrixStrassen<Element> operator*(Element const& other) const {
    return ScalarMult(other);
  }

  /**
   * Equality check
   *
   * @param &other the matrix object to compare to
   * @return the boolean result
   */
  inline bool Equal(MatrixStrassen<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
      return false;
    }
    for (size_t i = 0; i < rows; ++i) {
      for (size_t j = 0; j < cols; ++j) {
        if (data[i][j] != other.data[i][j]) {
          return false;
        }
      }
    }
    return true;
  }

  /**
   * Operator for equality check
   *
   * @param &other the matrix object to compare to
   * @return the boolean result
   */
  inline bool operator==(MatrixStrassen<Element> const& other) const {
    return Equal(other);
  }

  /**
   * Operator for non-equality check
   *
   * @param &other the matrix object to compare to
   * @return the boolean result
   */
  inline bool operator!=(MatrixStrassen<Element> const& other) const {
    return !Equal(other);
  }

  /**
   * Get property to access the data as a vector of vectors
   *
   * @return the data as vector of vectors
   */
  const data_t& GetData() const { return data; }

  /**
   * Get property to access the number of rows in the matrix
   *
   * @return the number of rows
   */
  size_t GetRows() const { return rows; }

  /**
   * Get property to access the number of columns in the matrix
   *
   * @return the number of columns
   */
  size_t GetCols() const { return cols; }

  /**
   * Get property to access the zero allocator for the matrix
   *
   * @return the lambda function corresponding to the element zero allocator
   */
  alloc_func GetAllocator() const { return allocZero; }

  /**
   * Sets the evaluation or coefficient representation for all ring elements
   * that support the SetFormat method
   *
   * @param &format the enum value corresponding to coefficient or evaluation
   * representation
   */
  void SetFormat(Format format);

  /**
   * MatrixStrassen addition
   *
   * @param &other the matrix to be added
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element> Add(
      MatrixStrassen<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
      PALISADE_THROW(math_error,
                     "Addition operands have incompatible dimensions");
    }
    MatrixStrassen<Element> result(*this);
    // NOTE(review): same int32_t-vs-size_t and *data[i][j] concerns as
    // in ScalarMult above.
#pragma omp parallel for
    for (int32_t j = 0; j < cols; ++j) {
      for (int32_t i = 0; i < rows; ++i) {
        *result.data[i][j] += *other.data[i][j];
      }
    }
    return result;
  }

  /**
   * Operator for matrix addition
   *
   * @param &other the matrix to be added
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element> operator+(
      MatrixStrassen<Element> const& other) const {
    return this->Add(other);
  }

  /**
   * Operator for in-place addition
   *
   * @param &other the matrix to be added
   * @return the resulting matrix (same object)
   */
  inline MatrixStrassen<Element>& operator+=(
      MatrixStrassen<Element> const& other);

  /**
   * MatrixStrassen substraction
   *
   * @param &other the matrix to be substracted
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element> Sub(
      MatrixStrassen<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
      PALISADE_THROW(math_error,
                     "Subtraction operands have incompatible dimensions");
    }
    MatrixStrassen<Element> result(allocZero, rows, other.cols);
#pragma omp parallel for
    for (int32_t j = 0; j < cols; ++j) {
      for (int32_t i = 0; i < rows; ++i) {
        *result.data[i][j] = *data[i][j] - *other.data[i][j];
      }
    }
    return result;
  }

  /**
   * Operator for matrix substraction
   *
   * @param &other the matrix to be substracted
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element> operator-(
      MatrixStrassen<Element> const& other) const {
    return this->Sub(other);
  }

  /**
   * Operator for in-place matrix substraction
   *
   * @param &other the matrix to be substracted
   * @return the resulting matrix (same object)
   */
  inline MatrixStrassen<Element>& operator-=(
      MatrixStrassen<Element> const& other);

  /**
   * MatrixStrassen transposition
   *
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element> Transpose() const;

  // YSP The signature of this method needs to be changed in the future
  /**
   * MatrixStrassen determinant - found using Laplace formula with complexity
   * O(d!), where d is the dimension
   *
   * @param *result where the result is stored
   */
  inline void Determinant(Element* result) const;

  /**
   * Cofactor matrix - the matrix of determinants of the minors A_{ij}
   * multiplied by -1^{i+j}
   *
   * @return the cofactor matrix for the given matrix
   */
  inline MatrixStrassen<Element> CofactorMatrixStrassen() const;

  /**
   * Add rows to bottom of the matrix
   *
   * @param &other the matrix to be added to the bottom of current matrix
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element>& VStack(MatrixStrassen<Element> const& other);

  /**
   * Add columns the right of the matrix
   *
   * @param &other the matrix to be added to the right of current matrix
   * @return the resulting matrix
   */
  inline MatrixStrassen<Element>& HStack(MatrixStrassen<Element> const& other);

  /**
   * MatrixStrassen indexing operator - writeable instance of the element
   *
   * @param &row row index
   * @param &col column index
   * @return the element at the index
   */
  inline Element& operator()(size_t row, size_t col) { return data[row][col]; }

  /**
   * MatrixStrassen indexing operator - read-only instance of the element
   *
   * @param &row row index
   * @param &col column index
   * @return the element at the index
   */
  inline Element const& operator()(size_t row, size_t col) const {
    return data[row][col];
  }

  /**
   * MatrixStrassen row extractor
   *
   * @param &row row index
   * @return the row at the index
   */
  inline MatrixStrassen<Element> ExtractRow(size_t row) const {
    MatrixStrassen<Element> result(this->allocZero, 1, this->cols);
    int i = 0;
    // NOTE(review): **elem double-dereferences the stored Element —
    // consistent with the pointer-like usage in ScalarMult/Add/Sub but
    // not with operator(); confirm the intended Element contract.
    for (auto elem = this->GetData()[row].begin();
         elem != this->GetData()[row].end(); ++elem) {
      result(0, i) = **elem;
      i++;
    }
    return result;
    // return *this;
  }

  /**
   * Call switch format for each (ring) element
   *
   */
  inline void SwitchFormat();

  /**
   * MatrixStrassen multiplication
   *
   * @param &other the multiplier matrix
   * @return the result of multiplication
   */
  MatrixStrassen<Element> Mult(const MatrixStrassen<Element>& other,
                               int nrec = 0, int pad = -1) const;

  /*
   * Multiply the matrix by a vector whose elements are all 1's. This causes
   * the elements of each row of the matrix to be added and placed into the
   * corresponding position in the output vector.
   */
  MatrixStrassen<Element> MultByUnityVector() const;

  /*
   * Multiply the matrix by a vector of random 1's and 0's, which is the same as
   * adding select elements in each row together. Return a vector that is a rows
   * x 1 matrix.
   */
  MatrixStrassen<Element> MultByRandomVector(std::vector<int> ranvec) const;

 private:
  // Shape/layout descriptor used by the CAPS (communication-avoiding
  // parallel Strassen) kernels below.
  struct MatDescriptor {
    int lda;          // leading dimension
    int nrec;         // number of Strassen recursion levels
    int nproc;        // total processor count
    int nprocr;       // processors per row
    int nprocc;       // processors per column
    int nproc_summa;  // processors used by the SUMMA base case
    int bs;           // block size
  };

  const int DESC_SIZE = 7;  // number of ints that make up a MatDescriptor
  const int rank = 0, base = 0;

  // Matrix storage and bookkeeping.  Many members are mutable because the
  // const Mult() path linearizes/pads the data in place.
  mutable data_t data;
  size_t rows;
  mutable int rowpad = 0;  // rows of padding added for Strassen recursion
  size_t cols;
  mutable int colpad = 0;  // columns of padding added for Strassen recursion
  alloc_func allocZero;
  mutable char* pattern = NULL;
  mutable int numAdd = 0;   // operation counters (instrumentation)
  mutable int numMult = 0;
  mutable int numSub = 0;
  mutable MatDescriptor desc;
  mutable Element zeroUniquePtr = allocZero();
  mutable int NUM_THREADS = 1;

  // CAPS Strassen kernels operating on linearized data.
  void multiplyInternalCAPS(it_lineardata_t A, it_lineardata_t B,
                            it_lineardata_t C, MatDescriptor desc,
                            it_lineardata_t work) const;
  void strassenDFSCAPS(it_lineardata_t A, it_lineardata_t B, it_lineardata_t C,
                       MatDescriptor desc,
                       it_lineardata_t workPassThrough) const;
  void block_multiplyCAPS(it_lineardata_t A, it_lineardata_t B,
                          it_lineardata_t C, MatDescriptor d,
                          it_lineardata_t workPassThrough) const;
  void LinearizeDataCAPS(lineardata_t* lineardataPtr) const;
  void UnlinearizeDataCAPS(lineardata_t* lineardataPtr) const;
  int getRank() const;
  void verifyDescriptor(MatDescriptor desc);
  long long numEntriesPerProc(MatDescriptor desc) const;

  // deep copy of data - used for copy constructor
  void deepCopyData(data_t const& src);
  void getData(const data_t& Adata, const data_t& Bdata, const data_t& Cdata,
               int row, int inner, int col) const;

  // Elementwise helpers over linearized blocks.
  void smartSubtractionCAPS(it_lineardata_t result, it_lineardata_t A,
                            it_lineardata_t B) const;
  void smartAdditionCAPS(it_lineardata_t result, it_lineardata_t A,
                         it_lineardata_t B) const;
  void addMatricesCAPS(int numEntries, it_lineardata_t C, it_lineardata_t A,
                       it_lineardata_t B) const;
  void addSubMatricesCAPS(int numEntries, it_lineardata_t T1,
                          it_lineardata_t S11, it_lineardata_t S12,
                          it_lineardata_t T2, it_lineardata_t S21,
                          it_lineardata_t S22) const;
  void subMatricesCAPS(int numEntries, it_lineardata_t C, it_lineardata_t A,
                       it_lineardata_t B) const;
  void tripleAddMatricesCAPS(int numEntries, it_lineardata_t T1,
                             it_lineardata_t S11, it_lineardata_t S12,
                             it_lineardata_t T2, it_lineardata_t S21,
                             it_lineardata_t S22, it_lineardata_t T3,
                             it_lineardata_t S31, it_lineardata_t S32) const;
  void tripleSubMatricesCAPS(int numEntries, it_lineardata_t T1,
                             it_lineardata_t S11, it_lineardata_t S12,
                             it_lineardata_t T2, it_lineardata_t S21,
                             it_lineardata_t S22, it_lineardata_t T3,
                             it_lineardata_t S31, it_lineardata_t S32) const;

  // Data distribution/collection between the single-process layout and the
  // blocked CAPS layout.
  void distributeFrom1ProcCAPS(MatDescriptor desc, it_lineardata_t O,
                               it_lineardata_t I) const;
  void collectTo1ProcCAPS(MatDescriptor desc, it_lineardata_t O,
                          it_lineardata_t I) const;
  void sendBlockCAPS(int rank, int target, it_lineardata_t O, int bs,
                     int source, it_lineardata_t I, int ldi) const;
  void receiveBlockCAPS(int rank, int target, it_lineardata_t O, int bs,
                        int source, it_lineardata_t I, int ldo) const;
  void distributeFrom1ProcRecCAPS(MatDescriptor desc, it_lineardata_t O,
                                  it_lineardata_t I, int ldi) const;
  void collectTo1ProcRecCAPS(MatDescriptor desc, it_lineardata_t O,
                             it_lineardata_t I, int ldo) const;
};
/**
* Operator for scalar multiplication of matrix
*
* @param &e element
* @param &M matrix
* @return the resulting matrix
*/
// Scalar-on-the-left multiplication: forwards e * M to M.ScalarMult(e).
template <class Element>
inline MatrixStrassen<Element> operator*(Element const& e,
                                         MatrixStrassen<Element> const& M) {
  return M.ScalarMult(e);
}
/**
* Generates a matrix of rotations. See pages 7-8 of
* https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigInteger> Rotate(MatrixStrassen<Poly> const& inMat);
/**
* Each element becomes a square matrix with columns of that element's
* rotations in coefficient form. See pages 7-8 of
* https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigVector> RotateVecResult(
MatrixStrassen<Poly> const& inMat);
/**
* Stream output operator
*
* @param &os stream
* @param &m matrix to be outputted
* @return the chained stream
*/
template <class Element>
inline std::ostream& operator<<(std::ostream& os,
const MatrixStrassen<Element>& m);
/**
* Gives the Choleshky decomposition of the input matrix.
* The assumption is that covariance matrix does not have large coefficients
* because it is formed by discrete gaussians e and s; this implies int32_t can
* be used This algorithm can be further improved - see the Darmstadt paper
* section 4.4 http://eprint.iacr.org/2013/297.pdf
*
* @param &input the matrix for which the Cholesky decomposition is to be
* computed
* @return the resulting matrix of floating-point numbers
*/
inline MatrixStrassen<double> Cholesky(const MatrixStrassen<int32_t>& input);
/**
* Convert a matrix of integers from BigInteger to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(
const MatrixStrassen<BigInteger>& input, const BigInteger& modulus);
/**
* Convert a matrix of BigVector to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(
const MatrixStrassen<BigVector>& input, const BigInteger& modulus);
/**
* Split a vector of int32_t into a vector of ring elements with ring dimension
* n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param ¶ms Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32IntoPolyElements(
MatrixStrassen<int32_t> const& other, size_t n,
const shared_ptr<ILParams> params);
/**
* Another method for splitting a vector of int32_t into a vector of ring
* elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param ¶ms Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32AltIntoPolyElements(
MatrixStrassen<int32_t> const& other, size_t n,
const shared_ptr<ILParams> params);
} // namespace lbcrypto
#endif // LBCRYPTO_MATH_MATRIXSTRASSEN_H
|
GB_unop__lnot_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_bool_bool
// op(A') function: GB_unop_tran__lnot_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = !aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = !z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply the unary operator to every entry: Cx [p] = ! Ax [p], for p in
   [0, anz).  bool -> bool, so the "cast" step is the identity.  The loop
   is embarrassingly parallel and uses a static OpenMP schedule. */
GrB_Info GB_unop_apply__lnot_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
int64_t anz, /* number of entries in Ax (and Cx) */
int nthreads /* number of OpenMP threads to use */
)
{
#if GB_DISABLE
/* operator compiled out via GB_DISABLE; caller falls back to generic case */
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ; /* aij = Ax [p] */
bool z = aij ; /* cast (identity for bool -> bool) */
Cx [p] = !z ; /* apply the logical-not operator */
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast, and apply z = !aij.  All of
   the actual work is done by the generic template GB_unop_transpose.c,
   specialized through the GB_* macros defined above in this file. */
GrB_Info GB_unop_tran__lnot_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, /* NOTE(review): presumably per-task row
   counts used by the transpose template -- confirm in GB_unop_transpose.c */
GBI_single_iterator Iter, /* iterator over the vectors of A */
const int64_t *GB_RESTRICT A_slice, /* partition of A across naslice tasks */
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
task-create.c | /*
* task-create.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include "ompt/ompt-signal.h"
/* Archer testcase: a task created with correct synchronization must not be
   reported as a data race.  The master thread increments var, spawns a task
   that increments it again, and waits for the task's signal before the
   parallel region ends; ThreadSanitizer should stay silent. */
int main(int argc, char *argv[]) {
int var = 0, a = 0; /* var: counter under test; a: handshake flag for OMPT_SIGNAL/OMPT_WAIT */
#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
{
var++; /* first increment, on the master thread, before the task exists */
#pragma omp task shared(var, a)
{
var++; /* second increment, possibly on the other (task-stealing) thread */
OMPT_SIGNAL(a); /* tell the master the task has executed */
}
// Give other thread time to steal the task.
OMPT_WAIT(a, 1);
}
fprintf(stderr, "DONE\n");
int error = (var != 2); /* exactly two increments must have happened */
return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% John Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/configure.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/transform.h"
#include "magick/threshold.h"
#include "magick/option.h"
#include "magick/xml-tree.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
struct _ThresholdMap
{
char
*map_id,
*description;
unsigned long
width,
height;
long
divisor,
*levels;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,
% const unsigned long width,const unsigned long height,
% const long offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o offset: the mean offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* For each pixel, compute the mean of a width x height neighborhood (plus a
   constant offset) and threshold the pixel against that local mean.  Returns
   a new image, or NULL on failure (error details in exception). */
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const unsigned long width,const unsigned long height,const long offset,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
Image
*threshold_image;
long
progress,
y;
MagickBooleanType
status;
MagickPixelPacket
zero;
MagickRealType
number_pixels;
ViewInfo
*image_view,
*threshold_view;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* the neighborhood must fit inside the image */
if ((image->columns < width) || (image->rows < height))
ThrowImageException(OptionError,"ImageSmallerThanRadius");
threshold_image=CloneImage(image,0,0,MagickTrue,exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse)
{
InheritException(exception,&threshold_image->exception);
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Local adaptive threshold.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&zero);
number_pixels=(MagickRealType) width*height;
image_view=AcquireCacheView(image);
threshold_view=AcquireCacheView(threshold_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register IndexPacket
*threshold_indexes;
register long
x;
register PixelPacket
*q;
/* another row already failed; skip remaining work but keep loop valid */
if (status == MagickFalse)
continue;
/* read the neighborhood rows centered (vertically) on y; virtual pixels
   handle the out-of-bounds border */
p=GetCacheViewVirtualPixels(image_view,-((long) width/2L),y-height/2L,
image->columns+width,height,exception);
q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view);
for (x=0; x < (long) image->columns; x++)
{
long
v;
MagickPixelPacket
mean,
pixel;
register const PixelPacket
*r;
register long
u;
/* sum the width x height window starting at p+x */
pixel=zero;
mean=zero;
r=p;
for (v=0; v < (long) height; v++)
{
for (u=0; u < (long) width; u++)
{
pixel.red+=r[u].red;
pixel.green+=r[u].green;
pixel.blue+=r[u].blue;
pixel.opacity+=r[u].opacity;
if (image->colorspace == CMYKColorspace)
pixel.index=(MagickRealType) indexes[x+(r-p)+u];
}
r+=image->columns+width; /* advance to the next neighborhood row */
}
/* local threshold = neighborhood mean + user offset, per channel */
mean.red=(MagickRealType) (pixel.red/number_pixels+offset);
mean.green=(MagickRealType) (pixel.green/number_pixels+offset);
mean.blue=(MagickRealType) (pixel.blue/number_pixels+offset);
mean.opacity=(MagickRealType) (pixel.opacity/number_pixels+offset);
if (image->colorspace == CMYKColorspace)
mean.index=(MagickRealType) (pixel.index/number_pixels+offset);
q->red=(Quantum) (((MagickRealType) q->red <= mean.red) ?
0 : QuantumRange);
q->green=(Quantum) (((MagickRealType) q->green <= mean.green) ?
0 : QuantumRange);
q->blue=(Quantum) (((MagickRealType) q->blue <= mean.blue) ?
0 : QuantumRange);
q->opacity=(Quantum) (((MagickRealType) q->opacity <= mean.opacity) ?
0 : QuantumRange);
if (image->colorspace == CMYKColorspace)
threshold_indexes[x]=(IndexPacket) (((MagickRealType)
threshold_indexes[x] <= mean.index) ? 0 : QuantumRange);
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely, each channel value of the image is 'thresholded' so that
% if it is equal to or less than the given value it is set to zero, while
% any value greater than that is set to its maximum, QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImageChannel method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold)
% MagickBooleanType BilevelImageChannel(Image *image,
% const ChannelType channel,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: define the threshold values.
%
% Aside: You can get the same results as operator using LevelImageChannels()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  /*
    Convenience wrapper: threshold every default channel of the image.
  */
  return(BilevelImageChannel(image,DefaultChannels,threshold));
}
/* Threshold the selected channels of the image in place: values <= threshold
   become 0, values above it become QuantumRange.  With DefaultChannels the
   gray intensity drives all three color channels; otherwise each selected
   channel is thresholded independently. */
MagickExport MagickBooleanType BilevelImageChannel(Image *image,
const ChannelType channel,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"
ExceptionInfo
*exception;
long
progress,
y;
MagickBooleanType
status;
ViewInfo
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Bilevel threshold image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
register IndexPacket
*indexes;
register long
x;
register PixelPacket
*q;
/* a previous row failed: skip the rest but keep the parallel loop valid */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (channel == DefaultChannels)
{
/* gray threshold: drive red/green/blue from the pixel intensity */
for (x=0; x < (long) image->columns; x++)
{
q->red=(Quantum) ((MagickRealType) PixelIntensityToQuantum(q) <=
threshold ? 0 : QuantumRange);
q->green=q->red;
q->blue=q->red;
q++;
}
}
else
/* per-channel threshold, only on the channels the caller selected */
for (x=0; x < (long) image->columns; x++)
{
if ((channel & RedChannel) != 0)
q->red=(Quantum) ((MagickRealType) q->red <= threshold ? 0 :
QuantumRange);
if ((channel & GreenChannel) != 0)
q->green=(Quantum) ((MagickRealType) q->green <= threshold ? 0 :
QuantumRange);
if ((channel & BlueChannel) != 0)
q->blue=(Quantum) ((MagickRealType) q->blue <= threshold ? 0 :
QuantumRange);
if ((channel & OpacityChannel) != 0)
{
/* with a matte channel, snap to fully opaque/transparent instead */
if (image->matte == MagickFalse)
q->opacity=(Quantum) ((MagickRealType) q->opacity <= threshold ?
0 : QuantumRange);
else
q->opacity=(Quantum) ((MagickRealType) q->opacity <= threshold ?
OpaqueOpacity : TransparentOpacity);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
indexes[x]=(IndexPacket) ((MagickRealType) indexes[x] <= threshold ?
0 : QuantumRange);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
% MagickBooleanType BlackThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
const char *threshold)
{
  /*
    Convenience wrapper: black-threshold all default channels, reporting
    errors through the image's own exception structure.
  */
  return(BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
static inline MagickBooleanType IsLongGrayPixel(const LongPixelPacket *pixel)
{
  /*
    A pixel is gray when its three color components agree: exactly for the
    integer quantum build, within MagickEpsilon for the HDRI build.
  */
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((pixel->red != pixel->green) || (pixel->green != pixel->blue))
    return(MagickFalse);
  return(MagickTrue);
#else
  if ((fabs(pixel->red-pixel->green) > MagickEpsilon) ||
      (fabs(pixel->green-pixel->blue) > MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
#endif
}
/* Force all pixels below the threshold to black, leaving pixels at or above
   it unchanged.  The threshold string is parsed as a geometry: rho/sigma/xi/
   psi/chi map to red/green/blue/opacity/index; omitted components default to
   the red value, and a '%' suffix scales all values by QuantumRange/100.
   If the parsed threshold is gray, pixels are compared by intensity. */
MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
const ChannelType channel,const char *threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
GeometryInfo
geometry_info;
long
progress,
y;
LongPixelPacket
pixel;
MagickBooleanType
status;
MagickStatusType
flags;
ViewInfo
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (threshold == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/* parse the per-channel thresholds; missing components fall back to red */
flags=ParseGeometry(threshold,&geometry_info);
pixel.red=(long) (geometry_info.rho+0.5);
pixel.green=(long) (geometry_info.sigma+0.5);
if ((flags & SigmaValue) == 0)
pixel.green=pixel.red;
pixel.blue=(long) (geometry_info.xi+0.5);
if ((flags & XiValue) == 0)
pixel.blue=pixel.red;
pixel.opacity=(long) (geometry_info.psi+0.5);
if ((flags & PsiValue) == 0) /* FIX: was XiValue -- psi gates opacity */
pixel.opacity=pixel.red;
pixel.index=(long) (geometry_info.chi+0.5);
if ((flags & ChiValue) == 0)
pixel.index=pixel.red;
if ((flags & PercentValue) != 0)
{
/* percentages: scale each component to the quantum range */
pixel.red*=(QuantumRange/100.0);
pixel.green*=(QuantumRange/100.0);
pixel.blue*=(QuantumRange/100.0);
pixel.opacity*=(QuantumRange/100.0);
pixel.index*=(QuantumRange/100.0);
}
/*
Black threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
register IndexPacket
*indexes;
register long
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (IsLongGrayPixel(&pixel) != MagickFalse)
/* gray threshold: compare pixel intensity against the red component */
for (x=0; x < (long) image->columns; x++)
{
if ((MagickRealType) PixelIntensityToQuantum(q) < pixel.red)
{
if ((channel & RedChannel) != 0)
q->red=(Quantum) 0;
if ((channel & GreenChannel) != 0)
q->green=(Quantum) 0;
if ((channel & BlueChannel) != 0)
q->blue=(Quantum) 0;
}
q++;
}
else
/* per-channel threshold, only on the channels the caller selected */
for (x=0; x < (long) image->columns; x++)
{
if (((channel & RedChannel) != 0) &&
(q->red < pixel.red))
q->red=(Quantum) 0;
if (((channel & GreenChannel) != 0) &&
(q->green < pixel.green))
q->green=(Quantum) 0;
if (((channel & BlueChannel) != 0) &&
(q->blue < pixel.blue))
q->blue=(Quantum) 0;
if (((channel & OpacityChannel) != 0) &&
(q->opacity < pixel.opacity))
q->opacity=(Quantum) 0;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace) &&
(indexes[x] < pixel.index))
indexes[x]=(Quantum) 0;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *DestroyThresholdMap(Threshold *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release every resource owned by the map, then the map structure itself.
    Fields that were never assigned (NULL) are skipped; the frees are
    independent of one another.  Always returns NULL.
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (long *) NULL)
    map->levels=(long *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() looks for a given threshold map name or alias in the
% given XML file data, and returns the allocated map when found.
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Parse the given XML threshold-map list, locate the <threshold> element
   whose "map" or "alias" attribute equals map_id, and build a ThresholdMap
   from its <description> and <levels> children.  Returns NULL when the map
   is not found or the XML is malformed (errors reported via exception).
   The caller owns the returned map and frees it with DestroyThresholdMap. */
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
const char *filename,const char *map_id,ExceptionInfo *exception)
{
const char *attr, *content;
XMLTreeInfo *thresholds,*threshold,*description,*levels;
ThresholdMap *map;
map = (ThresholdMap *)NULL;
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
thresholds=NewXMLTree(xml,exception);
if ( thresholds == (XMLTreeInfo *)NULL )
return(map);
/* scan the <threshold> children for a matching "map" or "alias" */
for( threshold = GetXMLTreeChild(thresholds,"threshold");
threshold != (XMLTreeInfo *)NULL;
threshold = GetNextXMLTreeTag(threshold) ) {
attr = GetXMLTreeAttribute(threshold, "map");
if ( (attr != (char *)NULL) && (LocaleCompare(map_id,attr) == 0) )
break;
attr = GetXMLTreeAttribute(threshold, "alias");
if ( (attr != (char *)NULL) && (LocaleCompare(map_id,attr) == 0) )
break;
}
if ( threshold == (XMLTreeInfo *)NULL ) {
/* FIX: release the parsed XML tree on the not-found path too (leak) */
thresholds = DestroyXMLTree(thresholds);
return(map);
}
description = GetXMLTreeChild(threshold,"description");
if ( description == (XMLTreeInfo *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
return(map);
}
levels = GetXMLTreeChild(threshold,"levels");
if ( levels == (XMLTreeInfo *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<levels>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
return(map);
}
/* The map has been found -- Allocate a Threshold Map to return */
map = (ThresholdMap *)AcquireMagickMemory(sizeof(ThresholdMap));
if ( map == (ThresholdMap *)NULL )
ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
/* NULL all fields so DestroyThresholdMap is safe on any error path below */
map->map_id = (char *)NULL;
map->description = (char *)NULL;
map->levels = (long *) NULL;
/* Assign Basic Attributes */
attr = GetXMLTreeAttribute(threshold, "map");
if ( attr != (char *)NULL )
map->map_id = ConstantString(attr);
content = GetXMLTreeContent(description);
if ( content != (char *)NULL )
map->description = ConstantString(content);
attr = GetXMLTreeAttribute(levels, "width");
if ( attr == (char *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels width>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
map->width = (unsigned long) atoi(attr);
if ( map->width == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
attr = GetXMLTreeAttribute(levels, "height");
if ( attr == (char *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
map->height = (unsigned long) atoi(attr);
if ( map->height == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
attr = GetXMLTreeAttribute(levels, "divisor");
if ( attr == (char *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
map->divisor = atoi(attr);
if ( map->divisor < 2 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
/* Allocate threshold levels array */
content = GetXMLTreeContent(levels);
if ( content == (char *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<levels>, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
map->levels=(long *) AcquireQuantumMemory((size_t) map->width,map->height*
sizeof(*map->levels));
if ( map->levels == (long *)NULL )
ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
{ /* parse levels into integer array */
int i;
char *p;
for( i=0; i< (long) (map->width*map->height); i++) {
map->levels[i] = (int)strtol(content, &p, 10);
if ( p == content ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
/* each level must lie in [0, divisor] */
if ( map->levels[i] < 0 || map->levels[i] > map->divisor ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> %ld out of range, map \"%s\"",
map->levels[i], map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
content = p;
}
/* reject trailing extra values */
(void) strtol(content, &p, 10);
if ( p != content ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
thresholds = DestroyXMLTree(thresholds);
map = DestroyThresholdMap(map);
return(map);
}
}
thresholds = DestroyXMLTree(thresholds);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for
% a map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
ExceptionInfo *exception)
{
  /*
    Walk every configured "thresholds.xml" source in turn and return the
    first map whose name or alias matches map_id, or NULL when none does.
  */
  LinkedListInfo
    *options;

  const StringInfo
    *option;

  ThresholdMap
    *map;

  map=(ThresholdMap *) NULL;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  while (map == (ThresholdMap *) NULL)
  {
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    if (option == (const StringInfo *) NULL)
      break;
    map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),map_id,exception);
  }
  options=DestroyConfigureOptions(options);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: An pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Print a "Map / Alias / Description" table of every <threshold> entry in
   the given XML map list to the given stream.  Returns MagickFalse when the
   XML cannot be parsed or a required attribute/element is missing. */
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
const char *filename,ExceptionInfo *exception)
{
XMLTreeInfo *thresholds,*threshold,*description;
const char *map,*alias,*content;
assert( xml != (char *)NULL );
assert( file != (FILE *)NULL );
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
thresholds=NewXMLTree(xml,exception);
if ( thresholds == (XMLTreeInfo *)NULL )
return(MagickFalse);
(void) fprintf(file,"%-16s %-12s %s\n", "Map", "Alias", "Description");
(void) fprintf(file,"----------------------------------------------------\n");
for( threshold = GetXMLTreeChild(thresholds,"threshold");
threshold != (XMLTreeInfo *)NULL;
threshold = GetNextXMLTreeTag(threshold) )
{
/* the "map" attribute is mandatory for every entry */
map = GetXMLTreeAttribute(threshold, "map");
if (map == (char *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<map>");
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
alias = GetXMLTreeAttribute(threshold, "alias");
/* alias is optional, no if test needed */
description=GetXMLTreeChild(threshold,"description");
if ( description == (XMLTreeInfo *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"", map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
content=GetXMLTreeContent(description);
if ( content == (char *)NULL ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<description>, map \"%s\"", map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
(void) fprintf(file,"%-16s %-12s %s\n",map,alias ? alias : "", content);
}
thresholds=DestroyXMLTree(thresholds);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: An pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
ExceptionInfo *exception)
{
  /*
    List the threshold maps of every configured "thresholds.xml" source to
    the given stream (stdout when file is NULL).  Returns MagickTrue when
    at least one source listed successfully.
  */
  LinkedListInfo
    *options;

  const StringInfo
    *option;

  MagickStatusType
    status;

  status=MagickFalse;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) fprintf(file, "\n Threshold Maps for Ordered Dither Operations\n");
  for ( ; ; )
  {
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    if (option == (const StringInfo *) NULL)
      break;
    (void) fprintf(file,"\nPATH: %s\n\n",GetStringInfoPath(option));
    status|=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() uses the ordered dithering technique of reducing color
% images to monochrome using positional information to retain as much
% information as possible.
%
% WARNING: This function is deprecated, and is now just a call to
% the more powerful OrderedPosterizeImage() function.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image)
% MagickBooleanType OrderedDitherImageChannel(Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image)
{
  /*
    Deprecated wrapper: ordered-dither every default channel, reporting
    errors through the image's own exception structure.
  */
  return(OrderedDitherImageChannel(image,DefaultChannels,&image->exception));
}
MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image,
const ChannelType channel,ExceptionInfo *exception)
{
  /*
    Deprecated: forward to the augmented OrderedPosterizeImageChannel()
    using the classic ordered 8x8 dither map at the default (2) levels.
  */
  return(OrderedPosterizeImageChannel(image,channel,"o8x8",exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedPosterizeImage() will perform a ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedPosterizeImage method is:
%
% MagickBooleanType OrderedPosterizeImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
% MagickBooleanType OrderedPosterizeImageChannel(Image *image,
% const ChannelType channel,const char *threshold_map,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels tho dither between.
%
% Any level number less than 2 will be equivelent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with a ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormaped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimim of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
  /*
    Posterize all default channels with the named threshold map by
    delegating to OrderedPosterizeImageChannel().
  */
  return(OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map,
    exception));
}
MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image,
  const ChannelType channel,const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  long
    progress,
    y;

  LongPixelPacket
    levels;

  MagickBooleanType
    status;

  ThresholdMap
    *map;

  ViewInfo
    *image_view;

  /*
    Perform an ordered dither over multiple intensity levels, selected by
    the map name and optional per-channel level counts in 'threshold_map'
    (e.g. "o3x3,6" or "checker,8,8,4").  Returns MagickFalse when the map
    is unknown or a pixel region could not be accessed.

    Fixes vs. previous revision: the map-name token could write one byte
    past token[]; 'map' leaked when SetImageStorageClass() failed; the
    accumulated 'status' was discarded (the function always returned
    MagickTrue); dead '#if 0' / '#else' scaffolding removed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  {
    /*
      Extract the map name (text before the first comma) and look it up.
    */
    char token[MaxTextExtent], *p;
    p=(char *)threshold_map;
    while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
                    (*p != '\0'))
      p++;
    threshold_map=p;
    while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
                    (*p != '\0')) {
      if ((p-threshold_map) >= (MaxTextExtent-1))
        break;  /* FIX: was >= MaxTextExtent, letting the '\0' below write
                   one byte past the end of token[] */
      token[p-threshold_map] = *p;
      p++;
    }
    token[p-threshold_map] = '\0';
    map = GetThresholdMap(token, exception);
    if ( map == (ThresholdMap *)NULL ) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  }
  /* Set channel levels from extra comma seperated arguments
     Default to 2, the single value given, or individual channel values
  */
  { /* parse directly as a comma seperated list of integers */
    char *p;
    p = strchr(threshold_map,',');
    if ( p != (char *)NULL && isdigit((int) ((unsigned char) *(++p))) )
      levels.index = (unsigned long) strtol(p, &p, 10);
    else
      levels.index = 2;
    levels.red     = ((channel & RedChannel  )   != 0) ? levels.index : 0;
    levels.green   = ((channel & GreenChannel)   != 0) ? levels.index : 0;
    levels.blue    = ((channel & BlueChannel)    != 0) ? levels.index : 0;
    levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0;
    levels.index   = ((channel & IndexChannel)   != 0
        && (image->colorspace == CMYKColorspace)) ? levels.index : 0;
    /* if more than a single number, each channel has a separate value */
    if ( p != (char *) NULL && *p == ',' ) {
      p=strchr(threshold_map,',');
      p++;
      if ((channel & RedChannel) != 0)
        levels.red = (unsigned long) strtol(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & GreenChannel) != 0)
        levels.green = (unsigned long) strtol(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & BlueChannel) != 0)
        levels.blue = (unsigned long) strtol(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace)
        levels.index=(unsigned long) strtol(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & OpacityChannel) != 0)
        levels.opacity = (unsigned long) strtol(p, &p, 10), (void)(*p == ',' && p++);
    }
  }
  { /* Do the posterized ordered dithering of the image */
    int
      d;

    /* d = number of psuedo-level divisions added between color levels */
    d = map->divisor-1;

    /* reduce levels to levels - 1 */
    levels.red     = levels.red     ? levels.red-1     : 0;
    levels.green   = levels.green   ? levels.green-1   : 0;
    levels.blue    = levels.blue    ? levels.blue-1    : 0;
    levels.opacity = levels.opacity ? levels.opacity-1 : 0;
    levels.index   = levels.index   ? levels.index-1   : 0;

    if (SetImageStorageClass(image,DirectClass) == MagickFalse)
      {
        InheritException(exception,&image->exception);
        map=DestroyThresholdMap(map);  /* FIX: map leaked on this path */
        return(MagickFalse);
      }
    status=MagickTrue;
    progress=0;
    image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
    for (y=0; y < (long) image->rows; y++)
    {
      register long
        x;

      register IndexPacket
        *indexes;

      register PixelPacket
        *q;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      for (x=0; x < (long) image->columns; x++)
      {
        register int
          threshold,
          t,
          l;

        /*
          Figure out the dither threshold for this pixel
          This must be a integer from 1 to map->divisor-1
        */
        threshold = map->levels[(x%map->width) +map->width*(y%map->height)];
        /* Dither each channel in the image as appropriate
          Notes on the integer Math...
              total number of divisions = (levels-1)*(divisor-1)+1)
              t1 = this colors psuedo_level =
                      q->red * total_divisions / (QuantumRange+1)
              l = posterization level       0..levels
              t = dither threshold level    0..divisor-1  NB: 0 only on last
              Each color_level is of size   QuantumRange / (levels-1)
              NB: All input levels and divisor are already had 1 subtracted
              Opacity is inverted so 'off' represents transparent.
        */
        if (levels.red) {
          t = (int) (QuantumScale*q->red*(levels.red*d+1));
          l = t/d;  t = t-l*d;
          q->red=(Quantum) ((l+(t >= threshold))*QuantumRange/levels.red);
        }
        if (levels.green) {
          t = (int) (QuantumScale*q->green*(levels.green*d+1));
          l = t/d;  t = t-l*d;
          q->green=(Quantum) ((l+(t >= threshold))*QuantumRange/levels.green);
        }
        if (levels.blue) {
          t = (int) (QuantumScale*q->blue*(levels.blue*d+1));
          l = t/d;  t = t-l*d;
          q->blue=(Quantum) ((l+(t >= threshold))*QuantumRange/levels.blue);
        }
        if (levels.opacity) {
          t = (int) ((1.0-QuantumScale*q->opacity)*(levels.opacity*d+1));
          l = t/d;  t = t-l*d;
          q->opacity=(Quantum) ((1.0-l-(t >= threshold))*QuantumRange/
            levels.opacity);
        }
        if (levels.index) {
          t = (int) (QuantumScale*indexes[x]*(levels.index*d+1));
          l = t/d;  t = t-l*d;
          indexes[x]=(IndexPacket) ((l+(t>=threshold))*QuantumRange/
            levels.index);
        }
        q++;
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
          proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    image_view=DestroyCacheView(image_view);
  }
  map=DestroyThresholdMap(map);
  /* FIX: previously always returned MagickTrue, hiding pixel-cache
     failures recorded in 'status' */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImageChannel(Image *image,
% const char *thresholds,ExceptionInfo *exception)
% MagickBooleanType RandomThresholdImageChannel(Image *image,
% const ChannelType channel,const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing low,high thresholds. If the
% string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
% is performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  /*
    Randomly threshold all default channels; convenience wrapper around
    RandomThresholdImageChannel().
  */
  return(RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception));
}
/*
  RandomThresholdImageChannel(): threshold each selected channel of every
  pixel against a per-pixel random value clamped into [min,max], where the
  bounds come from the 'thresholds' geometry string.  Small integer bounds
  fall back to the legacy ordered-dither behaviour (IM <= 6.2.9-6).
  NOTE(review): GetPseudoRandomValue() is called from inside OpenMP
  parallel regions -- presumably it is thread-safe in this code base;
  confirm before changing the scheduling.
*/
MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
GeometryInfo
geometry_info;
MagickStatusType
flags;
long
progress,
y;
MagickBooleanType
status;
MagickPixelPacket
threshold;
MagickRealType
min_threshold,
max_threshold;
ViewInfo
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if (thresholds == (const char *) NULL)
return(MagickTrue);
GetMagickPixelPacket(image,&threshold);
/* Parse "low[,high]" bounds; a lone value means low == high. */
min_threshold=0.0;
max_threshold=(MagickRealType) QuantumRange;
flags=ParseGeometry(thresholds,&geometry_info);
min_threshold=geometry_info.rho;
max_threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
max_threshold=min_threshold;
if (strchr(thresholds,'%') != (char *) NULL)
{
/* Percent form: scale both bounds to the quantum range. */
max_threshold*=(MagickRealType) (0.01*QuantumRange);
min_threshold*=(MagickRealType) (0.01*QuantumRange);
}
else
if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
(min_threshold <= 8))
{
/*
Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
*/
status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
return(status);
}
/*
Random threshold image.
*/
status=MagickTrue;
progress=0;
if (channel == AllChannels)
{
/*
  All-channels path: threshold on pixel intensity and map each pixel to
  a 2-entry black/white colormap instead of per-component thresholds.
*/
if (AcquireImageColormap(image,2) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
MagickBooleanType
sync;
register IndexPacket
*indexes;
register long
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (long) image->columns; x++)
{
IndexPacket
index;
MagickRealType
intensity;
/* Intensity outside [min,max] is forced to that bound, so those
   pixels threshold deterministically; inside the band the cut is
   uniformly random. */
intensity=(MagickRealType) PixelIntensityToQuantum(q);
if (intensity < min_threshold)
threshold.index=min_threshold;
else if (intensity > max_threshold)
threshold.index=max_threshold;
else
threshold.index=(MagickRealType)(QuantumRange*
GetPseudoRandomValue());
index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
indexes[x]=index;
*q++=image->colormap[(long) index];
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/* Per-channel path: each selected component gets its own random cut. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
register IndexPacket
*indexes;
register long
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (long) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
if ((MagickRealType) q->red < min_threshold)
threshold.red=min_threshold;
else
if ((MagickRealType) q->red > max_threshold)
threshold.red=max_threshold;
else
threshold.red=(MagickRealType) (QuantumRange*
GetPseudoRandomValue());
}
if ((channel & GreenChannel) != 0)
{
if ((MagickRealType) q->green < min_threshold)
threshold.green=min_threshold;
else
if ((MagickRealType) q->green > max_threshold)
threshold.green=max_threshold;
else
threshold.green=(MagickRealType) (QuantumRange*
GetPseudoRandomValue());
}
if ((channel & BlueChannel) != 0)
{
if ((MagickRealType) q->blue < min_threshold)
threshold.blue=min_threshold;
else
if ((MagickRealType) q->blue > max_threshold)
threshold.blue=max_threshold;
else
threshold.blue=(MagickRealType) (QuantumRange*
GetPseudoRandomValue());
}
if ((channel & OpacityChannel) != 0)
{
if ((MagickRealType) q->opacity < min_threshold)
threshold.opacity=min_threshold;
else
if ((MagickRealType) q->opacity > max_threshold)
threshold.opacity=max_threshold;
else
threshold.opacity=(MagickRealType) (QuantumRange*
GetPseudoRandomValue());
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if ((MagickRealType) indexes[x] < min_threshold)
threshold.index=min_threshold;
else
if ((MagickRealType) indexes[x] > max_threshold)
threshold.index=max_threshold;
else
threshold.index=(MagickRealType) (QuantumRange*
GetPseudoRandomValue());
}
/* Apply the per-channel cuts computed above. */
if ((channel & RedChannel) != 0)
q->red=(Quantum) ((MagickRealType) q->red <= threshold.red ? 0 :
QuantumRange);
if ((channel & GreenChannel) != 0)
q->green=(Quantum) ((MagickRealType) q->green <= threshold.green ? 0 :
QuantumRange);
if ((channel & BlueChannel) != 0)
q->blue=(Quantum) ((MagickRealType) q->blue <= threshold.blue ? 0 :
QuantumRange);
if ((channel & OpacityChannel) != 0)
q->opacity=(Quantum) ((MagickRealType) q->opacity <= threshold.opacity ?
0 : QuantumRange);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
indexes[x]=(IndexPacket) ((MagickRealType) indexes[x] <=
threshold.index ? 0 : QuantumRange);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
% MagickBooleanType WhiteThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  /*
    Force pixels above 'threshold' to white on all default channels;
    wrapper around WhiteThresholdImageChannel().
  */
  return(WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  GeometryInfo
    geometry_info;

  long
    progress,
    y;

  LongPixelPacket
    pixel;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  ViewInfo
    *image_view;

  /*
    Force every selected pixel component above its per-channel threshold
    to white (QuantumRange), leaving components at or below the threshold
    unchanged.  'threshold' is a geometry string rho[,sigma[,xi[,psi[,chi]]]]
    mapping to red, green, blue, opacity and index; omitted values default
    to the red threshold, and a '%' suffix scales by QuantumRange/100.

    Fix vs. previous revision: the opacity default was guarded by XiValue
    (copy/paste bug) so a geometry that set xi but not psi silently kept a
    bogus opacity threshold -- now guarded by PsiValue.  Blue and index are
    also rounded (+0.5) like the other channels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (threshold == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  flags=ParseGeometry(threshold,&geometry_info);
  pixel.red=(long) (geometry_info.rho+0.5);
  pixel.green=(long) (geometry_info.sigma+0.5);
  if ((flags & SigmaValue) == 0)
    pixel.green=pixel.red;
  pixel.blue=(long) (geometry_info.xi+0.5);  /* FIX: round like the others */
  if ((flags & XiValue) == 0)
    pixel.blue=pixel.red;
  pixel.opacity=(long) (geometry_info.psi+0.5);
  if ((flags & PsiValue) == 0)  /* FIX: was XiValue */
    pixel.opacity=pixel.red;
  pixel.index=(long) (geometry_info.chi+0.5);  /* FIX: round like the others */
  if ((flags & ChiValue) == 0)
    pixel.index=pixel.red;
  if ((flags & PercentValue) != 0)
    {
      pixel.red*=(QuantumRange/100.0);
      pixel.green*=(QuantumRange/100.0);
      pixel.blue*=(QuantumRange/100.0);
      pixel.opacity*=(QuantumRange/100.0);
      pixel.index*=(QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (IsLongGrayPixel(&pixel) != MagickFalse)
      for (x=0; x < (long) image->columns; x++)
      {
        /* Gray threshold: compare overall intensity once per pixel. */
        if ((MagickRealType) PixelIntensityToQuantum(q) > pixel.red)
          {
            if ((channel & RedChannel) != 0)
              q->red=(Quantum) QuantumRange;
            if ((channel & GreenChannel) != 0)
              q->green=(Quantum) QuantumRange;
            if ((channel & BlueChannel) != 0)
              q->blue=(Quantum) QuantumRange;
          }
        q++;
      }
    else
      for (x=0; x < (long) image->columns; x++)
      {
        if (((channel & RedChannel) != 0) &&
            (q->red > pixel.red))
          q->red=(Quantum) QuantumRange;
        if (((channel & GreenChannel) != 0) &&
            (q->green > pixel.green))
          q->green=(Quantum) QuantumRange;
        if (((channel & BlueChannel) != 0) &&
            (q->blue > pixel.blue))
          q->blue=(Quantum) QuantumRange;
        if (((channel & OpacityChannel) != 0) &&
            (q->opacity > pixel.opacity))
          q->opacity=(Quantum) QuantumRange;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace) &&
            (indexes[x] > pixel.index))
          indexes[x]=(Quantum) QuantumRange;
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
tsne_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef TSNE_INL
#define TSNE_INL
#include "hdi/dimensionality_reduction/tsne.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include <time.h>
#include <cmath>
#ifdef __USE_GCD__
#include <dispatch/dispatch.h>
#endif
namespace hdi{
namespace dr{
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type>
TSNE<scalar_type>::InitParams::InitParams():
// Conventional tSNE defaults: perplexity 30, 2-D embedding, learning
// rate (eta) 200, momentum 0.5 switching to 0.8 and 4x early
// exaggeration both ending at iteration 250.
_perplexity(30),
_seed(0),
_embedding_dimensionality(2),
_minimum_gain(0.1),
_eta(200),
_momentum(0.5),
_final_momentum(0.8),
_mom_switching_iter(250),
_exaggeration_factor(4),
_remove_exaggeration_iter(250)
{}
/////////////////////////////////////////////////////////////////////////
// Construct an empty instance; data points and initialize() must be
// supplied before running iterations.
template <typename scalar_type>
TSNE<scalar_type>::TSNE():
_initialized(false),
_dimensionality(0),
_logger(nullptr)
{
}
template <typename scalar_type>
typename TSNE<scalar_type>::data_handle_type TSNE<scalar_type>::addDataPoint(const scalar_type* ptr){
  // Register one high-dimensional point (the data is not copied; the
  // caller keeps ownership) and return its handle, i.e. its index in the
  // internal point list.  Only legal before initialize().
  checkAndThrowLogic(!_initialized,"Class should be uninitialized to add a data-point");
  checkAndThrowLogic(_dimensionality > 0,"Invalid dimensionality");
  _high_dimensional_data.push_back(ptr);
  const auto handle = _high_dimensional_data.size() - 1;
  return static_cast<data_handle_type>(handle);
}
template <typename scalar_type>
void TSNE<scalar_type>::reset(){
// Allow initialize() to be called again; previously added data points
// are retained.
_initialized = false;
}
template <typename scalar_type>
void TSNE<scalar_type>::clear(){
// Drop all data points and the embedding contents, returning the object
// to its pre-initialization state.
// NOTE(review): dereferences _embedding, so calling clear() before
// initialize() would hit a null/stale pointer -- confirm callers.
_high_dimensional_data.clear();
_embedding->clear();
_initialized = false;
}
template <typename scalar_type>
void TSNE<scalar_type>::getHighDimensionalDescriptor(scalar_vector_type& data_point, data_handle_type handle)const{
  // Copy the input-space coordinates of the point identified by 'handle'
  // into 'data_point', resizing it to the data dimensionality.
  data_point.resize(_dimensionality);
  const scalar_type* src = _high_dimensional_data[handle];
  for(int d = 0; d < _dimensionality; ++d){
    data_point[d] = src[d];
  }
}
template <typename scalar_type>
void TSNE<scalar_type>::getEmbeddingPosition(scalar_vector_type& embedding_position, data_handle_type handle)const{
  // Copy the current low-dimensional position of 'handle' out of the
  // embedding container.  Requires initialize() to have been called.
  if(!_initialized){
    throw std::logic_error("Algorithm must be initialized before ");
  }
  const int dim = _init_params._embedding_dimensionality;
  embedding_position.resize(dim);
  const auto& container = _embedding->getContainer();
  for(int d = 0; d < dim; ++d){
    embedding_position[d] = container[handle*dim + d];
  }
}
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type>
void TSNE<scalar_type>::initialize(data::Embedding<scalar_type>* embedding, InitParams params){
  // Allocate all N*N working buffers, compute the high-dimensional joint
  // probabilities P and place the embedding at a random initial position.
  // Must be called after all data points have been added; throws on an
  // empty dataset.
  utils::secureLog(_logger,"Initializing tSNE...");
  if(size() == 0){
    throw std::logic_error("Cannot initialize an empty dataset");
  }
  {
    _embedding = embedding;
    int size_sq = size();
    size_sq *= size_sq;
    _P.resize(size_sq);
    _Q.resize(size_sq);
    _distances_squared.resize(size_sq);
    _embedding->resize(params._embedding_dimensionality,size(),0);
    _embedding_container = &_embedding->getContainer();
    _gradient.resize(size()*params._embedding_dimensionality,0);
    _previous_gradient.resize(size()*params._embedding_dimensionality,0);
    _gain.resize(size()*params._embedding_dimensionality,1);
    _sigmas.resize(size());
    // FIX: was `_init_params = _init_params;` (a no-op self-assignment),
    // which silently discarded the caller-supplied parameters and left
    // the instance running with the defaults.
    _init_params = params;
  }
  //compute distances between data-points
  computeHighDimensionalDistances();
  //Compute gaussian distributions
  computeGaussianDistributions(params._perplexity);
  //Compute High-dimensional distribution
  computeHighDimensionalDistribution();
  //Initialize Embedding position
  initializeEmbeddingPosition(params._seed);

  _iteration = 0;

  _initialized = true;
  utils::secureLog(_logger,"Initialization complete!");
}
template <typename scalar_type>
void TSNE<scalar_type>::computeHighDimensionalDistances(){
// Fill the symmetric n*n pairwise distance matrix (diagonal zero).
// Each parallel iteration j writes only cells (j,i) and (i,j) for i>j,
// so the concurrent writes never overlap.
// NOTE(review): despite the member name _distances_squared, the
// non-squared euclideanDistance is used (the squared call is left
// commented out below) -- confirm which metric the perplexity search
// expects before changing either.
utils::secureLog(_logger,"Computing High-dimensional distances...");
const int n = size();
#ifdef __USE_GCD__
std::cout << "GCD dispatch, tsne_inl 165.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
_distances_squared[j*n + j] = 0;
for(int i = j+1; i < n; ++i){
scalar_type res(utils::euclideanDistance<scalar_type>(_high_dimensional_data[i],_high_dimensional_data[i]+_dimensionality, _high_dimensional_data[j],_high_dimensional_data[j]+_dimensionality));
//scalar_type res(utils::euclideanDistanceSquared<scalar_type>(_high_dimensional_data[i],_high_dimensional_data[i]+_dimensionality, _high_dimensional_data[j],_high_dimensional_data[j]+_dimensionality));
_distances_squared[j*n + i] = res;
_distances_squared[i*n + j] = res;
}
}
#ifdef __USE_GCD__
);
#endif
}
template <typename scalar_type>
void TSNE<scalar_type>::computeGaussianDistributions(double perplexity){
// For every point j, fit a Gaussian over its distance row so that the
// conditional distribution P_.|j reaches the requested perplexity,
// writing the probabilities into row j of _P and the fitted bandwidth
// into _sigmas[j].  Rows are independent, hence the parallel loop.
utils::secureLog(_logger,"Computing gaussian distributions...");
const int n = size();
#ifdef __USE_GCD__
std::cout << "GCD dispatch, tsne_inl 189.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
// NOTE(review): 200 and 1e-5 are presumably the iteration cap and
// tolerance of the perplexity search -- confirm against
// utils::computeGaussianDistributionWithFixedPerplexity.
const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
_distances_squared.begin() + j*n,
_distances_squared.begin() + (j + 1)*n,
_P.begin() + j*n,
_P.begin() + (j + 1)*n,
perplexity,
200,
1e-5,
j
);
// A point is never considered its own neighbour.
_P[j*n + j] = 0.;
_sigmas[j] = static_cast<scalar_type>(sigma);
}
#ifdef __USE_GCD__
);
#endif
}
template <typename scalar_type>
void TSNE<scalar_type>::computeHighDimensionalDistribution(){
  // Symmetrize the conditional probabilities and normalize by N:
  // P_ij = (P_j|i + P_i|j) / (2N), written into both triangles of _P.
  utils::secureLog(_logger,"Computing high-dimensional joint probability distribution...");
  const int n = size();
  for(int row = 0; row < n; ++row){
    for(int col = row+1; col < n; ++col){
      const double joint = (_P[row*n + col]+_P[col*n + row])*0.5/n;
      _P[row*n + col] = static_cast<scalar_type>(joint);
      _P[col*n + row] = static_cast<scalar_type>(joint);
    }
  }
}
template <typename scalar_type>
void TSNE<scalar_type>::initializeEmbeddingPosition(int seed, double multiplier){
// Seed the embedding with random coordinates: each container element is a
// Gaussian deviate from the Box-Muller polar method, scaled by
// 'multiplier'.  A negative seed uses the current time.
// NOTE(review): only the x deviate of each generated (x,y) pair is used
// (y is computed and discarded), and 'multiplier' presumably has a
// default argument in the header declaration -- confirm.
utils::secureLog(_logger,"Initializing the embedding...");
if(seed < 0){
std::srand(static_cast<unsigned int>(time(NULL)));
}
else{
std::srand(seed);
}
for(auto& v : _embedding->getContainer()){
double x(0.);
double y(0.);
double radius(0.);
// Rejection-sample a point in the unit disc (excluding the origin).
do {
x = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
y = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
radius = (x * x) + (y * y);
} while((radius >= 1.0) || (radius == 0.0));
radius = sqrt(-2 * log(radius) / radius);
x *= radius;
y *= radius;
v = static_cast<scalar_type>(x * multiplier);
}
}
template <typename scalar_type>
void TSNE<scalar_type>::doAnIteration(double mult){
  // Run one gradient-descent step: refresh the low-dimensional
  // distribution Q, compute the KL gradient (with early exaggeration
  // while it is still active) and move the embedding.
  if(!_initialized){
    throw std::logic_error("Cannot compute a gradient descent iteration on unitialized data");
  }
  if(_iteration == _init_params._mom_switching_iter){
    utils::secureLog(_logger,"Switch to final momentum...");
  }
  if(_iteration == _init_params._remove_exaggeration_iter){
    utils::secureLog(_logger,"Remove exaggeration...");
  }
  computeLowDimensionalDistribution();
  const double exaggeration =
      (_iteration<_init_params._remove_exaggeration_iter)?_init_params._exaggeration_factor:1.;
  computeGradient(exaggeration);
  updateTheEmbedding(mult);
}
template <typename scalar_type>
void TSNE<scalar_type>::computeLowDimensionalDistribution(){
// Recompute the unnormalized Student-t affinities Q_ij = 1/(1+d_ij^2)
// between embedding points (diagonal zero), then accumulate their grand
// total into _normalization_Q, the Z used by the gradient and KL code.
// Parallel iterations write disjoint symmetric cell pairs, so they do
// not race; the reduction below is serial.
const int n = size();
#ifdef __USE_GCD__
std::cout << "GCD dispatch, tsne_inl 283.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
_Q[j*n + j] = 0;
for(int i = j+1; i < n; ++i){
const double euclidean_dist_sq(
utils::euclideanDistanceSquared<scalar_type>(
_embedding_container->begin()+j*_init_params._embedding_dimensionality,
_embedding_container->begin()+(j+1)*_init_params._embedding_dimensionality,
_embedding_container->begin()+i*_init_params._embedding_dimensionality,
_embedding_container->begin()+(i+1)*_init_params._embedding_dimensionality
)
);
const double v = 1./(1.+euclidean_dist_sq);
_Q[j*n + i] = static_cast<scalar_type>(v);
_Q[i*n + j] = static_cast<scalar_type>(v);
}
}
#ifdef __USE_GCD__
);
#endif
// Each symmetric pair contributes twice to the sum, matching the
// double-counted sums used by computeGradient().
double sum_Q = 0;
for(auto& v : _Q){
sum_Q += v;
}
_normalization_Q = static_cast<scalar_type>(sum_Q);
}
template <typename scalar_type>
void TSNE<scalar_type>::computeGradient(double exaggeration){
  // Gradient of the KL objective for every embedding coordinate:
  //   4 * (exaggeration * sum_j P_ij Q_ij (y_i - y_j)
  //        - sum_j Q_ij^2 / Z (y_i - y_j)),   Z = _normalization_Q.
  const int num_points = size();
  const int num_dims = _init_params._embedding_dimensionality;
  const auto& embedding = *_embedding_container;
  for(int a = 0; a < num_points; ++a){
    for(int k = 0; k < num_dims; ++k){
      _gradient[a * num_dims + k] = 0;
      double attractive(0.);
      double repulsive(0.);
      for(int b = 0; b < num_points; ++b){
        const int ab = a*num_points + b;
        const double delta(embedding[a * num_dims + k] - embedding[b * num_dims + k]);
        attractive += _P[ab] * _Q[ab] * delta;
        repulsive  += _Q[ab] * _Q[ab] / _normalization_Q * delta;
      }
      _gradient[a * num_dims + k] = static_cast<scalar_type>(4 * (exaggeration*attractive - repulsive));
    }
  }
}
// Three-way sign helper: -1, 0 or +1 in the same type as the input.
// (Marked "temp" by the original author.)
template <typename T>
T sign(T x) {
  if (x == .0) {
    return .0;
  }
  return (x < .0) ? T(-1.0) : T(1.0);
}
template <typename scalar_type>
void TSNE<scalar_type>::updateTheEmbedding(double mult){
// Delta-bar-delta style update: per-component gains grow additively when
// the gradient flips sign and shrink multiplicatively otherwise (clamped
// below by _minimum_gain); a momentum term then integrates the scaled
// gradient into the embedding, and the iteration counter advances.
// NOTE(review): the loop index is a signed int compared against a
// size_t container size -- fine for realistic sizes but triggers
// sign-compare warnings.
for(int i = 0; i < _gradient.size(); ++i){
_gain[i] = static_cast<scalar_type>((sign(_gradient[i]) != sign(_previous_gradient[i])) ? (_gain[i] + .2) : (_gain[i] * .8));
if(_gain[i] < _init_params._minimum_gain){
_gain[i] = static_cast<scalar_type>(_init_params._minimum_gain);
}
// NOTE(review): this expression algebraically reduces to the gradient
// itself (sign(g)*|g*eta*gain|/(eta*gain) == g for eta*gain > 0); it may
// only guard rounding/sign edge cases -- confirm intent before
// simplifying.
_gradient[i] = static_cast<scalar_type>((_gradient[i]>0?1:-1)*std::abs(_gradient[i]*_init_params._eta* _gain[i])/(_init_params._eta*_gain[i]));
_previous_gradient[i] = static_cast<scalar_type>(((_iteration<_init_params._mom_switching_iter)?_init_params._momentum:_init_params._final_momentum) * _previous_gradient[i] - _init_params._eta * _gain[i] * _gradient[i]);
(*_embedding_container)[i] += _previous_gradient[i] * mult;
}
++_iteration;
}
// KL(P || Q) = sum_{i != j} p_ij * log(p_ij / q_ij), where the low-dimensional
// similarity q_ij is _Q[j*n+i] normalized by _normalization_Q.
// Fix: entries with p_ij == 0 are skipped. By the usual convention
// 0 * log(0) == 0, but evaluated directly in IEEE arithmetic it yields
// 0 * (-inf) == NaN and poisons the whole sum.
template <typename scalar_type>
double TSNE<scalar_type>::computeKullbackLeiblerDivergence(){
double kl = 0;
const int n = size();
for(int j = 0; j < n; ++j){
for(int i = 0; i < n; ++i){
if(i == j)
continue;
const double p = _P[j*n + i];
if(p == 0.)
continue; // 0*log(0) == 0 by convention; also avoids NaN
kl += p * std::log(p / (_Q[j*n + i]/_normalization_Q));
}
}
return kl;
}
}
}
#endif
|
NETSPLITLM_fmt_plug.c | /*
* NETHALFLM_fmt.c
* Written by DSK (Based on NetLM/NetNTLM patch by JoMo-Kun)
* Performs brute-force cracking of the HalfLM challenge/response pairs.
*
* Modified for performance and OMP support by magnum 2011
*
* Storage Format:
* domain\username:::lm response:nt response:challenge
*
* NOTE, in loader.c, the format appeared to be domain\username:::lm response:challenge
* so that format has been built into the 'prepare' function (JimF).
*
* Code is in public domain.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETHALFLM;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETHALFLM);
#else
#include <string.h>
#ifdef _OPENMP
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 65536
#endif
#endif // __MIC__
#include <omp.h>
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#include <openssl/des.h>
#include "memdbg.h"
#ifndef uchar
#define uchar unsigned char
#endif
#define FORMAT_LABEL "nethalflm"
#define FORMAT_NAME "HalfLM C/R"
#define FORMAT_TAG "$NETHALFLM$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 7
#define BINARY_SIZE 8
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH 48
/* Tag + '$' (12 bytes) + 16 hex chars of challenge + 48 hex chars of response.
 * Fixed: parenthesized -- an unparenthesized sum macro miscomputes in any
 * expression context with higher-precedence neighbours (e.g. 2 * TOTAL_LENGTH). */
#define TOTAL_LENGTH (12 + 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
// these may be altered in init() if running OMP
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// Self-test vectors. Entries either give the canonical
// "$NETHALFLM$<challenge>$<response>" ciphertext directly, or exercise
// prepare() via the split-fields form (per the header: lm response is
// field 3, nt response field 4, challenge field 5).
static struct fmt_tests tests[] = {
{"", "G3RG3P00!", {"domain\\username", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "", "1122334455667788"} },
{"$NETHALFLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "G3RG3P00!"},
// Only the first 7 chars of the password are attacked here (PLAINTEXT_LENGTH),
// so "g3rg3p0" cracks the same response as "G3RG3P00!".
{"$NETHALFLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "g3rg3p0"},
{"$NETHALFLM$1122334455667788$1354FD5ABF3B627B8B49587B8F2BBA0F9F6C5E420824E0A2", "zeeez@1"},
{"", "G3RG3P0", {"domain\\username", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "", "1122334455667788"} },
{"", "ZEEEZ@1", {"domain\\username", "", "", "1354FD5ABF3B627B8B49587B8F2BBA0F9F6C5E420824E0A2", "", "1122334455667788"} },
// repeat last hash in exactly the same format that is used in john.pot
{"$NETHALFLM$1122334455667788$1354fd5abf3b627b8b49587b8f2bba0f9f6c5e420824e0a2", "ZEEEZ@1"},
{NULL}
};
static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1];
static uchar (*saved_pre)[8];
static uchar (*output)[BINARY_SIZE];
static uchar *challenge;
// One-time setup: when built with OpenMP, scale the keys-per-crypt range so
// each thread processes OMP_SCALE keys per crypt_all() call, then allocate
// the per-candidate buffers (plaintext, precomputed half-LM key, DES output).
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
saved_pre = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_pre));
output = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*output));
}
// Release the buffers allocated in init(), in reverse allocation order.
static void done(void)
{
MEM_FREE(output);
MEM_FREE(saved_pre);
MEM_FREE(saved_plain);
}
/* Accept only well-formed "$NETHALFLM$<16 hex>$<48 hex>" ciphertexts.
 * Returns 1 when valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	if (strlen(ciphertext) < TOTAL_LENGTH)
		return 0;
	if (ciphertext[27] != '$')
		return 0;
	/* An all-zero second response half means NTLM ESS C/R, not HalfLM */
	if (strncmp(&ciphertext[28 + 2 * SALT_SIZE],
	            "00000000000000000000000000000000", 32) == 0)
		return 0;
	/* Scan the response: it must be exactly CIPHERTEXT_LENGTH hex digits
	 * running to the end of the string (atoi16 maps non-hex to 0x7F). */
	p = &ciphertext[28];
	while (atoi16[ARCH_INDEX(*p)] != 0x7F)
		p++;
	return *p == 0 && (p - &ciphertext[28]) == CIPHERTEXT_LENGTH;
}
// Build a canonical "$NETHALFLM$<challenge>$<lm response>" ciphertext from the
// raw pot-line fields, or return split_fields[1] unchanged when the line is
// already tagged or does not look like a HalfLM C/R entry.
// NOTE(review): per the file header the fields are
// domain\username:::lm response:nt response:challenge, so "srv_challenge"
// actually holds the LM *response* and "cli_challenge" the challenge -- the
// local names are misleading; verify against loader.c before renaming.
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
char *tmp;
char *srv_challenge = split_fields[3];
char *nethashv2 = split_fields[4];
char *cli_challenge = split_fields[5];
// Already in canonical tagged form?
if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN))
return split_fields[1];
if (!srv_challenge || !nethashv2 || !cli_challenge)
return split_fields[1];
// The LM response must be exactly 48 hex chars.
if (strlen(srv_challenge) != CIPHERTEXT_LENGTH)
return split_fields[1];
// if LMresp == NTresp then it's NTLM-only, not LM
if (!strncmp(srv_challenge, nethashv2, 48))
return split_fields[1];
// this string suggests we have an improperly formatted NTLMv2
if (strlen(nethashv2) > 31) {
if (!strncmp(&nethashv2[32], "0101000000000000", 16))
return split_fields[1];
}
// Assemble tag + challenge + '$' + response, then sanity-check via valid().
tmp = (char *) mem_alloc(FORMAT_TAG_LEN + strlen(srv_challenge) + 1 + strlen(cli_challenge) + 1);
sprintf(tmp, "%s%s$%s", FORMAT_TAG, cli_challenge, srv_challenge);
if (valid(tmp,self)) {
char *cp2 = str_alloc_copy(tmp);
MEM_FREE(tmp);
return cp2;
}
MEM_FREE(tmp);
return split_fields[1];
}
// Canonicalize a ciphertext: truncate to the fixed TOTAL_LENGTH and
// lower-case everything after the "$NETHALFLM$" tag so differently-cased
// copies of the same hash unify (FMT_SPLIT_UNIFIES_CASE).
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
// Static storage is zero-initialized, so out[TOTAL_LENGTH] is always '\0'.
static char out[TOTAL_LENGTH + 1] = {0};
memcpy(out, ciphertext, TOTAL_LENGTH);
strlwr(&out[FORMAT_TAG_LEN]); /* Exclude: $NETHALFLM$ */
return out;
}
/* Decode the 48-hex-digit response; only the first BINARY_SIZE (8) bytes --
 * the first DES block of the response -- are kept for comparison.
 * Returns a pointer to a static, 4-byte-aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		uint32_t dummy;
	} binary;
	const char *hex = ciphertext + 28; /* skip tag, challenge and '$' */
	int i;

	for (i = 0; i < BINARY_SIZE; i++) {
		unsigned char hi = atoi16[ARCH_INDEX(hex[2 * i])];
		unsigned char lo = atoi16[ARCH_INDEX(hex[2 * i + 1])];
		binary.c[i] = (unsigned char)((hi << 4) | lo);
	}
	return binary.c;
}
/* Expand a 7-byte (56-bit) key into the 8-byte DES key layout: each output
 * byte carries 7 consecutive key bits in its high bits; DES ignores the low
 * (parity) bit of every byte, so no parity fixup is needed. */
inline static void setup_des_key(unsigned char key_56[], DES_key_schedule *ks)
{
	DES_cblock key;
	int i;

	key[0] = key_56[0];
	for (i = 1; i < 7; i++)
		key[i] = (key_56[i - 1] << (8 - i)) | (key_56[i] >> i);
	key[7] = key_56[6] << 1;
	DES_set_key(&key, ks);
}
// Compute the HalfLM response for every queued candidate: DES-encrypt the
// 8-byte server challenge with the key precomputed in set_key() (saved_pre,
// the first 8 bytes of the candidate's LM hash). One independent DES block
// per candidate, so the loop parallelises trivially.
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
DES_key_schedule ks;
int i;
#ifdef _OPENMP
// default(none): ks is per-thread; the shared arrays are only written at
// disjoint indices i, and challenge/saved_pre are read-only here.
#pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_pre)
#endif
for (i=0; i<count; i++) {
/* DES-encrypt challenge using the partial LM hash */
setup_des_key(saved_pre[i], &ks);
DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)output[i], &ks, DES_ENCRYPT);
}
return count;
}
// Return 1 if any computed response in output[0..count) matches binary.
static int cmp_all(void *binary, int count)
{
int index;
for (index=0; index<count; index++)
if (!memcmp(output[index], binary, BINARY_SIZE))
return 1;
return 0;
}
// Exact 8-byte comparison for one candidate.
static int cmp_one(void *binary, int index)
{
return !memcmp(output[index], binary, BINARY_SIZE);
}
// Final confirmation against the full source ciphertext (re-decoded).
static int cmp_exact(char *source, int index)
{
return !memcmp(output[index], get_binary(source), BINARY_SIZE);
}
/* Decode the 16 hex digits of the challenge (right after the tag) into
 * 8 raw bytes. Returns a pointer to a static, 4-byte-aligned buffer. */
static void *get_salt(char *ciphertext)
{
	static union {
		unsigned char c[SALT_SIZE];
		uint32_t dummy;
	} out;
	const char *hex = ciphertext + FORMAT_TAG_LEN;
	int i;

	for (i = 0; i < SALT_SIZE; i++)
		out.c[i] = (atoi16[ARCH_INDEX(hex[2 * i])] << 4) +
		           atoi16[ARCH_INDEX(hex[2 * i + 1])];
	return (void*)out.c;
}
// Install the 8-byte decoded challenge for subsequent crypt_all() calls.
static void set_salt(void *salt)
{
challenge = salt;
}
// Store a candidate password and precompute the first 8 bytes of its LM hash:
// upper-case the (truncated) password and use it as a DES key to encrypt the
// LM magic constant "KGS!@#$%".
static void netsplitlm_set_key(char *key, int index)
{
const unsigned char magic[] = {0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25};
DES_key_schedule ks;
strnzcpyn((char *)saved_plain[index], key, PLAINTEXT_LENGTH + 1);
/* Upper-case password */
enc_strupper((char *)saved_plain[index]);
/* Generate first 8-bytes of LM hash */
setup_des_key(saved_plain[index], &ks);
DES_ecb_encrypt((DES_cblock*)magic, (DES_cblock*)saved_pre[index], &ks, DES_ENCRYPT);
}
// Return the stored candidate (already upper-cased in place by set_key).
static char *get_key(int index)
{
return (char *)saved_plain[index];
}
// Bucket a salt by its first 32 bits for the salt hash table.
static int salt_hash(void *salt)
{
return *(uint32_t *)salt & (SALT_HASH_SIZE - 1);
}
// get_hash_0..6: successively wider masks over the first 32 bits of the
// computed binary, used by the cracker's candidate hash tables.
static int get_hash_0(int index)
{
return *(uint32_t *)output[index] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return *(uint32_t *)output[index] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return *(uint32_t *)output[index] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return *(uint32_t *)output[index] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return *(uint32_t *)output[index] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return *(uint32_t *)output[index] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return *(uint32_t *)output[index] & PH_MASK_6;
}
// Plugin registration record: format parameters first, then the method table
// wiring the functions above into John the Ripper's fmt_main interface.
struct fmt_main fmt_NETHALFLM = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
// FMT_OMP_BAD presumably flags poor OpenMP scaling for this format
FMT_8_BIT | FMT_TRUNC | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_OMP_BAD,
{ NULL },
{ FORMAT_TAG },
tests
}, {
// lifecycle and parsing methods
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
netsplitlm_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__identity_uint16_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_fp64
// op(A') function: GB_unop_tran__identity_uint16_fp64
// C type: uint16_t
// A type: double
// cast: uint16_t cij = GB_cast_to_uint16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (uint16_t) Ax: apply the IDENTITY operator with a double->uint16_t
// typecast, elementwise over anz entries. (Auto-generated kernel; the loop
// bodies are driven by the macros defined above.)
GrB_Info GB_unop_apply__identity_uint16_fp64
(
uint16_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// sparse/dense case: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint16_t) A': transpose with typecast. The actual loops live in the
// shared template GB_unop_transpose.c, parameterized by the macros above.
GrB_Info GB_unop_tran__identity_uint16_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ComputeMaxNeighboursWorklet.h | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_contourtree_mesh_inc_compute_max_neighbour_worklet_h
#define vtk_m_worklet_contourtree_augmented_contourtree_mesh_inc_compute_max_neighbour_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace mesh_dem_contourtree_mesh_inc
{
// Worklet to update all of the edges so that the far end resets to the result of the ascent in the previous step
// Computes, for each vertex, its neighbour count from the CSR-style offsets
// array: nNeighbours[v] = firstNeighbour[v+1] - firstNeighbour[v], with the
// last vertex closed off by NeighboursSize (the total length of the packed
// neighbours array) since it has no following offset.
class ComputeMaxNeighboursWorklet : public vtkm::worklet::WorkletMapField
{
public:
typedef void ControlSignature(WholeArrayIn firstNeighbour, // (input) firstNeighbour
WholeArrayOut nNeighbours); // (output)
typedef void ExecutionSignature(_1, InputIndex, _2);
typedef _1 InputDomain;
// Default Constructor
// neighboursSize: total number of entries in the packed neighbours array.
VTKM_EXEC_CONT
ComputeMaxNeighboursWorklet(const vtkm::Id neighboursSize)
: NeighboursSize(neighboursSize)
{
}
template <typename OutFieldPortalType, typename InFieldPortalType>
VTKM_EXEC void operator()(const InFieldPortalType& firstNeighbourPortal,
vtkm::Id startVtxNo,
const OutFieldPortalType& nNeighboursPortal) const
{
if (startVtxNo < firstNeighbourPortal.GetNumberOfValues() - 1)
{
// Interior vertex: count is the difference of consecutive offsets.
nNeighboursPortal.Set(startVtxNo,
firstNeighbourPortal.Get(startVtxNo + 1) -
firstNeighbourPortal.Get(startVtxNo));
}
else
{
// Last vertex: no next offset exists, so close with the array total.
nNeighboursPortal.Set(startVtxNo,
NeighboursSize -
firstNeighbourPortal.Get(nNeighboursPortal.GetNumberOfValues() - 1));
}
// In serial this worklet implements the following operation
// #pragma omp parallel for
// for (indexVector::size_type startVtxNo = 0; startVtxNo < firstNeighbour.size()-1; ++startVtxNo)
// {
// nNeighbours[startVtxNo] = firstNeighbour[startVtxNo+1] - firstNeighbour[startVtxNo];
// }
// nNeighbours[nNeighbours.size() - 1] = neighbours.size() - firstNeighbour[nNeighbours.size() - 1];
//
// // NOTE: In the above we change the loop to run for the full length of the array and instead
// // then do a conditional assign for the last element directly within the loop, rather
// // than shortcutting the loop and doing a special assigne after the loop. This allows
// // us to process all elements on the device in parallel rather than having to pull
// // data back into the control area to do the last assignement
}
private:
vtkm::Id NeighboursSize;
}; // ComputeMaxNeighboursWorklet
} // namespace mesh_dem_contourtree_mesh_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif
|
cpu.c | /**
* @file main.c
* @brief This file contains the source code of the application to parallelise.
* @details This application is a classic heat spread simulation.
* @author Ludovic Capelli
**/
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
#include <inttypes.h>
#include <math.h>
#include <sched.h>
#include <unistd.h>
#include <string.h>
#include "util.h"
/**
* @argv[0] Name of the program
* @argv[1] path to the dataset to load
**/
/*
 * Heat-spread simulation: the grid is decomposed into horizontal chunks, one
 * per MPI rank (plus one ghost row above and below); each rank's stencil
 * updates are parallelised with OpenMP.
 *
 * Review fixes applied:
 *  - The OpenMP clause "reduciton(max:...)" was misspelled: the clause is
 *    rejected at compile time, and without a reduction the concurrent max
 *    update would be a data race. Corrected to reduction(max:...).
 *  - Several shared() lists named ROWS_PER_MPI_PROCESS twice; listing a
 *    variable more than once in a data-sharing clause is invalid OpenMP.
 *    The loop-bound/constant names were dropped from shared() entirely --
 *    with no default(none) they are shared by default anyway.
 *  - Every rank now completes the MPI_Igather it starts (previously only the
 *    master called MPI_Wait, leaking requests on the other ranks).
 */
int main(int argc, char* argv[])
{
	(void)argc;
	(void)argv;
	MPI_Init(NULL, NULL);

	/////////////////////////////////////////////////////
	// -- PREPARATION 1: COLLECT USEFUL INFORMATION -- //
	/////////////////////////////////////////////////////
	// Ranks for convenience so that we don't throw raw values all over the code
	const int MASTER_PROCESS_RANK = 0;
	// The rank of the MPI process in charge of this instance
	int my_rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
	// Number of MPI processes in total, commonly called "comm_size" for "communicator size".
	int comm_size;
	MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
	/// Rank of the first MPI process
	const int FIRST_PROCESS_RANK = 0;
	/// Rank of the last MPI process
	const int LAST_PROCESS_RANK = comm_size - 1;
	// Rank of my up neighbour if any; MPI_PROC_NULL turns the matching sends/recvs into no-ops
	int up_neighbour_rank = (my_rank == FIRST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank - 1;
	// Rank of my down neighbour if any
	int down_neighbour_rank = (my_rank == LAST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank + 1;
	//report_placement();

	////////////////////////////////////////////////////////////////////
	// -- PREPARATION 2: INITIALISE TEMPERATURES ON MASTER PROCESS -- //
	////////////////////////////////////////////////////////////////////
	/// Array that will contain my part chunk. It will include the 2 ghost rows (1 up, 1 down)
	double temperatures[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS];
	/// Temperatures from the previous iteration, same dimensions as the array above.
	double temperatures_last[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS];
	/// On master process only: contains all temperatures read from input file.
	// NOTE(review): ROWS x COLUMNS doubles are placed on every rank's stack;
	// consider heap-allocating on the master only for large grids.
	double all_temperatures[ROWS][COLUMNS];

	// The master MPI process will read a chunk from the file, send it to the corresponding MPI process and repeat until all chunks are read.
	if(my_rank == MASTER_PROCESS_RANK)
	{
		initialise_temperatures(all_temperatures);
	}
	MPI_Barrier(MPI_COMM_WORLD);

	///////////////////////////////////////////
	//     ^                                 //
	//    / \                                //
	//   / | \    CODE FROM HERE IS TIMED    //
	//  /  o  \                              //
	// /_______\                             //
	///////////////////////////////////////////

	////////////////////////////////////////////////////////
	// -- TASK 1: DISTRIBUTE DATA TO ALL MPI PROCESSES -- //
	////////////////////////////////////////////////////////
	double total_time_so_far = 0.0;
	double start_time = MPI_Wtime();
	if(my_rank == MASTER_PROCESS_RANK)
	{
		for(int i = 0; i < comm_size; i++)
		{
			// Is the i'th chunk meant for me, the master MPI process?
			if(i != my_rank)
			{
				// No, so send the corresponding chunk to that MPI process.
				MPI_Ssend(&all_temperatures[i * ROWS_PER_MPI_PROCESS][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
			}
			else
			{
				// Yes, let's copy it straight for the array in which we read the file into.
				for(int j = 1; j <= ROWS_PER_MPI_PROCESS; j++)
				{
					for(int k = 0; k < COLUMNS_PER_MPI_PROCESS; k++)
					{
						temperatures_last[j][k] = all_temperatures[j-1][k];
					}
				}
			}
		}
	}
	else
	{
		// Receive my chunk.
		MPI_Recv(&temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	}
	// Collective alternative kept for reference:
	// MPI_Scatter(all_temperatures, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, &temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD);

	// Copy the temperatures into the current iteration temperature as well
	#pragma omp parallel for shared(temperatures, temperatures_last) collapse(2)
	for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
	{
		for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++)
		{
			temperatures[i][j] = temperatures_last[i][j];
		}
	}
	if(my_rank == MASTER_PROCESS_RANK)
	{
		printf("Data acquisition complete.\n");
	}
	// Wait for everybody to receive their part before we can start processing
	MPI_Barrier(MPI_COMM_WORLD);

	/////////////////////////////
	// TASK 2: DATA PROCESSING //
	/////////////////////////////
	int iteration_count = 0;
	/// Maximum temperature change observed across all MPI processes
	double global_temperature_change;
	/// Maximum temperature change for us
	double my_temperature_change;
	/// The last snapshot made (gather target; only meaningful on the master)
	double snapshot[ROWS][COLUMNS];
	while(total_time_so_far < MAX_TIME)
	{
		my_temperature_change = 0.0;
		// ////////////////////////////////////////
		// -- SUBTASK 1: EXCHANGE GHOST CELLS -- //
		// ////////////////////////////////////////
		// Send data to up neighbour for its ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing.
		MPI_Ssend(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD);
		// Receive data from down neighbour to fill our ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing.
		MPI_Recv(&temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		// Send data to down neighbour for its ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing.
		MPI_Ssend(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD);
		// Receive data from up neighbour to fill our ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing.
		MPI_Recv(&temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		// ///////////// Using SendRecv
		// // Send data to up neighbour from down neighbour
		// MPI_Sendrecv(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, &temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		// // Send data to down neighbour from up neighbour
		// MPI_Sendrecv(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, &temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

		/////////////////////////////////////////////
		// -- SUBTASK 2: PROPAGATE TEMPERATURES -- //
		/////////////////////////////////////////////
		#pragma omp parallel for shared(temperatures, temperatures_last) collapse(2)
		for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
		{
			// Process all cells between the first and last columns excluded, which each has both left and right neighbours
			for(int j = 1; j < COLUMNS_PER_MPI_PROCESS - 1; j++)
			{
				if(temperatures[i][j] != MAX_TEMPERATURE)
				{
					temperatures[i][j] = 0.25 * (temperatures_last[i-1][j  ] +
					                             temperatures_last[i+1][j  ] +
					                             temperatures_last[i  ][j-1] +
					                             temperatures_last[i  ][j+1]);
				}
			}
		}
		#pragma omp parallel for shared(temperatures, temperatures_last)
		for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
		{
			// Process the cell at the first column, which has no left neighbour
			if(temperatures[i][0] != MAX_TEMPERATURE)
			{
				temperatures[i][0] = (temperatures_last[i-1][0] +
				                      temperatures_last[i+1][0] +
				                      temperatures_last[i  ][1]) / 3.0;
			}
		}
		#pragma omp parallel for shared(temperatures, temperatures_last)
		for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
		{
			// Process the cell at the last column, which has no right neighbour
			if(temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] != MAX_TEMPERATURE)
			{
				temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] = (temperatures_last[i-1][COLUMNS_PER_MPI_PROCESS - 1] +
				                                                temperatures_last[i+1][COLUMNS_PER_MPI_PROCESS - 1] +
				                                                temperatures_last[i  ][COLUMNS_PER_MPI_PROCESS - 2]) / 3.0;
			}
		}

		// Start the gather of the snapshot here; it overlaps with subtasks
		// 3-4 below, which read but never modify `temperatures`.
		MPI_Request gather_request;
		if(iteration_count % SNAPSHOT_INTERVAL == 0)
		{
			MPI_Igather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD, &gather_request);
		}

		///////////////////////////////////////////////////////
		// -- SUBTASK 3: CALCULATE MAX TEMPERATURE CHANGE -- //
		///////////////////////////////////////////////////////
		my_temperature_change = 0.0;
		// FIX(review): clause was misspelled "reduciton"; a proper max
		// reduction is required for the concurrent update below.
		#pragma omp parallel for shared(temperatures, temperatures_last) collapse(2) reduction(max:my_temperature_change)
		for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
		{
			for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++)
			{
				my_temperature_change = fmax(fabs(temperatures[i][j] - temperatures_last[i][j]), my_temperature_change);
				temperatures_last[i][j] = temperatures[i][j];
			}
		}

		//////////////////////////////////////////////////////////
		// -- SUBTASK 4: FIND MAX TEMPERATURE CHANGE OVERALL -- //
		//////////////////////////////////////////////////////////
		MPI_Request allreduce_request;
		MPI_Iallreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD, &allreduce_request);
		// MPI_Allreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
		// Wait for the all reduce to find the max temp to complete
		MPI_Wait(&allreduce_request, MPI_STATUS_IGNORE);

		///////////////////////////////////
		// -- SUBTASK 6: GET SNAPSHOT -- //
		///////////////////////////////////
		if(iteration_count % SNAPSHOT_INTERVAL == 0)
		{
			// FIX(review): every rank must complete the non-blocking gather it
			// started, not just the master (requests were leaking before).
			MPI_Wait(&gather_request, MPI_STATUS_IGNORE);
			if(my_rank == MASTER_PROCESS_RANK)
			{
				printf("Iteration %d: %.18f\n", iteration_count, global_temperature_change);
			}
			// MPI_Gather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD);
		}

		// Calculate the total time spent processing
		if(my_rank == MASTER_PROCESS_RANK)
		{
			total_time_so_far = MPI_Wtime() - start_time;
		}
		// Send total timer to everybody so they too can exit the loop if more than the allowed runtime has elapsed already
		MPI_Bcast(&total_time_so_far, 1, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD);
		// Update the iteration number
		iteration_count++;
	}

	///////////////////////////////////////////////
	//     ^                                     //
	//    / \                                    //
	//   / | \    CODE FROM HERE IS NOT TIMED    //
	//  /  o  \                                  //
	// /_______\                                 //
	///////////////////////////////////////////////

	/////////////////////////////////////////
	// -- FINALISATION 2: PRINT SUMMARY -- //
	/////////////////////////////////////////
	if(my_rank == MASTER_PROCESS_RANK)
	{
		printf("The program took %.2f seconds in total and executed %d iterations.\n", total_time_so_far, iteration_count);
	}
	MPI_Finalize();
	return EXIT_SUCCESS;
}
|
GB_unaryop__abs_uint16_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint16_int8
// op(A') function: GB_tran__abs_uint16_int8
// C type: uint16_t
// A type: int8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)), elementwise over anz entries. (Auto-generated kernel;
// per the header above, despite "abs" in the name the configured operator is
// `z = x` and the cast is a plain (uint16_t) conversion of the int8 input --
// generated as-is, do not hand-edit.)
GrB_Info GB_unop__abs_uint16_int8
(
uint16_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast. The loops live in the shared
// template GB_unaryop_transpose.c, parameterized by the macros above.
GrB_Info GB_tran__abs_uint16_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__minus_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_uint64
// A.*B function (eWiseMult): GB_AemultB__minus_uint64
// A*D function (colscale): GB_AxD__minus_uint64
// D*A function (rowscale): GB_DxB__minus_uint64
// C+=B function (dense accum): GB_Cdense_accumB__minus_uint64
// C+=b function (dense accum): GB_Cdense_accumb__minus_uint64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_uint64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_uint64
// C=scalar+B GB_bind1st__minus_uint64
// C=scalar+B' GB_bind1st_tran__minus_uint64
// C=A+scalar GB_bind2nd__minus_uint64
// C=A'+scalar GB_bind2nd_tran__minus_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_UINT64 || GxB_NO_MINUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the loop body is supplied by
// the included template, specialized by the GB_BINOP/GB_*TYPE macros above.
void GB_Cdense_ewise3_accum__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation into C).
// Returns GrB_NO_VALUE when compiled out, signalling the generic fallback.
GrB_Info GB_Cdense_ewise3_noaccum__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// slice arrays (kfirst/klast/pstart) that partition B's entries into ntasks
// parallel tasks.
GrB_Info GB_Cdense_accumB__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB_Cdense_accumb__minus_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block always returns above.
// Harmless generated-code artifact; kept as-is (file is auto-generated).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D. C has the same
// pattern as A; only C->x is written here (via the colscale template).
GrB_Info GB_AxD__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (rowscale template).
GrB_Info GB_DxB__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (union of patterns), numeric phase only
// (GB_PHASE_2_OF_2 is defined above). The C_to_* arrays map C's vectors
// back to M/A/B; TaskList partitions the work into ntasks parallel tasks.
GrB_Info GB_AaddB__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (intersection of patterns), numeric
// phase only; work is partitioned by TaskList into ntasks parallel tasks.
GrB_Info GB_AemultB__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p]: apply the binary op with the scalar bound as the
// FIRST argument. All pointers are type-erased GB_void; they are cast to
// uint64_t here. Cx and Bx may alias (the update is elementwise in place).
GrB_Info GB_bind1st__minus_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t bij = Bx [p] ;
// unsigned subtraction: wraps modulo 2^64 when bij > x (well-defined in C)
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y: apply the binary op with the scalar bound as the
// SECOND argument. Cx and Ax may alias.
GrB_Info GB_bind2nd__minus_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
// unsigned subtraction: wraps modulo 2^64 when y > aij (well-defined in C)
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x - aij (scalar bound first),
// via the GB_unop_transpose.c template and the GB_CAST_OP macro redefined
// just above this function.
GrB_Info GB_bind1st_tran__minus_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (same value here, since the
// operator's A type happens to equal uint64_t)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply cij = aij - y (scalar bound second),
// via the GB_unop_transpose.c template and the GB_CAST_OP macro redefined
// just above this function.
GrB_Info GB_bind2nd_tran__minus_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
inputNomain.c | /*
test input
a file without the main entry
By C. Liao
*/
#include <stdio.h>
#ifdef _OPENMP
#include "omp.h"
#endif
// Test input: exercises nested OpenMP parallel regions in a file with no
// main() entry point. Enables nesting (when compiled with OpenMP), then
// runs one flat parallel region and one region containing a nested region.
// Always returns 0. Kept structurally intact — the pragma nesting IS the test.
int foo(void)
{
#ifdef _OPENMP
omp_set_nested(1);
#endif
#pragma omp parallel
printf("Hello,world!\n");
#pragma omp parallel
{
printf("1Hello,world!\n");
// nested region: only forks a new team if nesting is enabled above
#pragma omp parallel
printf("2Hello,world!\n");
}
return 0;
}
|
pr59917-1.c | /* PR middle-end/59917 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */
struct J { long buf[8]; };
extern int setjmp (struct J[1]);
extern struct J j[1];
void foo (int);
/* Regression testcase for PR middle-end/59917: an OpenMP parallel region
   inside a setjmp-guarded block must compile at -O2 -fopenmp. Do not
   restructure — the setjmp + #pragma omp parallel combination is exactly
   what triggers the original ICE. The foo() calls are opaque side effects
   that keep the branches alive. */
void
bar (void)
{
if (setjmp (j) == 0)
{
int k;
foo (-1);
#pragma omp parallel
for (k = 0; k < 10; ++k)
foo (k);
foo (-2);
}
}
|
toimg.c | /* Copyright 2013-2018 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013, 2015 Martin Uecker <uecker@eecs.berkeley.edu>
* 2015, 2018 Jon Tamir <jtamir@eecs.berkeley.edu>
*/
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <strings.h>
#include <complex.h>
#include <stdbool.h>
#include <math.h>
#include "num/multind.h"
#include "num/init.h"
#include "num/flpmath.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/mmio.h"
#include "misc/png.h"
#include "misc/dicom.h"
#include "misc/opts.h"
#ifndef DIMS
#define DIMS 16
#endif
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
static const char usage_str[] = "[-h] <input> <output_prefix>";
static const char help_str[] = "Create magnitude images as png or proto-dicom.\n"
"The first two non-singleton dimensions will\n"
"be used for the image, and the other dimensions\n"
"will be looped over.\n";
// from view:src/draw.c
// Clamp x into the closed interval [a, b] (assumes a <= b).
static double clamp(double a, double b, double x)
{
	if (x < a)
		return a;

	if (x > b)
		return b;

	return x;
}
// Windowing transfer curve: map x linearly so that [a, b] becomes [0, 1],
// clamp to that range, then apply gamma correction with exponent g.
static double windowing(double g, double a, double b, double x)
{
	double t = clamp(0., 1., (x - a) / (b - a));

	return pow(t, g);
}
// Write one h-by-w magnitude image to a png (8-bit gray as RGB) or a
// proto-dicom (16-bit little-endian) file.
//
// dicom          output format selector (also picks 2 vs 3 bytes per pixel)
// use_windowing  apply windowing(gamma, contrast, window) to each value
// name, inum     output file name and image index passed to the writer
// scale          divisor applied to each magnitude before quantization
// data           column-major h*w complex samples (indexed data[j*h + i])
static void toimg(bool dicom, bool use_windowing, const char* name, long inum, float gamma, float contrast, float window, float scale, long h, long w, const complex float* data)
{
int len = strlen(name);
assert(len >= 1);
// bytes per pixel: dicom stores one 16-bit sample, png stores RGB
int nr_bytes = dicom ? 2 : 3;
// VLA-typed pointer: buf is a heap [h][w][nr_bytes] byte array
unsigned char (*buf)[h][w][nr_bytes] = TYPE_ALLOC(unsigned char[h][w][nr_bytes]);
float max_val = dicom ? 65535. : 255.;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
double val = cabsf(data[j * h + i]) / scale;
// NOTE(review): value may exceed max_val when val > 1 and windowing is
// off; the narrowing stores below then wrap — presumably callers choose
// scale so val <= 1. Confirm before relying on saturation.
unsigned int value = (unsigned int)(max_val * (use_windowing ? windowing(gamma, contrast, window, val) : val));
if (!dicom) {
// grayscale written as identical R, G, B channels
(*buf)[i][j][0] = value;
(*buf)[i][j][1] = value;
(*buf)[i][j][2] = value;
} else {
// 16-bit sample, little-endian byte order
(*buf)[i][j][0] = (value >> 0) & 0xFF;
(*buf)[i][j][1] = (value >> 8) & 0xFF;
}
}
}
(dicom ? dicom_write : png_write_rgb24)(name, w, h, inum, &(*buf)[0][0][0]);
free(buf);
}
// Squeeze out singleton dimensions, compute a per-image (or global) scale,
// and write every 2D slice of the input as a separate png/dicom image.
//
// name           output prefix ("-NNNN" and an extension are appended)
// dicom          write 16-bit proto-dicom instead of 8-bit png
// single_scale   scale all images by the global maximum (vs. per image)
// use_windowing  scale by the mean magnitude and apply the windowing curve
static void toimg_stack(const char* name, bool dicom, bool single_scale, bool use_windowing, float gamma, float contrast, float window, const long dims[DIMS], const complex float* data)
{
	long data_size = md_calc_size(DIMS, dims);

	// collapse non-singleton dimensions to the front: the first two form
	// the image, the remaining ones are looped over below
	long sq_dims[DIMS] = { [0 ... DIMS - 1] = 1 };

	int l = 0;

	for (int i = 0; i < DIMS; i++)
		if (1 != dims[i])
			sq_dims[l++] = dims[i];

	// global maximum magnitude (used when single_scale is set)
	float max = 0.;

	for (long i = 0; i < data_size; i++)
		max = MAX(cabsf(data[i]), max);

	int len = strlen(name);
	assert(len >= 1);

	long num_imgs = md_calc_size(DIMS - 2, sq_dims + 2);
	long img_size = md_calc_size(2, sq_dims);

	// FIX: num_imgs is long — the old "%d" specifier was undefined behavior
	debug_printf(DP_INFO, "Writing %ld image(s)...", num_imgs);

#pragma omp parallel for
	for (long i = 0; i < num_imgs; i++) {

		// FIX: the old len + 10 buffer overflowed once the index needed
		// more than 4 digits ("%04ld" is a minimum width, not a maximum);
		// size for any long index and use snprintf as a hard bound
		char name_i[len + 32];

		if (num_imgs > 1)
			snprintf(name_i, sizeof name_i, "%s-%04ld.%s", name, i, dicom ? "dcm" : "png");
		else
			snprintf(name_i, sizeof name_i, "%s.%s", name, dicom ? "dcm" : "png");

		float scale = 0.;

		if (use_windowing)
			scale = md_znorm(2, sq_dims, data + i * img_size) / img_size;
		else if (single_scale)
			scale = max;
		else
			// per-image maximum; loop bound hoisted (img_size is invariant)
			for (long j = 0; j < img_size; j++)
				scale = MAX(cabsf(data[i * img_size + j]), scale);

		// avoid dividing by zero for an all-zero slice
		if (0. == scale)
			scale = 1.;

		toimg(dicom, use_windowing, name_i, i, gamma, contrast, window, scale, sq_dims[0], sq_dims[1], data + i * img_size);
	}

	// FIX: dropped the stray num_imgs argument that had no format specifier
	debug_printf(DP_INFO, "done.\n");
}
// Command-line entry point: parse options, pick the output format from the
// output-prefix extension (.dcm / .png), load the input cfl, and write the
// image stack. Returns 0 on success (cmdline/error abort on failure).
int main_toimg(int argc, char* argv[])
{
float gamma = 1.;
float contrast = 0.;
float window = 750.;
bool use_windowing = false;
bool single_scale = true;
bool dicom = false;
const struct opt_s opts[] = {
OPT_FLOAT('g', &gamma, "gamma", "gamma level"),
OPT_FLOAT('c', &contrast, "contrast", "contrast level"),
OPT_FLOAT('w', &window, "window", "window level"),
OPT_SET('d', &dicom, "write to dicom format (deprecated, use extension .dcm)"),
OPT_CLEAR('m', &single_scale, "re-scale each image"),
OPT_SET('W', &use_windowing, "use dynamic windowing"),
};
cmdline(&argc, argv, 2, 2, usage_str, help_str, ARRAY_SIZE(opts), opts);
num_init();
// Determine the format from the extension and strip it from the prefix.
// NOTE(review): rindex is legacy POSIX — strrchr is the portable spelling.
char* ext = rindex(argv[2], '.');
if (NULL != ext) {
// the deprecated -d flag must not be combined with an explicit extension
assert(!dicom);
if (0 == strcmp(ext, ".dcm"))
dicom = true;
else
if (0 != strcmp(ext, ".png"))
error("Unknown file extension.");
// truncate argv[2] so toimg_stack appends its own extension
*ext = '\0';
}
long dims[DIMS];
complex float* data = load_cfl(argv[1], DIMS, dims);
toimg_stack(argv[2], dicom, single_scale, use_windowing, gamma, contrast, window, dims, data);
unmap_cfl(DIMS, dims, data);
return 0;
}
|
Scene.h | #pragma once
#include "bitmap_image.hpp"
#include "Triangle.h"
#include "Plane.h"
#include <vector>
#include <optional>
#include <algorithm>
#include <atomic>
#include <utility>
#include <chrono>
#include <CL/cl.h>
#pragma comment(lib, "OpenCL.lib")
template<typename T>
using Vec = std::vector<T>;
template<typename T>
using Opt = std::optional<T>;
using namespace std::chrono;
namespace RT
{
// Linearly map x from [in_min, in_max] to [out_min, out_max].
// FIX: marked inline — this is a function *definition* in a header, so
// without inline every translation unit that includes Scene.h emits its own
// external definition, violating the ODR (duplicate-symbol link errors).
inline F64 Map(F64 x, F64 in_min, F64 in_max, F64 out_min, F64 out_max)
{
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;
}
static std::atomic<U64> RaysTraced = 0;
static std::atomic<U64> RaysHit = 0;
static std::atomic<U64> Overlaps = 0;
// A builder-style ray-tracing scene: configure the eye, the image-plane
// corners and the sampling step, add triangles and lights, then call
// Render() to trace the scene into a Bitmap.
struct Scene
{
// Trace the scene into an X-by-Y bitmap, print the global tracing
// statistics and the elapsed time in microseconds, and return the image.
Bitmap Render(U32 X, U32 Y) const
{
Bitmap Image(X, Y);
Image.set_all_channels(0, 0, 0);
auto T1 = high_resolution_clock::now();
RayTrace(Image);
auto T2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>( T2 - T1 ).count();
std::cout << "Traced = " << RaysTraced.load()
<< "\nHit = " << RaysHit.load()
<< "\nOverlaps = " << Overlaps.load()
<< "\nDuration = " << duration << std::endl;
//OpenCL();
return Image;
}
// Fluent setters: each returns *this so configuration calls can be chained.
Scene& WithEye(const Point& P)
{
Eye = P;
return *this;
}
Scene& WithBottomLeft(const Point& T)
{
BottomLeft = T;
return *this;
}
Scene& WithTopRight(const Point& T)
{
TopRight = T;
return *this;
}
Scene& WithStep(F64 S)
{
Step = S;
return *this;
}
Scene& Add(const Triangle& T)
{
Objects.push_back(T);
return *this;
}
Scene& Light(const Point& P)
{
Lights.push_back(P);
return *this;
}
// Scene state: camera position, image-plane corners, geometry and lights.
Point Eye, BottomLeft, TopRight;
Vec<Triangle> Objects;
Vec<Point> Lights;
// Sampling step across the image plane (used by RayTrace/AtomicStep).
F64 Step;
// Stub: always returns 0.0 (light casting is not implemented yet).
F64 CastNearestLight(const Point& Start, const Point& P)
{
return 0.0;
}
// Atomically advance F by Step using a CAS retry loop (std::atomic<double>
// has no fetch_add for floating point before C++20).
void AtomicStep(std::atomic<F64>& F) const
{
auto C = F.load();
while (!F.compare_exchange_weak(C, C + Step));
}
private:
// OpenCL kernel source for the demo below: squares each input element.
const char* kernel = R"(
__kernel void square(__global float* in, __global float* out)
{
int i = get_global_id(0);
out[i] = in[i] * in[i];
}
)";
// Standalone OpenCL demo: squares 1024 floats on the GPU and prints the
// elapsed time. Currently not called from Render() (call is commented out).
// NOTE(review): every Ret error code is assigned but never checked, and the
// 'data', 'out' and 'Output' new[] buffers are never delete[]d — this leaks
// on every call.
void OpenCL() const
{
auto T1 = high_resolution_clock::now();
float* data = new float[1024];
float* out = new float[1024];
for (int i = 0; i < 1024; i++)
{
data[i] = (float)i;
}
cl_platform_id PlatformID = nullptr;
cl_device_id DeviceID = nullptr;
cl_uint NumDevices, NumPlatforms;
cl_int Ret;
Ret = clGetPlatformIDs(1, &PlatformID, &NumPlatforms);
Ret = clGetDeviceIDs(PlatformID, CL_DEVICE_TYPE_GPU, 1, &DeviceID, &NumDevices);
cl_context Context = clCreateContext(nullptr, 1, &DeviceID, nullptr, nullptr, &Ret);
cl_command_queue Queue = clCreateCommandQueue(Context, DeviceID, 0, &Ret);
cl_mem DataMem = clCreateBuffer(Context, CL_MEM_READ_ONLY, sizeof(float) * 1024, nullptr, &Ret);
cl_mem OutMem = clCreateBuffer(Context, CL_MEM_WRITE_ONLY, sizeof(float) * 1024, nullptr, &Ret);
Ret = clEnqueueWriteBuffer(Queue, DataMem, CL_TRUE, 0, sizeof(float) * 1024, data, 0, nullptr, nullptr);
cl_program Program = clCreateProgramWithSource(Context, 1, (const char**)&kernel, nullptr, &Ret);
Ret = clBuildProgram(Program, 1, &DeviceID, nullptr, nullptr, nullptr);
cl_kernel Kernel = clCreateKernel(Program, "square", &Ret);
Ret = clSetKernelArg(Kernel, 0, sizeof(cl_mem), &DataMem);
Ret = clSetKernelArg(Kernel, 1, sizeof(cl_mem), &OutMem);
size_t ItemSize = 1024;
size_t ItemGroup = 64;
Ret = clEnqueueNDRangeKernel(Queue, Kernel, 1, nullptr, &ItemSize, &ItemGroup, 0, nullptr, nullptr);
float* Output = new float[1024];
Ret = clEnqueueReadBuffer(Queue, OutMem, CL_TRUE, 0, sizeof(float) * 1024, Output, 0, nullptr, nullptr);
auto T2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(T2 - T1).count();
std::cout << "OpenCL took " << duration << " microseconds" << std::endl;
Ret = clFlush(Queue);
Ret = clFinish(Queue);
Ret = clReleaseKernel(Kernel);
Ret = clReleaseProgram(Program);
Ret = clReleaseMemObject(DataMem);
Ret = clReleaseMemObject(OutMem);
Ret = clReleaseCommandQueue(Queue);
Ret = clReleaseContext(Context);
}
// Sweep the image plane from BottomLeft to TopRight in increments of Step,
// tracing one ray per sample point.
// NOTE(review): '#pragma omp parallel' (no worksharing clause) makes every
// thread run the same inner while loop; the shared atomic Y hands out
// sample points to whichever thread wins the CAS in AtomicStep. Confirm
// this is the intended distribution scheme rather than a 'parallel for'.
void RayTrace(Bitmap& Img) const
{
std::atomic<F64> X, Y;
X.store(BottomLeft.X);
while(X <= TopRight.X)
{
Y.store(BottomLeft.Y);
#pragma omp parallel
while(Y <= TopRight.Y)
{
TraceOnePoint(Img, { X, Y, BottomLeft.Z });
AtomicStep(Y);
}
AtomicStep(X);
}
}
// Trace a single ray from Eye through P against every triangle; overlapping
// hits at equal-or-lesser distance are colour-averaged, and the resulting
// pixel is written into Img.
void TraceOnePoint(Bitmap& Img, const Point& P) const
{
//colour for pixel
U64 R = 0, G = 0, B = 0,
//how many overlaps
Count = 1;
//cos average
F64 Cos = 0;
U64 CosN = 1;
F64 Dist = 0;
//for every triangle in the scene
for(auto& Tri : Objects)
{
//trace to that triangle
auto I = Plane(Tri).Intersect({Eye, P});
//stats
RaysTraced++;
//if it intersects
if(I.Valid && Tri.Contains(I.Location))
{
Cos += I.Cos;
CosN++;
//stats
RaysHit++;
//figure out the distance between the camera and the hit
auto D = I.Location.DistanceBetween(P);
//call the vertex shader to get colour
auto Colour = Tri.Shader({(U64)(Img.width() * P.X), (U64)(Img.width() * P.Y)}, this, Tri);
//if the distance between the hit point and the actual point
//is less than the last point this triangle must be closer
if(D > Dist)
{
//stats
Overlaps++;
//change the counters to make this triangle at the front
Dist = D;
Count = 1;
R = Colour.red;
G = Colour.green;
B = Colour.blue;
}
else
{
//add colour to average for this ray
Count++;
R += Colour.red;
G += Colour.green;
B += Colour.blue;
}
}
}
//get averages for raycasts
R /= Count;
G /= Count;
B /= Count;
Cos /= CosN;
//get the pixel to set
U32 Col = ((Cos > 0.0)
? (Cos * (Img.width() / 2))
: (Img.width() / 2) + (Cos * -(Img.width() / 2)));
Col %= Img.width();
//set the actual pixel
Img.set_pixel((U32)(Img.width() * P.X), (U32)(Img.width() * P.Y), (U8)R, (U8)G, (U8)B);
}
};
}
|
papi.c | #include "XSbench_header.h"
// Initialize PAPI counters for the calling thread: register the OpenMP
// thread-id callback, create an event set with the events listed below,
// and start counting. On any PAPI failure, prints the error and exits.
//
// eventset        out: the created PAPI event set handle
// num_papi_events out: number of events added to the set
void counter_init( int *eventset, int *num_papi_events )
{
// NOTE(review): error_str is declared but never used here.
char error_str[PAPI_MAX_STR_LEN];
// int events[] = {PAPI_TOT_INS,PAPI_BR_INS,PAPI_SR_INS};
int events[] = {PAPI_TOT_CYC,PAPI_L3_TCM};
int stat;
int thread = omp_get_thread_num();
// only one thread announces initialization
if( thread == 0 )
printf("Initializing PAPI counters...\n");
*num_papi_events = sizeof(events) / sizeof(int);
// tell PAPI how to identify threads (OpenMP thread id)
if ((stat = PAPI_thread_init((long unsigned int (*)(void)) omp_get_thread_num)) != PAPI_OK){
PAPI_perror("PAPI_thread_init");
exit(1);
}
if ( (stat= PAPI_create_eventset(eventset)) != PAPI_OK){
PAPI_perror("PAPI_create_eventset");
exit(1);
}
for( int i = 0; i < *num_papi_events; i++ ){
if ((stat=PAPI_add_event(*eventset,events[i])) != PAPI_OK){
PAPI_perror("PAPI_add_event");
exit(1);
}
}
if ((stat=PAPI_start(*eventset)) != PAPI_OK){
PAPI_perror("PAPI_start");
exit(1);
}
}
// Stops the papi counters and prints results
// Stop the PAPI counters for the calling thread and print one line per
// event (value, symbol, description). Output is serialized with an OpenMP
// critical section so per-thread reports do not interleave.
void counter_stop( int * eventset, int num_papi_events )
{
	int * events = malloc(num_papi_events * sizeof(int));
	long_long * values = malloc( num_papi_events * sizeof(long_long));

	// FIX: check allocations before use (CERT MEM32-C); the original
	// dereferenced both buffers unconditionally
	if( events == NULL || values == NULL ){
		fprintf(stderr, "counter_stop: out of memory\n");
		exit(1);
	}

	int n = num_papi_events;
	PAPI_list_events( *eventset, events, &n );

	PAPI_event_info_t info;

	PAPI_stop(*eventset, values);

	int thread = omp_get_thread_num();

	#if defined(_OPENMP)
	#pragma omp critical (papi)
	#endif
	{
		printf("Thread %d\n", thread);
		for( int i = 0; i < num_papi_events; i++ )
		{
			PAPI_get_event_info(events[i], &info);
			printf("%-15lld\t%s\t%s\n", values[i],info.symbol,info.long_descr);
		}
	}

	// freed outside the critical section: the buffers are per-call locals,
	// so there is no need to serialize their release
	free(events);
	free(values);
}
|
GB_binop__gt_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint64)
// A*D function (colscale): GB (_AxD__gt_uint64)
// D*A function (rowscale): GB (_DxB__gt_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint64)
// C=scalar+B GB (_bind1st__gt_uint64)
// C=scalar+B' GB (_bind1st_tran__gt_uint64)
// C=A+scalar GB (_bind2nd__gt_uint64)
// C=A'+scalar GB (_bind2nd_tran__gt_uint64)
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT64 || GxB_NO_GT_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B (all dense) placeholder: this operator (GT) is not in the set
// MIN/MAX/PLUS/MINUS/RMINUS/TIMES/DIV/RDIV, so the whole definition is
// compiled out by the surrounding #if 0 and the name expands to (none).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation); the loop
// body comes from the included template, specialized by the macros above.
GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse into dense): the template body is compiled out by the
// inner #if 0 for this operator (GT is not a valid accumulator here), so
// the function succeeds without doing any work.
GrB_Info GB (_Cdense_accumB__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar into dense): the template body is compiled out by the
// inner #if 0 for this operator, so the function succeeds without work.
GrB_Info GB (_Cdense_accumb__gt_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D. C's values are
// bool here (result type of the GT comparator); only C->x is written.
GrB_Info GB (_AxD__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (rowscale template).
GrB_Info GB (_DxB__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B (union of patterns), numeric
// phase only. The GB_WERK_DECLARE workspaces are used by the template and
// released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__gt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper;
// numeric phase only, body supplied by the GB_emult_08 meta-template.
GrB_Info GB (_AemultB_08__gt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for GT (see the macros above), so only
// the non-flipped template instantiation is compiled in here.
GrB_Info GB (_AemultB_02__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; body supplied by the GB_emult_04 template.
GrB_Info GB (_AemultB_04__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the GT_UINT64 operator,
// where the result C is bitmap.  ewise_method selects the masking variant;
// the kernel body is the shared template GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__gt_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]): apply GT_UINT64 with the scalar bound as the
// first operand.  Entries absent from the bitmap Bb are skipped; the loop
// is statically scheduled across nthreads OpenMP threads.
GrB_Info GB (_bind1st__gt_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
bool *Cx = (bool *) Cx_output ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < bnz ; p++)
{
// only entries present in the bitmap of B are computed
if (GBB (Bb, p))
{
uint64_t b = GBX (Bx, p, false) ;
Cx [p] = (x > b) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y): apply GT_UINT64 with the scalar bound as the
// second operand.  Entries absent from the bitmap Ab are skipped; the loop
// is statically scheduled across nthreads OpenMP threads.
GrB_Info GB (_bind2nd__gt_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
bool *Cx = (bool *) Cx_output ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// only entries present in the bitmap of A are computed
if (GBB (Ab, p))
{
uint64_t a = GBX (Ax, p, false) ;
Cx [p] = (a > y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A') for GT_UINT64: transpose A while applying
// cij = (x > aij), via the GB_CAST_OP macro defined just above and the
// shared GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__gt_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE after the template (redefinition is intentional,
// generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y) for GT_UINT64: transpose A while applying
// cij = (aij > y), via the GB_CAST_OP macro defined just above and the
// shared GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kernel_cpu.c | #ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
//#include <omp.h> // (in path known to compiler) needed by openmp
#include <stdlib.h> // (in path known to compiler) needed by malloc
#include <stdio.h> // (in path known to compiler) needed by printf
#include <math.h> // (in path known to compiler) needed by exp
//======================================================================================================================================================150
// MAIN FUNCTION HEADER
//======================================================================================================================================================150
#include "./../main.h" // (in the main program folder) needed to recognized input variables
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL_CPU FUNCTION HEADER
//======================================================================================================================================================150
#include "kernel_cpu.h" // (in the current directory)
//========================================================================================================================================================================================================200
// PLASMAKERNEL_GPU
//========================================================================================================================================================================================================200
// Compute all pairwise particle interactions (lavaMD-style MD kernel).
// For each home box l, and for the home box plus each of its neighbor
// boxes, every particle pair (i,j) contributes an exponentially-screened
// term to the force (x,y,z) and potential (v) accumulated in fv.
//
// par - simulation parameters (reads par.alpha)
// dim - problem dimensions (reads dim.number_boxes)
// box - per-box metadata: offset into the particle arrays, neighbor list
// rv  - particle positions/v component (read-only)
// qv  - particle charges (read-only)
// fv  - per-particle accumulated force/potential (updated in place;
//       caller is expected to have initialized it)
//
// Fix vs. original: the timing printfs used a bare "% :" to print a
// literal percent sign; "% " begins a printf conversion with no valid
// specifier and no matching argument, which is undefined behavior.
// Replaced with "%%".
void kernel_cpu( par_str par,
dim_str dim,
box_str* box,
FOUR_VECTOR* rv,
fp* qv,
FOUR_VECTOR* fv)
{
//======================================================================================================================================================150
// Variables
//======================================================================================================================================================150
// timer
long long time0;
time0 = get_time();
// timer
long long time1;
long long time2;
long long time3;
long long time4;
// parameters
fp alpha;
fp a2;
// counters
int i, j, k, l;
// home box
long first_i;
FOUR_VECTOR* rA;
FOUR_VECTOR* fA;
// neighbor box
int pointer;
long first_j;
FOUR_VECTOR* rB;
fp* qB;
// common
fp r2;
fp u2;
fp fs;
fp vij;
fp fxij,fyij,fzij;
THREE_VECTOR d;
time1 = get_time();
//======================================================================================================================================================150
// MCPU SETUP
//======================================================================================================================================================150
//omp_set_num_threads(dim.cores_arg);
time2 = get_time();
//======================================================================================================================================================150
// INPUTS
//======================================================================================================================================================150
alpha = par.alpha;
a2 = 2.0*alpha*alpha;   // screening coefficient used in exp(-a2*r2)
time3 = get_time();
//======================================================================================================================================================150
// PROCESS INTERACTIONS
//======================================================================================================================================================150
printf("the value of NUMBER_PAR_PER_BOX is: %d \n", NUMBER_PAR_PER_BOX);
// OpenMP parallelization over home boxes (kept disabled, as in the
// original; the private list documents what each thread would own):
//#pragma omp parallel for \
private(i, j, k) \
private(first_i, rA, fA) \
private(pointer, first_j, rB, qB) \
private(r2, u2, fs, vij, fxij, fyij, fzij, d)
for(l=0; l<dim.number_boxes; l=l+1){
//------------------------------------------------------------------------------------------100
// home box - box parameters
//------------------------------------------------------------------------------------------100
first_i = box[l].offset; // offset to common arrays
//------------------------------------------------------------------------------------------100
// home box - distance, force, charge and type parameters from common arrays
//------------------------------------------------------------------------------------------100
rA = &rv[first_i];
fA = &fv[first_i];
//------------------------------------------------------------------------------------------100
// Do for the # of (home+neighbor) boxes
//------------------------------------------------------------------------------------------100
for (k=0; k<(1+box[l].nn); k++)
{
//----------------------------------------50
// neighbor box - get pointer to the right box
//----------------------------------------50
if(k==0){
pointer = l; // set first box to be processed to home box
}
else{
pointer = box[l].nei[k-1].number; // remaining boxes are neighbor boxes
}
//----------------------------------------50
// neighbor box - box parameters
//----------------------------------------50
first_j = box[pointer].offset;
//----------------------------------------50
// neighbor box - distance, force, charge and type parameters
//----------------------------------------50
rB = &rv[first_j];
qB = &qv[first_j];
//----------------------------------------50
// Do for the # of particles in home box
//----------------------------------------50
for (i=0; i<NUMBER_PAR_PER_BOX; i=i+1){
// do for the # of particles in current (home or neighbor) box
for (j=0; j<NUMBER_PAR_PER_BOX; j=j+1){
// coefficients: squared distance via the DOT identity, then
// the screened pair potential vij and force scale fs
r2 = rA[i].v + rB[j].v - DOT(rA[i],rB[j]);
u2 = a2*r2;
vij= exp(-u2);
fs = 2.*vij;
d.x = rA[i].x - rB[j].x;
d.y = rA[i].y - rB[j].y;
d.z = rA[i].z - rB[j].z;
fxij=fs*d.x;
fyij=fs*d.y;
fzij=fs*d.z;
// forces: accumulate charge-weighted contributions on particle i
fA[i].v += qB[j]*vij;
fA[i].x += qB[j]*fxij;
fA[i].y += qB[j]*fyij;
fA[i].z += qB[j]*fzij;
} // for j
} // for i
} // for k
} // for l
time4 = get_time();
//======================================================================================================================================================150
// DISPLAY TIMING
//======================================================================================================================================================150
printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
printf("%15.12f s, %15.12f %% : CPU/MCPU: VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time4-time0) * 100);
printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time4-time0) * 100);
printf("%15.12f s, %15.12f %% : CPU/MCPU: INPUTS\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time4-time0) * 100);
printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time4-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time4-time0) / 1000000);
} // main
#ifdef __cplusplus
}
#endif
|
GB_unaryop__lnot_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_bool
// op(A') function: GB_tran__lnot_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT unary operator to every entry: Cx [p] = !Ax [p].
// All anz values are processed densely (no pattern here), statically
// scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__lnot_bool_bool
(
bool *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = !Ax [p], via the GB_CAST_OP macro defined above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying LNOT, using the shared
// template GB_unaryop_transpose.c (phase 2 of 2: fill in C, with row
// counts already computed by the caller).
GrB_Info GB_tran__lnot_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
MaxLut.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <array>
#include <vector>
#include "bb/BinaryLutModel.h"
namespace bb {
// LUT-style max layer.
// NOTE(review): the original comment here said "LUT-version popcount" --
// presumably copied from a sibling class; the forward pass below computes
// a max over the selected inputs.  Confirm intended description.
template <typename BinType = float, typename RealType = float>
class MaxLut : public BinaryLutModel
{
using _super = BinaryLutModel;
public:
static inline std::string ClassName(void) { return "MaxLut"; }
static inline std::string ObjectName(void){ return ClassName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); }
std::string GetModelName(void) const override { return ClassName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false;   // force the generic CPU path even when CUDA is available
bool m_binarize_input = false;   // binarize inputs (used by the CUDA forward/backward paths)
bool m_binarize_output = true;   // binarize the max value before writing the output
std::string m_connection;   // input-connection policy name, forwarded to InitializeNodeInput
int m_n = 6;   // number of inputs per LUT node
indices_t m_input_shape;
indices_t m_output_shape;
Tensor_<std::int32_t> m_input_index;   // (node, i) -> index of that node's i-th input node
std::mt19937_64 m_mt;   // RNG used when (re)wiring connections
public:
// construction parameters
struct create_t
{
int n = 6;   // inputs per node
indices_t output_shape;
std::string connection = "";
bool binarize_input = false;
bool binarize_output = true;
std::uint64_t seed = 1;
};
protected:
// construct from create_t; allocates the (node x n) input-index table
MaxLut(create_t const &create)
{
BB_ASSERT(!create.output_shape.empty());
m_mt.seed(create.seed);
m_n = create.n;
m_output_shape = create.output_shape;
m_connection = create.connection;
m_input_index.Resize(CalcShapeSize(m_output_shape), (index_t)m_n);
}
// runtime configuration via command strings
void CommandProc(std::vector<std::string> args) override
{
// host-only mode setting
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
// binary mode setting (sets input and output binarization together)
if ( args.size() == 2 && args[0] == "binary" )
{
m_binarize_input = EvalBool(args[1]);
m_binarize_output = EvalBool(args[1]);
}
if ( args.size() == 2 && args[0] == "binarize_input" )
{
m_binarize_input = EvalBool(args[1]);
}
if ( args.size() == 2 && args[0] == "binarize_output" )
{
m_binarize_output = EvalBool(args[1]);
}
}
public:
~MaxLut() {}
// factory: construct from a fully-populated create_t
static std::shared_ptr<MaxLut> Create(create_t const &create)
{
return std::shared_ptr<MaxLut>(new MaxLut(create));
}
// factory: construct from an explicit output shape
static std::shared_ptr<MaxLut> Create(int n, indices_t const &output_shape, std::string connection = "", bool binarize = true, bool binarize_input = false, std::uint64_t seed = 1)
{
create_t create;
create.n = n;
create.output_shape = output_shape;
create.connection = connection;
create.binarize_input = binarize_input;
create.binarize_output = binarize;
create.seed = seed;
return Create(create);
}
// factory: construct from a flat output node count (1-D shape)
static std::shared_ptr<MaxLut> Create(int n, index_t output_node_size, std::string connection = "", bool binarize = true, bool binarize_input = false, std::uint64_t seed = 1)
{
create_t create;
create.n = n;
create.output_shape.resize(1);
create.output_shape[0] = output_node_size;
create.connection = connection;
create.binarize_input = binarize_input;
create.binarize_output = binarize;
create.seed = seed;
return Create(create);
}
// factory: all defaults
static std::shared_ptr<MaxLut> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11 // for Python bindings
static std::shared_ptr<MaxLut> CreatePy(
int n,
indices_t output_shape,
std::string connection="",
bool binarize = true,
bool binarize_input = false,
std::uint64_t seed = 1)
{
create_t create;
create.n = n;
create.output_shape = output_shape;
create.connection = connection;
create.binarize_input = binarize_input;
create.binarize_output = binarize;
create.seed = seed;
return Create(create);
}
#endif
// direct (mutable / const) access to the input-index table
auto lock_InputIndex(void) { return m_input_index.Lock(); }
auto lock_InputIndex_const(void) const { return m_input_index.LockConst(); }
// sparse-connection management
index_t GetNodeConnectionSize(index_t node) const override
{
return m_n;
}
// wire input slot input_index of 'node' to global input node input_node
void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node) override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < m_n);
BB_DEBUG_ASSERT(input_node >= 0 && input_node < GetInputNodeSize());
auto ptr = lock_InputIndex();
ptr(node, input_index) = (std::int32_t)input_node;
}
index_t GetNodeConnectionIndex(index_t node, index_t input_index) const override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < m_n);
auto ptr = lock_InputIndex_const();
return (index_t)ptr(node, input_index);
}
// LUT table interface
int GetLutTableSize(index_t node) const
{
return (1 << m_n);
}
// the table is fixed (derived from the max function), so writes are ignored
void SetLutTable(index_t node, int bitpos, bool value) override
{
}
// Value of the virtual LUT at entry 'bitpos': true when more than half of
// the m_n input bits of bitpos are 1 (majority vote).
// NOTE(review): for a pure max/OR of binary inputs a single set bit would
// suffice; confirm the majority rule is the intended table.
bool GetLutTable(index_t node, int bitpos) const override
{
int count = 0;
for ( int i = 0; i < m_n; ++i ) {
count += (bitpos & 1) ? +1 : -1;
bitpos >>= 1;
}
return count > 0;
}
/**
* @brief set the input shape
* @detail stores the shape and (re)initializes the input connections
* @param shape new shape
* @return the output shape
*/
indices_t SetInputShape(indices_t shape) override
{
// do nothing if already configured with this shape
if ( shape == this->GetInputShape() ) {
return this->GetOutputShape();
}
// store the shape
m_input_shape = shape;
// initialize input connections
this->InitializeNodeInput(m_mt(), m_connection);
return m_output_shape;
}
/**
* @brief set the output shape
* @detail the shape may be changed freely as long as the number of
* output nodes is unchanged
* @param shape new shape
* @return none
*/
void SetOutputShape(indices_t const &shape)
{
BB_ASSERT(CalcShapeSize(shape) == this->m_output_node_size);
m_output_shape = shape;
}
/**
* @brief get the input shape
* @detail returns the configured input shape
* @return the input shape
*/
indices_t GetInputShape(void) const override
{
return m_input_shape;
}
/**
* @brief get the output shape
* @detail returns the configured output shape
* @return the output shape
*/
indices_t GetOutputShape(void) const override
{
return m_output_shape;
}
public:
// Forward pass: for each output node, take the max over its m_n selected
// inputs (optionally binarizing the result).  Dispatches to CUDA kernels
// when available, otherwise uses the generic CPU loop below.
FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
BB_ASSERT(x_buf.GetType() == DataType<BinType>::type);
// set the input shape on first use if not yet configured
if (x_buf.GetShape() != m_input_shape) {
SetInputShape(x_buf.GetShape());
}
if (train) {
this->PushFrameBuffer(x_buf);
}
// allocate the output buffer
FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<BinType>::type);
#ifdef BB_WITH_CUDA
if ( DataType<BinType>::type == BB_TYPE_FP32 && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_MaxLut_Forward<float>
(
(float const *)x_ptr.GetAddr(),
(float *)y_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(float)),
(bool )m_binarize_input,
(bool )m_binarize_output
);
return y_buf;
}
if ( DataType<BinType>::type == BB_TYPE_BIT && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_bit_MaxLut_Forward
(
(int const *)x_ptr.GetAddr(),
(int *)y_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(int))
);
return y_buf;
}
#endif
{
// generic (CPU) implementation
auto x_ptr = x_buf.LockConst<BinType>();
auto y_ptr = y_buf.Lock<BinType>();
auto input_index_ptr = m_input_index.LockConst();
index_t frame_size = x_buf.GetFrameSize();
index_t node_size = this->GetOutputNodeSize();
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
for (index_t frame = 0; frame < frame_size; ++frame) {
BinType max_val = (BinType)BB_BINARY_LO;
for (index_t i = 0; i < m_n; i++) {
index_t input_node = input_index_ptr(node, i);
// NOTE(review): cast goes via RealType before assigning to
// BinType, mirroring the Backward path -- confirm intended
BinType val = (RealType)x_ptr.Get(frame, input_node);
if (val > max_val) {
max_val = val;
}
}
if (m_binarize_output) {
max_val = (max_val > (BinType)0) ? (BinType)BB_BINARY_HI : (BinType)BB_BINARY_LO;
}
y_ptr.Set(frame, node, max_val);
}
}
return y_buf;
}
}
// Backward pass: routes each node's gradient to the input that produced
// the max in the forward pass (argmax routing, like max-pooling).
FrameBuffer Backward(FrameBuffer dy_buf) override
{
if (dy_buf.Empty()) {
return dy_buf;
}
BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type);
FrameBuffer x_buf = this->PopFrameBuffer();
BB_ASSERT(x_buf.GetType() == DataType<BinType>::type);
// allocate the output (input-gradient) buffer
FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<RealType>::type);
#ifdef BB_WITH_CUDA
if ( DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto dy_ptr = dy_buf.LockDeviceMemoryConst();
auto dx_ptr = dx_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_MaxLut_Backward<float>
(
(float const *)x_ptr.GetAddr(),
(float const *)dy_ptr.GetAddr(),
(float *)dx_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )dx_buf.GetNodeSize(),
(int )dy_buf.GetNodeSize(),
(int )dy_buf.GetFrameSize(),
(int )(dy_buf.GetFrameStride() / sizeof(float)),
(bool )m_binarize_input
);
return dx_buf;
}
if ( DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto dy_ptr = dy_buf.LockDeviceMemoryConst();
auto dx_ptr = dx_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_bit_MaxLut_Backward<float>
(
(int const *)x_ptr.GetAddr(),
(float const *)dy_ptr.GetAddr(),
(float *)dx_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )dx_buf.GetNodeSize(),
(int )dy_buf.GetNodeSize(),
(int )dy_buf.GetFrameSize(),
(int )(x_buf.GetFrameStride() / sizeof(int)),
(int )(dy_buf.GetFrameStride() / sizeof(float))
);
return dx_buf;
}
#endif
{
// generic (CPU) implementation
dx_buf.FillZero();
auto x_ptr = x_buf.LockConst<BinType>();
auto dy_ptr = dy_buf.LockConst<RealType>();
auto dx_ptr = dx_buf.Lock<RealType>();
auto input_index_ptr = m_input_index.LockConst();
index_t frame_size = dy_buf.GetFrameSize();
index_t node_size = this->GetOutputNodeSize();
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
for (index_t frame = 0; frame < frame_size; ++frame) {
// recompute the argmax of the forward pass
BinType max_val = (BinType)BB_BINARY_LO;
index_t max_idx = 0;
for (index_t i = 0; i < m_n; i++) {
index_t input_node = input_index_ptr(node, i);
BinType val = (RealType)x_ptr.Get(frame, input_node);
if (val > max_val) {
max_val = val;
max_idx = i;
}
}
// route the whole gradient to the max input
// (Add: several nodes may share an input)
auto dx = dy_ptr.Get(frame, node);
index_t input_node = input_index_ptr(node, max_idx);
dx_ptr.Add(frame, input_node, dx);
}
}
return dx_buf;
}
}
// serialization
protected:
void DumpObjectData(std::ostream &os) const override
{
// version
std::int64_t ver = 1;
bb::SaveValue(os, ver);
// parent class
_super::DumpObjectData(os);
// members
bb::SaveValue(os, m_n);
bb::SaveValue(os, m_host_only);
bb::SaveValue(os, m_connection);
bb::SaveValue(os, m_input_shape);
bb::SaveValue(os, m_output_shape);
bb::SaveValue(os, m_binarize_input);
bb::SaveValue(os, m_binarize_output);
m_input_index.DumpObject(os);
}
void LoadObjectData(std::istream &is) override
{
// version
std::int64_t ver;
bb::LoadValue(is, ver);
BB_ASSERT(ver == 1);
// parent class
_super::LoadObjectData(is);
// members (must match DumpObjectData order exactly)
bb::LoadValue(is, m_n);
bb::LoadValue(is, m_host_only);
bb::LoadValue(is, m_connection);
bb::LoadValue(is, m_input_shape);
bb::LoadValue(is, m_output_shape);
bb::LoadValue(is, m_binarize_input);
bb::LoadValue(is, m_binarize_output);
m_input_index.LoadObject(is);
}
};
} |
GB_unaryop__one_int8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_int8_int8
// op(A') function: GB_tran__one_int8_int8
// C type: int8_t
// A type: int8_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CASTING(z, aij) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ONE unary operator to every entry: Cx [p] = 1 (the input
// value Ax [p] is never read; see the GB_GETA/GB_CASTING macros above,
// which expand to nothing).  Statically scheduled over nthreads threads.
GrB_Info GB_unop__one_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = 1, via the GB_CAST_OP macro defined above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying the ONE operator, using
// the shared template GB_unaryop_transpose.c (phase 2 of 2: fill in C).
GrB_Info GB_tran__one_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Partition.h | /*
* Partition.h
*
* Created on: 03.10.2013
* Author: cls
*/
#ifndef PARTITION_H_
#define PARTITION_H_
#include <cinttypes>
#include <set>
#include <vector>
#include <map>
#include <cassert>
#include <limits>
#include "../Globals.h"
namespace NetworKit {
/**
* @ingroup structures
* Implements a partition of a set, i.e. a subdivision of the
* set into disjoint subsets.
*/
class Partition {
public:
Partition();
/**
* Create a new partition data structure for @a z elements.
*
* @param[in] z maximum index
*/
Partition(index z);
/**
* Create a new partition data structure for @a z elements.
* Initialize each entry to the default value.
* WARNING: this circumvents the standard interface and may leave the object
* in an inconsistent state. Use only in exceptional cases.
*
* @param[in] z maximum index
* @param[in] defaultValue
*/
Partition(index z, index defaultValue);
Partition(const std::vector<index>& data);
/**
* Index operator.
*
* @param[in] e an element
*/
// Mutable access to the raw subset id stored for element e.
inline index& operator [](const index& e) {
return data[e];
}
/**
* Index operator for const instances of this class.
*
* @param[in] e an element
*/
// Read-only access to the raw subset id stored for element e.
inline const index& operator [](const index& e) const {
return data[e];
}
/**
* Get the set (id) in which the element @a e is contained.
*
* @param e Index of element.
* @return The index of the set in which @a e is contained.
*/
// Return the id of the subset that contains element e.
inline index subsetOf(index e) const {
assert (e < numberOfElements());
return data[e];
}
/**
* Extend the data structure and create a slot
* for one more element. Initializes the entry to none
* and returns the index of the entry.
*/
// Grow the partition by one element, initialized to 'none' (unassigned),
// and return that element's index.
inline index extend() {
const index newElem = z;
data.push_back(none);
++z;
assert (z == data.size());
return newElem;
}
/**
* Removes the entry for the given element
* by setting it to none.
*/
// Unassign element e by storing the 'none' sentinel.
inline void remove(index e) {
assert (e < z);
this->data[e] = none;
}
/**
* Add a (previously unassigned) element @a e to the set @a s.
*
* @param s The index of the subset.
* @param e The element to add.
*/
// Assign the previously unassigned element e to the existing subset s.
inline void addToSubset(index s, index e) {
assert (data[e] == none);   // element must not be assigned yet
assert (s <= omega);        // never invent a new subset id here
data[e] = s;
}
/**
* Move the (previously assigned) element @a e to the set @a s.
*
* @param s The index of the subset.
* @param e The element to move.
*/
// Reassign the already-assigned element e to the existing subset s.
inline void moveToSubset(index s, index e) {
assert (contains(e));   // element must currently be assigned
assert (s <= omega);    // never invent a new subset id here
data[e] = s;
}
/**
* Creates a singleton set containing the element @a e.
*
* @param e The index of the element.
*/
// place e in a brand-new subset whose id comes from newSubsetId()
inline void toSingleton(index e) {
data[e] = newSubsetId();
}
/**
* Assigns every element to a singleton set.
* Set id is equal to element id.
*/
void allToSingletons();
/**
* Assigns every element to the same subset.
* Set id is equal to zero.
*/
void allToOnePartition();
/**
* Assigns the elements from both sets to a new set and returns the id of it.
*
* @param s Set to merge.
* @param t Set to merge.
* @return Id of newly created set.
*/
index mergeSubsets(index s, index t);
/**
 * Sets an upper bound for the subset ids that CAN be assigned.
 * Internally stores omega = upper - 1, the largest usable id.
 *
 * @param[in] upper highest assigned subset ID + 1
 */
inline void setUpperBound(index upper) {
    this->omega = upper-1;
}
/**
 * Return an upper bound for the subset ids that have been assigned,
 * i.e. the maximum id + 1 (an exclusive bound).
 *
 * @return The upper bound.
 */
inline index upperBound() const {
    return omega+1;
}
/**
 * Get a lower bound for the subset ids that have been assigned.
 * Always 0 for this data structure.
 *
 * @return The lower bound.
 */
inline index lowerBound() const {
    return 0;
}
/**
* Change subset IDs to be consecutive, starting at 0.
* @param useTurbo Default: false. If set to true, a vector instead of a map to assign new ids
* which results in a shorter running time but possibly a large space overhead.
*/
void compact(bool useTurbo = false);
bool isCompact() const {
std::vector<bool> id_contained(upperBound(), false);
for (index e = 0; e < z; ++e) {
id_contained[data[e]] = true;
}
for (index e = 0; e < upperBound(); ++e) {
if (!id_contained[e]) {
return false;
}
}
return true;
}
/**
 * Check if partition assigns a valid subset to the element @a e.
 *
 * @param e The element.
 * @return @c true if the assigned subset is valid, @c false otherwise.
 */
inline bool contains(index e) const {
    return (e < z) && (data[e] != none); // e is in the element index range and the entry is not empty
}
/**
 * Check if two elements @a e1 and @a e2 belong to the same subset.
 * Both elements must be assigned (asserted in debug builds only);
 * element indices are not range-checked here.
 *
 * @param e1 Element.
 * @param e2 Element.
 * @return @c true if @a e1 and @a e2 belong to same subset, @c false otherwise.
 */
inline bool inSameSubset(index e1, index e2) const {
    assert (data[e1] != none);
    assert (data[e2] != none);
    return (data[e1] == data[e2]);
}
/**
* Get a list of subset sizes. Indices do not necessarily correspond to subset ids.
*
* @return A vector of subset sizes.
*/
std::vector<count> subsetSizes() const;
/**
* Get a map from subset id to size of the subset.
*
* @return A map from subset id to size of the subset.
*/
std::map<index, count> subsetSizeMap() const;
/**
* Get the members of the subset @a s.
*
* @param s The subset.
* @return A set containing the members of @a s.
*/
std::set<index> getMembers(const index s) const;
/**
* @return number of elements in the partition.
*/
inline count numberOfElements() const {
return z; // z is the maximum element id
}
/**
* Get the current number of sets in this partition.
*
* @return The current number of sets.
*/
count numberOfSubsets() const;
/**
* Get the actual vector representing the partition data structure.
* @return vector containing information about partitions.
*/
std::vector<index> getVector() const;
/**
* Move the vector representing the partition data structure and leave behind an invalid vector
* @return vector containing partition
*/
std::vector<index> moveVector();
/**
* @return the subsets of the partition as a set of sets.
*/
std::set<std::set<index> > getSubsets() const;
/**
* Get the ids of nonempty subsets.
*
* @return A set of ids of nonempty subsets.
*/
std::set<index> getSubsetIds() const;
/**
 * Set a human-readable identifier @a name for the instance.
 *
 * @param name The name (taken by value and moved into the member to
 *             avoid an extra string copy).
 */
inline void setName(std::string name) {
    this->name = std::move(name);
}
/**
 * Get the human-readable identifier.
 *
 * @return A copy of the name of this partition.
 */
inline std::string getName() const {
    return this->name;
}
/**
* Iterate over all entries (node, cluster id) and execute callback function @a func (lambda closure).
*
* @param func Takes parameters <code>(node, index)</code>
*/
template<typename Callback> void forEntries(Callback func) const;
/**
* Iterate over all entries (node, cluster id) in parallel and execute callback function @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, index)</code>
*/
template<typename Callback> void parallelForEntries(Callback handle, bool parallel = true) const;
private:
index z; //!< maximum element index that can be mapped
index omega; //!< maximum subset index ever assigned
std::vector<index> data; //!< data container, indexed by element index, containing subset index
std::string name;
/**
 * Allocate a fresh subset id by advancing omega, and return it.
 */
inline index newSubsetId() {
    return ++omega;
}
};
// Sequentially visit every (element, subset id) pair, including
// unassigned entries (whose subset id is none).
template<typename Callback>
inline void Partition::forEntries(Callback handle) const {
    for (index e = 0; e < this->z; e++) {
        handle(e, data[e]);
    }
}
// Visit every (element, subset id) pair, possibly in parallel.
// omp_index is used because OpenMP requires a signed loop counter;
// the "if" clause lets callers request a sequential run at runtime.
template<typename Callback>
inline void Partition::parallelForEntries(Callback handle, bool parallel) const {
    #pragma omp parallel for if (parallel)
    for (omp_index e = 0; e < static_cast<omp_index>(this->z); e++) {
        handle(e, this->data[e]);
    }
}
} /* namespace NetworKit */
#endif /* PARTITION_H_ */
|
trsm_x_dia_u_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Triangular solve op(A) * y = alpha * x for a DIA (diagonal-storage) matrix:
 * unit-diagonal, lower-triangular, row-major, multiple right-hand sides.
 *
 * Fix: the search for the main diagonal used to run under
 * "#pragma omp parallel for" while writing the shared scalar
 * main_diag_pos from every thread — an unsynchronized data race.
 * The scan is O(ndiag) and trivially cheap, so it is now serial
 * (with an early break, since distances are unique per diagonal).
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    ALPHA_INT main_diag_pos = 0;
    int num_thread = alpha_get_thread_num();

    // Locate the main diagonal (distance == 0); must stay serial, see above.
    for (ALPHA_INT i = 0; i < A->ndiag; i++)
    {
        if (A->distance[i] == 0)
        {
            main_diag_pos = i;
            break;
        }
    }

    // Forward substitution. Columns of the right-hand side are independent,
    // so parallelizing over out_y_col is safe; rows within a column must be
    // processed in order because y[r] depends on y[ac] with ac < r.
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        for (ALPHA_INT r = 0; r < m; r++)
        {
            ALPHA_Number temp;
            alpha_setzero(temp);
            // accumulate contributions of the strictly-lower diagonals
            // (those stored before the main diagonal)
            for (ALPHA_INT ndiag = 0; ndiag < main_diag_pos; ndiag++)
            {
                if (-A->distance[ndiag] <= r)
                {
                    ALPHA_INT ac = r + A->distance[ndiag];
                    alpha_madde(temp, A->values[ndiag * A->lval + r], y[ac * ldy + out_y_col]);
                }
            }
            ALPHA_Number t;
            alpha_setzero(t);
            alpha_mul(t, alpha, x[r * ldx + out_y_col]);
            // unit diagonal: y[r] = alpha * x[r] - temp
            alpha_sub(y[r * ldy + out_y_col], t, temp);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
template<typename _LhsScalar, typename _RhsScalar> class ei_level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// Row-major destination: C = alpha * A * B with C row-major is equivalent
// to C^T = alpha * B^T * A^T with C^T column-major, so this specialization
// swaps the operands (and their storage orders / conjugation flags) and
// forwards to the column-major implementation below.
template<
  typename Scalar, typename Index,
  int LhsStorageOrder, bool ConjugateLhs,
  int RhsStorageOrder, bool ConjugateRhs>
struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs, Index rhsStride,
    Scalar* res, Index resStride,
    Scalar alpha,
    ei_level3_blocking<Scalar,Scalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    ei_general_matrix_matrix_product<Scalar, Index,
      RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
      ConjugateRhs,
      LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
      ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
// Column-major destination: cache-blocked GEMM (res += alpha * lhs * rhs)
// following Goto's algorithm. If `info` is non-null, an OpenMP thread team
// cooperates: each thread packs its own horizontal slice of the rhs panel
// into a shared buffer and all threads consume each other's slices.
template<
  typename Scalar, typename Index,
  int LhsStorageOrder, bool ConjugateLhs,
  int RhsStorageOrder, bool ConjugateRhs>
struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor>
{
static void run(Index rows, Index cols, Index depth,
  const Scalar* _lhs, Index lhsStride,
  const Scalar* _rhs, Index rhsStride,
  Scalar* res, Index resStride,
  Scalar alpha,
  ei_level3_blocking<Scalar,Scalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
  ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

  // conjugation of the rhs is folded into the scaling factor
  if (ConjugateRhs)
    alpha = ei_conj(alpha);

  typedef typename ei_packet_traits<Scalar>::type PacketType;
  typedef ei_product_blocking_traits<Scalar> Blocking;

  Index kc = blocking.kc();                 // cache block size along the K direction
  Index mc = std::min(rows,blocking.mc());  // cache block size along the M direction
  //Index nc = blocking.nc(); // cache block size along the N direction

  ei_gemm_pack_rhs<Scalar, Index, Blocking::nr, RhsStorageOrder> pack_rhs;
  ei_gemm_pack_lhs<Scalar, Index, Blocking::mr, LhsStorageOrder> pack_lhs;
  ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    Index tid = omp_get_thread_num();
    Index threads = omp_get_num_threads();

    // per-thread packing buffers for the lhs block and the kernel workspace
    Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
    std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr*8;
    Scalar* w = ei_aligned_stack_new(Scalar, sizeW);
    Scalar* blockB = blocking.blockB(); // shared packed-rhs buffer
    ei_internal_assert(blockB!=0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing A'.
      pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

      // Pack B_k to B' in a parallel fashion:
      // each thread packs the sub block B_k,j to B'_j where j is the thread id.

      // However, before copying to B'_j, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      // NOTE(review): this busy-wait reads/writes plain (non-atomic) fields of
      // GemmParallelInfo; it presumably relies on compiler/hardware behavior
      // of the era — confirm against the GemmParallelInfo definition.
      while(info[tid].users!=0) {}
      info[tid].users += threads;

      pack_rhs(blockB+info[tid].rhs_start*kc, &rhs(k,info[tid].rhs_start), rhsStride, alpha, actual_kc, info[tid].rhs_length);

      // Notify the other threads that the part B'_j is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per B'_j
      for(Index shift=0; shift<threads; ++shift)
      {
        Index j = (tid+shift)%threads;

        // At this point we have to make sure that B'_j has been updated by the thread j,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if(shift>0)
          while(info[j].sync!=k) {}

        gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*kc, mc, actual_kc, info[j].rhs_length, -1,-1,0,0, w);
      }

      // Then keep going as usual with the remaining A'
      for(Index i=mc; i<rows; i+=mc)
      {
        const Index actual_mc = std::min(i+mc,rows)-i;

        // pack A_i,k to A'
        pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

        // C_i += A' * B'
        gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, -1,-1,0,0, w);
      }

      // Release all the sub blocks B'_j of B' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index j=0; j<threads; ++j)
        #pragma omp atomic
        --(info[j].users);
    }

    ei_aligned_stack_delete(Scalar, blockA, kc*mc);
    ei_aligned_stack_delete(Scalar, w, sizeW);
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;
    std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr;

    // reuse caller-provided buffers when available, else allocate on the stack
    Scalar *blockA = blocking.blockA()==0 ? ei_aligned_stack_new(Scalar, sizeA) : blocking.blockA();
    Scalar *blockB = blocking.blockB()==0 ? ei_aligned_stack_new(Scalar, sizeB) : blocking.blockB();
    Scalar *blockW = blocking.blockW()==0 ? ei_aligned_stack_new(Scalar, sizeW) : blocking.blockW();

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    // (==GEMM_VAR1)
    for(Index k2=0; k2<depth; k2+=kc)
    {
      const Index actual_kc = std::min(k2+kc,depth)-k2;

      // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
      // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
      // Note that this panel will be read as many times as the number of blocks in the lhs's
      // vertical panel which is, in practice, a very low number.
      pack_rhs(blockB, &rhs(k2,0), rhsStride, alpha, actual_kc, cols);

      // For each mc x kc block of the lhs's vertical panel...
      // (==GEPP_VAR1)
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = std::min(i2+mc,rows)-i2;

        // We pack the lhs's block into a sequential chunk of memory (L1 caching)
        // Note that this block will be read a very high number of times, which is equal to the number of
        // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
        pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

        // Everything is packed, we can now call the block * panel kernel:
        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, -1, -1, 0, 0, blockW);
      }
    }

    // free only the buffers this call allocated itself
    if(blocking.blockA()==0) ei_aligned_stack_delete(Scalar, blockA, kc*mc);
    if(blocking.blockB()==0) ei_aligned_stack_delete(Scalar, blockB, sizeB);
    if(blocking.blockW()==0) ei_aligned_stack_delete(Scalar, blockW, sizeW);
  }
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to ei_general_matrix_matrix_product
**********************************************************************************/
// Traits of the GEMM product expression simply forward to the generic
// ProductBase traits; no extra compile-time information is needed.
template<typename Lhs, typename Rhs>
struct ei_traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : ei_traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
// Callable wrapper around Gemm::run used by ei_parallelize_gemm: it captures
// the operands, destination, scale factor and blocking, and evaluates the
// product on a [row, row+rows) x [col, col+cols) sub-block of the result.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct ei_gemm_functor
{
  ei_gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
                  BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // called once before spawning the worker threads: allocate the shared
  // packed-rhs buffer
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  // cols == -1 means "all columns of the rhs"
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();
    Gemm::run(rows, cols, m_lhs.cols(),
              (const Scalar*)&(m_lhs.const_cast_derived().coeffRef(row,0)), m_lhs.outerStride(),
              (const Scalar*)&(m_rhs.const_cast_derived().coeffRef(0,col)), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
  const Lhs& m_lhs;
  const Rhs& m_rhs;
  Dest& m_dest;
  Scalar m_actualAlpha;
  BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class ei_gemm_blocking_space;
// Base class holding the level-3 blocking state: the cache block sizes
// (mc, nc, kc) and the (possibly null) packing buffers for the lhs (blockA),
// rhs (blockB) and the kernel workspace (blockW). Buffers default to null;
// allocation policy is defined by the derived ei_gemm_blocking_space classes.
template<typename _LhsScalar, typename _RhsScalar>
class ei_level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:
    ei_level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};
// Blocking space for products whose maximum sizes are all known at compile
// time: the packing buffers are fixed-size, 16-byte-aligned member arrays,
// so no heap allocation ever happens (the allocate* methods are no-ops).
// For a row-major destination the problem is solved transposed, hence the
// lhs/rhs scalar types and row/col extents are swapped.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class ei_gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, true>
  : public ei_level3_blocking<
      typename ei_meta_if<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::ret,
      typename ei_meta_if<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::ret>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename ei_meta_if<Transpose,_RhsScalar,_LhsScalar>::ret LhsScalar;
    typedef typename ei_meta_if<Transpose,_LhsScalar,_RhsScalar>::ret RhsScalar;
    typedef ei_product_blocking_traits<RhsScalar> Blocking;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Blocking::nr * ei_packet_traits<RhsScalar>::size
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    ei_gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      // the runtime sizes are ignored: blocks cover the full compile-time extents
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};
// Blocking space for products with at least one dynamic dimension: block
// sizes are computed at runtime and the packing buffers are lazily
// heap-allocated on demand. The destructor frees whatever this object
// allocated (it owns its buffers). As above, a row-major destination is
// handled transposed.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class ei_gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, false>
  : public ei_level3_blocking<
      typename ei_meta_if<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::ret,
      typename ei_meta_if<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::ret>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename ei_meta_if<Transpose,_RhsScalar,_LhsScalar>::ret LhsScalar;
    typedef typename ei_meta_if<Transpose,_LhsScalar,_RhsScalar>::ret RhsScalar;
    typedef ei_product_blocking_traits<RhsScalar> Blocking;

    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;

  public:

    ei_gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      // shrink mc/nc/kc to cache-friendly block sizes, then size the buffers
      computeProductBlockingSizes<LhsScalar,RhsScalar>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*ei_packet_traits<RhsScalar>::size*Blocking::nr;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = ei_aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = ei_aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = ei_aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    ~ei_gemm_blocking_space()
    {
      // ei_aligned_delete on a null pointer is expected to be a no-op for
      // buffers that were never allocated
      ei_aligned_delete(this->m_blockA, m_sizeA);
      ei_aligned_delete(this->m_blockB, m_sizeB);
      ei_aligned_delete(this->m_blockW, m_sizeW);
    }
};
// High-level wrapper for "large" matrix products: extracts the nested
// expressions and their scalar factors, picks a blocking strategy from the
// compile-time sizes, and dispatches to the (possibly parallel) low-level
// GEMM kernel via ei_parallelize_gemm.
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
      EIGEN_STATIC_ASSERT((ei_is_same_type<typename Lhs::Scalar, typename Rhs::Scalar>::ret),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
    }

    // dst += alpha * lhs * rhs
    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
    {
      ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

      // strip transpose/scalar-multiple wrappers and fold their factors into alpha
      const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
      const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);

      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);

      typedef ei_gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

      typedef ei_gemm_functor<
        Scalar, Index,
        ei_general_matrix_matrix_product<
          Scalar, Index,
          (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;

      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());

      // only parallelize products that are large enough to amortize threading
      ei_parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
GB_binop__fmod_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__fmod_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__fmod_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__fmod_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__fmod_fp32)
// C=scalar+B GB (_bind1st__fmod_fp32)
// C=scalar+B' GB (_bind1st_tran__fmod_fp32)
// C=A+scalar GB (_bind2nd__fmod_fp32)
// C=A'+scalar GB (_bind2nd_tran__fmod_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = fmodf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = fmodf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; cij = fmodf (aij, bij).
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE),
// in which case the caller falls back to the generic implementation.
GrB_Info GB (_Cdense_ewise3_noaccum__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the accumulator applied
// entrywise is cij = fmodf (cij, bij). B has been pre-sliced into
// B_ntasks tasks for B_nthreads threads (B_ek_slicing).
GrB_Info GB (_Cdense_accumB__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar passed as untyped bytes;
// cij = fmodf (cij, b) for every entry.
GrB_Info GB (_Cdense_accumb__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: fmod has no dedicated rowscale kernel; the generic case is used
// instead. The generated placeholder name was "(node)", a typo for "(none)"
// (compare the colscale stub above); corrected for consistency. This code is
// compiled out and never built.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with cij = fmodf (aij, bij) where both
// entries are present. The task list (TaskList/C_ntasks/C_nthreads) and the
// C_to_* maps have been precomputed by the caller's symbolic phase.
GrB_Info GB (_AaddB__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, with
// cij = fmodf (aij, bij) on the intersection pattern of A and B.
GrB_Info GB (_AemultB_01__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. Because fmod is not commutative and has no pre-flipped
// variant, the flipxy flag selects between fmodf(x,y) and fmodf(y,x)
// at compile time via the GB_FLIPPED macro.
GrB_Info GB (_AemultB_02__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B when M is sparse/hyper and both
// A and B are bitmap/full; cij = fmodf (aij, bij).
GrB_Info GB (_AemultB_03__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap;
// cij = fmodf (aij, bij).
GrB_Info GB (_AemultB_bitmap__fmod_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first operand, Cx [p] = fmodf (x, Bx [p]), over anz entries in parallel.
// Bb is the optional bitmap of B (NULL means all entries present).
GrB_Info GB (_bind1st__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        Cx [p] = fmodf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = fmodf (Ax [k], y) for every entry present in A: the scalar y is
// bound to the second argument of the operator.
GrB_Info GB (_bind2nd__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((float *) y_input)) ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions not present in the bitmap of A
        if (!GBB (Ab, k)) continue ;
        Cx [k] = fmodf (Ax [k], y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = Ax [pA] ; \
    Cx [pC] = fmodf (x, aij) ; \
}

// C = op (x, A'): transpose A and apply fmodf with the scalar bound first.
// The transpose loop itself is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the transpose
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any following code (the definition happens to be
    // the same here since both A and the operator's type are float)
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = Ax [pA] ; \
    Cx [pC] = fmodf (aij, y) ; \
}

// C = op (A', y): transpose A and apply fmodf with the scalar bound second.
// The transpose loop itself is in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the transpose
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_sgemm_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Quantize a float to int8: round to the nearest integer (ties away from
// zero, per round()), then saturate into [-128, 127].
static inline signed char float2int8(float v)
{
    int q = round(v);
    if (q > 127)
        return 127;
    if (q < -128)
        return -128;
    return (signed char)q;
}
// Convolution via im2col + packed 4x4 int8 GEMM (plain C reference path).
// top_blob = _kernel * im2col(bottom_blob); results are stored as raw int32
// accumulators in top_blob (no dequantization here).
//
// bottom_blob : int8 input, inch channels of w x h
// top_blob    : int32 output, outch channels of outw x outh
// _kernel     : int8 weights, outch * inch * kernel_h * kernel_w
// NOTE(review): no dilation handling; assumes the caller padded bottom_blob
// so every (row, col) access below is in bounds -- confirm at call sites.
static void conv_im2col_sgemm_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const signed char *kernel = _kernel;

    // im2row: one row per output pixel, K = inch*kernel_h*kernel_w bytes each
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;
        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;
    // int M = outch; // outch
    int N = outw * outh; // outsize or out stride
    int K = kernel_w * kernel_h * inch; // ksize * inch

    // bottom_im2row memory packed 4 x 4: interleave 4 output pixels,
    // consuming the K dimension 2 bytes at a time
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;
            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);
            signed char* tmpptr = bottom_tm.channel(i/4);
            int q = 0;
            // main loop: 2 K-steps for 4 pixels per iteration
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];
                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            // tail: odd final K-step
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];
                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        // leftover pixels (<4): one pixel per extra channel of bottom_tm
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);
            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr += 2;
                img0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4: same interleave for 4 output channels
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;
        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;
            signed char* ktmp = kernel_tm.channel(p/4);
            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];
                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];
                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        // leftover output channels (<4): one channel per extra kernel_tm channel
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            signed char* ktmp = kernel_tm.channel(p/4 + p%4);
            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 GEMM: tiles of 4 output channels x 4 output pixels
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch; // outch
        // int N = outw * outh; // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch
        int nn_outch = 0;
        int remain_outch_start = 0;
        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;
            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i+1);
            int* output2 = top_blob.channel(i+2);
            int* output3 = top_blob.channel(i+3);
            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);
                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};
                int k=0;
                // 2 K-steps per iteration, matching the packed layouts above
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n]; // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];
                        sum1[n] += (int)va[2] * vb[2*n]; // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];
                        sum2[n] += (int)va[4] * vb[2*n]; // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];
                        sum3[n] += (int)va[6] * vb[2*n]; // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }
                for (int n=0; n<4; n++)
                {
                    output0[n] = sum0[n];
                    output1[n] = sum1[n];
                    output2[n] = sum2[n];
                    output3[n] = sum3[n];
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }
            // leftover output pixels for this 4-channel tile
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;
                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];
                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];
                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];
                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];
                    va += 8;
                    vb += 2;
                }
                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];
                    va += 4;
                    vb += 1;
                }
                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;
                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            int* output = top_blob.channel(i);
            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }
                for (int n=0; n<4; n++)
                {
                    output[n] = sum[n];
                }
                output += 4;
            }
            for (; j<N; j++)
            {
                int sum = 0;
                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];
                    va += 1;
                    vb += 1;
                }
                output[0] = sum;
                output++;
            }
        }
    }

    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
// Convolution via im2col + packed 4x4 int8 GEMM, with per-channel
// dequantization fused into the store:
//   top_blob[ch] = (float)sum * scale_dequant[ch] + bias[ch]
//
// bottom_blob   : int8 input, inch channels of w x h
// top_blob      : float output, outch channels of outw x outh
// _kernel       : int8 weights, outch * inch * kernel_h * kernel_w
// _bias         : per-output-channel float bias (may be empty -> treated as 0)
// scale_dequant : per-output-channel dequantization scale (outch entries)
// NOTE(review): same packing/GEMM structure as conv_im2col_sgemm_int8_sse;
// only the output stage differs.
static void conv_im2col_sgemm_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const signed char *kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel, K = inch*kernel_h*kernel_w bytes each
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;
        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;
    // int M = outch; // outch
    int N = outw * outh; // outsize or out stride
    int K = kernel_w * kernel_h * inch; // ksize * inch

    // bottom_im2row memory packed 4 x 4
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;
            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);
            signed char* tmpptr = bottom_tm.channel(i/4);
            int q = 0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];
                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];
                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        // leftover pixels (<4): one pixel per extra channel of bottom_tm
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);
            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr += 2;
                img0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;
        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;
            signed char* ktmp = kernel_tm.channel(p/4);
            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];
                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];
                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        // leftover output channels (<4)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            signed char* ktmp = kernel_tm.channel(p/4 + p%4);
            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 GEMM with fused dequantization on store
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch; // outch
        // int N = outw * outh; // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch
        int nn_outch = 0;
        int remain_outch_start = 0;
        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;
            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i+1] : 0.f;
            const float bias2 = bias ? bias[i+2] : 0.f;
            const float bias3 = bias ? bias[i+3] : 0.f;
            const float scale_dequant0 = scale_dequant[i];
            const float scale_dequant1 = scale_dequant[i+1];
            const float scale_dequant2 = scale_dequant[i+2];
            const float scale_dequant3 = scale_dequant[i+3];
            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i+1);
            float* output2 = top_blob.channel(i+2);
            float* output3 = top_blob.channel(i+3);
            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);
                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n]; // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];
                        sum1[n] += (int)va[2] * vb[2*n]; // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];
                        sum2[n] += (int)va[4] * vb[2*n]; // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];
                        sum3[n] += (int)va[6] * vb[2*n]; // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }
                // dequantize: int32 accumulator -> float output
                for (int n=0; n<4; n++)
                {
                    output0[n] = (float)sum0[n] * scale_dequant0 + bias0;
                    output1[n] = (float)sum1[n] * scale_dequant1 + bias1;
                    output2[n] = (float)sum2[n] * scale_dequant2 + bias2;
                    output3[n] = (float)sum3[n] * scale_dequant3 + bias3;
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }
            // leftover output pixels for this 4-channel tile
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;
                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];
                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];
                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];
                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];
                    va += 8;
                    vb += 2;
                }
                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];
                    va += 4;
                    vb += 1;
                }
                output0[0] = (float)sum0 * scale_dequant0 + bias0;
                output1[0] = (float)sum1 * scale_dequant1 + bias1;
                output2[0] = (float)sum2 * scale_dequant2 + bias2;
                output3[0] = (float)sum3 * scale_dequant3 + bias3;
                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            float* output = top_blob.channel(i);
            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_dequant0 = scale_dequant[i];
            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }
                for (int n=0; n<4; n++)
                {
                    output[n] = (float)sum[n] * scale_dequant0 + bias0;
                }
                output += 4;
            }
            for (; j<N; j++)
            {
                int sum = 0;
                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];
                    va += 1;
                    vb += 1;
                }
                output[0] = (float)sum * scale_dequant0 + bias0;
                output++;
            }
        }
    }

    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
// Convolution via im2col + packed 4x4 int8 GEMM, with per-channel
// requantization fused into the store:
//   top_blob[ch] = float2int8(((float)sum * scale_in[ch] + bias[ch]) * scale_out[ch])
//
// bottom_blob   : int8 input, inch channels of w x h
// top_blob      : int8 output, outch channels of outw x outh
// _kernel       : int8 weights, outch * inch * kernel_h * kernel_w
// _bias         : per-output-channel float bias (may be empty -> treated as 0)
// scale_requant : 2 floats per output channel, [2*ch] = dequant scale (in),
//                 [2*ch+1] = requant scale (out)
// NOTE(review): same packing/GEMM structure as conv_im2col_sgemm_int8_sse;
// only the output stage differs.
static void conv_im2col_sgemm_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const signed char *kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel, K = inch*kernel_h*kernel_w bytes each
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;
        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;
    // int M = outch; // outch
    int N = outw * outh; // outsize or out stride
    int K = kernel_w * kernel_h * inch; // ksize * inch

    // bottom_im2row memory packed 4 x 4
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;
            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);
            signed char* tmpptr = bottom_tm.channel(i/4);
            int q = 0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];
                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];
                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        // leftover pixels (<4): one pixel per extra channel of bottom_tm
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);
            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr += 2;
                img0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;
        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;
            signed char* ktmp = kernel_tm.channel(p/4);
            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];
                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];
                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        // leftover output channels (<4)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            signed char* ktmp = kernel_tm.channel(p/4 + p%4);
            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 GEMM with fused requantization on store
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch; // outch
        // int N = outw * outh; // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch
        int nn_outch = 0;
        int remain_outch_start = 0;
        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;
            signed char* output0 = top_blob.channel(i);
            signed char* output1 = top_blob.channel(i+1);
            signed char* output2 = top_blob.channel(i+2);
            signed char* output3 = top_blob.channel(i+3);
            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i+1] : 0.f;
            const float bias2 = bias ? bias[i+2] : 0.f;
            const float bias3 = bias ? bias[i+3] : 0.f;
            // per-channel (in, out) scale pairs, interleaved in scale_requant
            const float scale_requant_in0 = scale_requant[2*i];
            const float scale_requant_out0 = scale_requant[2*i+1];
            const float scale_requant_in1 = scale_requant[2*(i+1)];
            const float scale_requant_out1 = scale_requant[2*(i+1)+1];
            const float scale_requant_in2 = scale_requant[2*(i+2)];
            const float scale_requant_out2 = scale_requant[2*(i+2)+1];
            const float scale_requant_in3 = scale_requant[2*(i+3)];
            const float scale_requant_out3 = scale_requant[2*(i+3)+1];
            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);
                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n]; // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];
                        sum1[n] += (int)va[2] * vb[2*n]; // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];
                        sum2[n] += (int)va[4] * vb[2*n]; // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];
                        sum3[n] += (int)va[6] * vb[2*n]; // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }
                // requantize: int32 accumulator -> int8 output
                for (int n=0; n<4; n++)
                {
                    output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                    output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1);
                    output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2);
                    output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3);
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }
            // leftover output pixels for this 4-channel tile
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;
                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];
                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];
                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];
                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];
                    va += 8;
                    vb += 2;
                }
                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];
                    va += 4;
                    vb += 1;
                }
                output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
                output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
                output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
                output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);
                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            signed char* output = top_blob.channel(i);
            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_requant_in0 = scale_requant[2*i];
            const float scale_requant_out0 = scale_requant[2*i+1];
            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};
                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }
                for (int n=0; n<4; n++)
                {
                    output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                }
                output += 4;
            }
            for (; j<N; j++)
            {
                int sum = 0;
                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];
                    va += 1;
                    vb += 1;
                }
                output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0);
                output++;
            }
        }
    }

    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
|
target_update_array_extension.c | // --------------------------------------------------
// Check 'to' and extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -DCLAUSE=to -DEXTENDS=BEFORE
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
// --------------------------------------------------
// Check 'from' and extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -DCLAUSE=from -DEXTENDS=BEFORE
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
// --------------------------------------------------
// Check 'to' and extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -DCLAUSE=to -DEXTENDS=AFTER
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
// --------------------------------------------------
// Check 'from' and extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -DCLAUSE=from -DEXTENDS=AFTER
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
// END.
#include <stdio.h>
#define BEFORE 0
#define AFTER 1
#if EXTENDS == BEFORE
# define SMALL 2:3
# define LARGE 0:5
#elif EXTENDS == AFTER
# define SMALL 0:3
# define LARGE 0:5
#else
# error EXTENDS undefined
#endif
int main() {
  int arr[5];

  // First direction: map the larger section of arr, then issue a target
  // update on the smaller, fully-enclosed section.  This must complete
  // without runtime diagnostics.
  // CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: arr[LARGE])
  {
#pragma omp target update CLAUSE(arr[SMALL])
  }
  // CHECK: success
  fprintf(stderr, "success\n");

  // Second direction: map the smaller section, then issue a target update
  // on a section that extends beyond the mapped region.
  // CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: arr[SMALL])
  {
#pragma omp target update CLAUSE(arr[LARGE])
  }
  // CHECK: success
  fprintf(stderr, "success\n");
  return 0;
}
|
GB_binop__plus_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc32)
// A*D function (colscale): GB (_AxD__plus_fc32)
// D*A function (rowscale): GB (_DxB__plus_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc32)
// C=scalar+B GB (_bind1st__plus_fc32)
// C=scalar+B' GB (_bind1st_tran__plus_fc32)
// C=A+scalar GB (_bind2nd__plus_fc32)
// C=A'+scalar GB (_bind2nd_tran__plus_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_add (aij, bij)
// Macro specializations for the PLUS_FC32 binary operator.  These drive the
// template files included by the kernels below.
#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (fix: the stray '\' after the 0 spliced the next comment line into the
// macro body, since line splicing happens before comment removal)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (fix: same stray line-continuation removed here)
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_add (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_FC32 || GxB_NO_PLUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense; the PLUS_FC32 operator is baked
// in via the GB_BINOP macro above.  Auto-generated: edit Generator/*, not here.
void GB (_Cdense_ewise3_accum__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where all three matrices are dense.
// Auto-generated: edit Generator/*, not here.
void GB (_Cdense_ewise3_noaccum__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C, using the slicing of
// B prepared by the caller (B_ek_slicing / B_ntasks / B_nthreads).
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a dense C.
// NOTE(review): the second "return (GrB_SUCCESS)" after the inner block is
// unreachable (the template path returns first) — harmless generator artifact.
GrB_Info GB (_Cdense_accumb__plus_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// results into the pre-allocated C->x array.
GrB_Info GB (_AxD__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M / !M).  When is_eWiseUnion is
// true, alpha/beta scalars substitute for entries missing from A or B.
// The ek-slicing workspaces are declared here and freed by GB_FREE_WORKSPACE
// (expanded inside GB_add_template.c).
GrB_Info GB (_AaddB__plus_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
if (is_eWiseUnion)
{
// alpha/beta are only read in the eWiseUnion case, so they are only
// unpacked (from their type-erased inputs) on that path.
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__plus_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  For PLUS_FC32, GB_BINOP_FLIP is 0 (the op is commutative),
// so the flipxy argument is ignored and only the unflipped path compiles.
GrB_Info GB (_AemultB_02__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C = A.*B, C<M> = A.*B, or C<!M> = A.*B.
GrB_Info GB (_AemultB_bitmap__plus_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply the binary op with the scalar bound as the first
// argument.  Bb is B's bitmap (NULL when B is full); entries absent from
// the bitmap are skipped.  Cx and Bx may alias.
GrB_Info GB (_bind1st__plus_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC32_add (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply the binary op with the scalar bound as the second
// argument.  Mirror image of _bind1st above; Cx and Ax may alias.
GrB_Info GB (_bind2nd__plus_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC32_add (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_add (x, aij) ; \
}
// C = op(x, A'): transpose A and apply the op with scalar x bound first,
// using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__plus_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code that follows this function.
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_add (aij, y) ; \
}
// C = op(A', y): transpose A and apply the op with scalar y bound second,
// using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__plus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_smithW-v6.2-target-inlined.c | /*********************************************************************************
* Smith–Waterman algorithm
* Purpose: Local alignment of nucleotide or protein sequences
* Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro
*
* Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode
* gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run
* Execution: ./omp_smithW <number_of_col> <number_of_rows>
*
* Updated by C. Liao, Jan 2nd, 2019
*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <assert.h>
#include <stdbool.h> // C99 does not support the boolean data type
#include "parameters.h"
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/*--------------------------------------------------------------------
* Constants
*/
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */
/*--------------------------------------------------------------------
* Helpers
*/
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define max(a,b) ((a) > (b) ? a : b)
// #define DEBUG
/* End of Helpers */
#ifndef _OPENMP
#include <sys/time.h>
/* Fallback wall-clock timer: current time in seconds (with microsecond
 * resolution) via gettimeofday().  Compiled only when OpenMP is absent. */
double time_stamp()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + 1.0e-6 * (double) now.tv_usec;
}
// Shim so the timing code in main() can call omp_get_wtime() even when the
// program is compiled without OpenMP (this whole region is under
// #ifndef _OPENMP).
double omp_get_wtime()
{
return time_stamp();
}
#endif
/*--------------------------------------------------------------------
* Functions Prototypes
*/
#pragma omp declare target
//Defines size of strings to be compared
long long int m = 8 ; //Columns - Size of string a
long long int n = 9; //Lines - Size of string b
int gapScore = -2;
//Defines scores
int matchScore = 3;
int missmatchScore = -3;
//Strings over the Alphabet Sigma
char *a, *b;
int matchMissmatchScore(long long int i, long long int j);
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
#pragma omp end declare target
// without omp critical: how to conditionalize it?
void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj);
/* End of prototypes */
/*--------------------------------------------------------------------
* Global Variables
*/
bool useBuiltInData=true;
// the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s.
/* End of global variables */
/*--------------------------------------------------------------------
* Function: main
*/
/* Driver: optionally reads <number_of_col> <number_of_row> from argv,
 * allocates the sequences a/b and the scoring (H) and predecessor (P)
 * matrices, fills the scoring matrix wavefront-by-wavefront with the whole
 * computation offloaded in one OpenMP target region, then backtracks the
 * best local alignment and prints timings.
 * NOTE(review): custom sizes require exactly two arguments (argc==3);
 * any other argument count silently falls back to the built-in data. */
int main(int argc, char* argv[]) {
// thread_count is no longer used
int thread_count;
if (argc==3)
{
m = strtoll(argv[1], NULL, 10);
n = strtoll(argv[2], NULL, 10);
useBuiltInData = false;
}
//#ifdef DEBUG
if (useBuiltInData)
printf ("Using built-in data for testing ..\n");
printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF);
//#endif
//Allocates a and b
a = (char*) malloc(m * sizeof(char));
// printf ("debug: a's address=%p\n", a);
b = (char*) malloc(n * sizeof(char));
// printf ("debug: b's address=%p\n", b);
//Because now we have zeros: the scoring matrix gets an extra zero-filled
//row and column of padding, so m and n grow by one each.
m++;
n++;
//Allocates similarity matrix H
int *H;
H = (int *) calloc(m * n, sizeof(int));
// printf ("debug: H's address=%p\n", H);
//Allocates predecessor matrix P
int *P;
P = (int *)calloc(m * n, sizeof(int));
// printf ("debug: P's address=%p\n", P);
unsigned long long sz = (m+n +2*m*n)*sizeof(int)/1024/1024;
if (sz>=1024)
printf("Total memory footprint is:%llu GB\n", sz/1024) ;
else
printf("Total memory footprint is:%llu MB\n", sz);
if (useBuiltInData)
{
//Uncomment this to test the sequence available at
//http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
// OBS: m=11 n=7
// a[0] = 'C';
// a[1] = 'G';
// a[2] = 'T';
// a[3] = 'G';
// a[4] = 'A';
// a[5] = 'A';
// a[6] = 'T';
// a[7] = 'T';
// a[8] = 'C';
// a[9] = 'A';
// a[10] = 'T';
// b[0] = 'G';
// b[1] = 'A';
// b[2] = 'C';
// b[3] = 'T';
// b[4] = 'T';
// b[5] = 'A';
// b[6] = 'C';
// https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example
// Using the wiki example to verify the results
b[0] = 'G';
b[1] = 'G';
b[2] = 'T';
b[3] = 'T';
b[4] = 'G';
b[5] = 'A';
b[6] = 'C';
b[7] = 'T';
b[8] = 'A';
a[0] = 'T';
a[1] = 'G';
a[2] = 'T';
a[3] = 'T';
a[4] = 'A';
a[5] = 'C';
a[6] = 'G';
a[7] = 'G';
}
else
{
//Gen random arrays a and b
generate();
}
//Start position for backtrack
long long int maxPos = 0;
//Calculates the similarity matrix
long long int i, j;
// The way to generate all wavefront is to go through the top edge elements
// starting from the left top of the matrix, go to the bottom top -> down, then left->right
// total top edge element count = dim1_size + dim2_size -1
//Because now we have zeros ((m-1) + (n-1) - 1)
long long int nDiag = m + n - 3;
#ifdef DEBUG
printf("nDiag=%lld\n", nDiag);
printf("Number of wavefront lines and their first element positions:\n");
#endif
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
thread_count = omp_get_num_threads();
printf ("Using %d out of max %d threads...\n", thread_count, omp_get_max_threads());
}
}
// detect GPU support
int runningOnGPU = 0;
printf ("The number of target devices =%d\n", omp_get_num_devices());
/* Test if GPU is available using OpenMP4.5 */
#pragma omp target map(from:runningOnGPU)
{
// This function returns true if currently running on the host device, false otherwise.
if (!omp_is_initial_device())
runningOnGPU = 1;
}
/* If still running on CPU, GPU must not be available */
if (runningOnGPU == 1)
printf("### Able to use the GPU! ### \n");
else
printf("### Unable to use the GPU, using CPU! ###\n");
#endif
//Gets Initial time
double initialTime = omp_get_wtime();
// mistake: element count, not byte size!!
// int asz= m*n*sizeof(int);
int asz= m*n;
// choice 2: map data before the outer loop, so H and P live on the device
// for the entire wavefront sweep instead of being re-transferred per diagonal
#pragma omp target map (to:a[0:m], b[0:n], nDiag, m,n,gapScore, matchScore, missmatchScore) map(tofrom: H[0:asz], P[0:asz], maxPos)
// #pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i)
{
for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding
{
long long int nEle, si, sj;
// nEle = nElement(i);
//---------------inlined ------------
if (i < m && i < n) { // smaller than both directions
//Number of elements in the diagonal is increasing
nEle = i;
}
else if (i < max(m, n)) { // smaller than only one direction
//Number of elements in the diagonal is stable
long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size
nEle = min - 1;
}
else {
//Number of elements in the diagonal is decreasing
long int min = min(m, n);
nEle = 2 * min - i + llabs(m - n) - 2;
}
//calcFirstDiagElement(i, &si, &sj);
//------------inlined---------------------
// Calculate the first element of diagonal
if (i < n) { // smaller than row count
si = i;
sj = 1; // start from the j==1 since j==0 is the padding
} else { // now we sweep horizontally at the bottom of the matrix
si = n - 1; // i is fixed
sj = i - n + 2; // j position is the nDiag (id -n) +1 +1 // first +1
}
//--------------------------------------
{
// choice 1: map data before the inner loop
//#pragma omp target device(0) map (to:a[0:m], b[0:n], nEle, m,n,gapScore, matchScore, missmatchScore, si, sj) map(tofrom: H[0:asz], P[0:asz], maxPos)
#pragma omp parallel for default(none) private(j) shared (a,b, nEle, m, n, gapScore, matchScore, missmatchScore, si, sj, H, P, maxPos)
for (j = 0; j < nEle; ++j)
{ // going upwards : anti-diagnol direction
long long int ai = si - j ; // going up vertically
long long int aj = sj + j; // going right in horizontal
///------------inlined ------------------------------------------
// similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside
{
int up, left, diag;
//Stores index of element
long long int index = m * ai + aj;
//Get element above
up = H[index - m] + gapScore;
//Get element on the left
left = H[index - 1] + gapScore;
//Get element on the diagonal
int t_mms;
if (a[aj - 1] == b[ai - 1])
t_mms = matchScore;
else
t_mms = missmatchScore;
diag = H[index - m - 1] + t_mms; // matchMissmatchScore(i, j);
// degug here
// return;
//Calculates the maximum
int max = NONE;
int pred = NONE;
if (diag > max) { //same letter ↖
max = diag;
pred = DIAGONAL;
}
if (up > max) { //remove letter ↑
max = up;
pred = UP;
}
if (left > max) { //insert letter ←
max = left;
pred = LEFT;
}
//Inserts the value in the similarity and predecessor matrixes
H[index] = max;
P[index] = pred;
//Updates maximum score to be used as seed on backtrack
// NOTE(review): the comparison is performed OUTSIDE the critical
// section, so two threads can pass the test concurrently and the
// last writer wins even if its score is smaller — maxPos may end
// up non-maximal.  Re-checking inside the critical would fix it.
if (max > H[maxPos]) {
#pragma omp critical
maxPos = index;
}
}
// ---------------------------------------------------------------
}
}
} // for end nDiag
} // end omp parallel
double finalTime = omp_get_wtime();
printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime);
initialTime = omp_get_wtime();
backtrack(P, maxPos);
finalTime = omp_get_wtime();
//Gets backtrack time
finalTime = omp_get_wtime();
printf("Elapsed time for backtracking: %f\n", finalTime - initialTime);
#ifdef DEBUG
printf("\nSimilarity Matrix:\n");
printMatrix(H);
printf("\nPredecessor Matrix:\n");
printPredecessorMatrix(P);
#endif
if (useBuiltInData)
{
// The wiki example's best score is 13 at an interior cell, but the
// last cell H[n*m-1] is deterministic (7) for this input, so it is
// used as a cheap regression check.
printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false");
assert (H[n*m-1]==7);
}
//Frees similarity matrixes
free(H);
free(P);
//Frees input arrays
free(a);
free(b);
return 0;
} /* End of main */
/*--------------------------------------------------------------------
* Function: nElement
* Purpose: Calculate the number of i-diagonal's elements
* i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored.
*/
long long int nElement(long long int i) {
    // Phase 1: the wavefront still fits under both edges, so it keeps growing.
    if (i < m && i < n) {
        return i;
    }
    // Phase 2: past the shorter edge only — the length is constant and equals
    // the shorter dimension minus the zero-padding row/column.
    if (i < max(m, n)) {
        long int stable = min(m, n);
        return stable - 1;
    }
    // Phase 3: past both edges — the wavefront shrinks back down.
    long int shorter = min(m, n);
    return 2 * shorter - i + llabs(m - n) - 2;
}
/*--------------------------------------------------------------------
* Function: calcElement: expect valid i value is from 1 to nDiag. since the first one is 0 padding
* Purpose: Calculate the position of (si, sj)-element
* n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront
*/
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) {
    // Wavefront i begins on the left edge while it fits vertically, then
    // walks rightwards along the bottom row of the padded matrix.
    long long int row, col;
    if (i < n) {
        row = i;          // sweeping down the left edge
        col = 1;          // column 0 is the zero padding
    } else {
        row = n - 1;      // pinned to the last row
        col = i - n + 2;  // shift right as the diagonal index grows
    }
    *si = row;
    *sj = col;
}
/*
// understanding the calculation by an example
n =6 // row
m =2 // col
padded scoring matrix
n=7
m=3
0 1 2
-------
0 x x x
1 x x x
2 x x x
3 x x x
4 x x x
5 x x x
6 x x x
We should peel off top row and left column since they are the padding
the remaining 6x2 sub matrix is what is interesting for us
Now find the number of wavefront lines and their first element's position in the scoring matrix
total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1
We use the left most element in each wavefront line as its first element.
Then we have the first elements like
(1,1),
(2,1)
(3,1)
..
(6,1) (6,2)
*/
/*--------------------------------------------------------------------
* Function: SimilarityScore
* Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j)
* int *P; the predecessor array,storing which of the three elements is picked with max value
*/
#pragma omp declare target
/* Compute the scoring-matrix cell H(i,j) as the max of the three candidate
 * moves (diagonal match/mismatch, up gap, left gap) clamped at 0 (NONE),
 * record the winning move in P, and track the position of the global
 * maximum in *maxPos for the backtracking seed.
 * Safe to call concurrently for cells on the same anti-diagonal.
 *
 * BUG FIX: the original tested (max > H[*maxPos]) entirely OUTSIDE the
 * critical section, so two threads could both pass the test and the last
 * writer would win even with the smaller score, leaving *maxPos at a
 * non-maximal cell.  The test is now repeated inside the critical section
 * (double-checked update); the outer test is kept as a cheap filter. */
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
int up, left, diag;
//Stores index of element
long long int index = m * i + j;
//Get element above
up = H[index - m] + gapScore;
//Get element on the left
left = H[index - 1] + gapScore;
//Get element on the diagonal
int t_mms;
if (a[j - 1] == b[i - 1])
t_mms = matchScore;
else
t_mms = missmatchScore;
diag = H[index - m - 1] + t_mms; // matchMissmatchScore(i, j);
//Calculates the maximum
int max = NONE;
int pred = NONE;
/* === Matrix ===
 * a[0] ... a[n]
 * b[0]
 * ...
 * b[n]
 *
 * generate 'a' from 'b', if '←' insert e '↑' remove
 * a=GAATTCA
 * b=GACTT-A
 *
 * generate 'b' from 'a', if '←' insert e '↑' remove
 * b=GACTT-A
 * a=GAATTCA
 */
if (diag > max) { //same letter ↖
max = diag;
pred = DIAGONAL;
}
if (up > max) { //remove letter ↑
max = up;
pred = UP;
}
if (left > max) { //insert letter ←
max = left;
pred = LEFT;
}
//Inserts the value in the similarity and predecessor matrixes
H[index] = max;
P[index] = pred;
//Updates maximum score to be used as seed on backtrack
if (max > H[*maxPos]) {
#pragma omp critical
{
// Re-check under the lock: another thread may have raised the
// maximum between the unlocked test above and acquiring the lock.
if (max > H[*maxPos])
*maxPos = index;
}
}
} /* End of similarityScore */
/*--------------------------------------------------------------------
* Function: matchMissmatchScore
* Purpose: Similarity function on the alphabet for match/missmatch
*/
/* Substitution score for aligning a[j-1] against b[i-1]:
 * matchScore when the characters agree, missmatchScore otherwise. */
int matchMissmatchScore(long long int i, long long int j) {
    return (a[j - 1] == b[i - 1]) ? matchScore : missmatchScore;
} /* End of matchMissmatchScore */
#pragma omp end declare target
/* Serial variant of similarityScore: identical cell computation but updates
 * *maxPos without any critical section, so it is only safe when called from
 * a single thread. */
void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
int up, left, diag;
//Stores index of element
long long int index = m * i + j;
//Get element above
up = H[index - m] + gapScore;
//Get element on the left
left = H[index - 1] + gapScore;
//Get element on the diagonal
diag = H[index - m - 1] + matchMissmatchScore(i, j);
//Calculates the maximum (clamped at NONE==0: Smith-Waterman local alignment)
int max = NONE;
int pred = NONE;
/* === Matrix ===
 * a[0] ... a[n]
 * b[0]
 * ...
 * b[n]
 *
 * generate 'a' from 'b', if '←' insert e '↑' remove
 * a=GAATTCA
 * b=GACTT-A
 *
 * generate 'b' from 'a', if '←' insert e '↑' remove
 * b=GACTT-A
 * a=GAATTCA
 */
if (diag > max) { //same letter ↖
max = diag;
pred = DIAGONAL;
}
if (up > max) { //remove letter ↑
max = up;
pred = UP;
}
if (left > max) { //insert letter ←
max = left;
pred = LEFT;
}
//Inserts the value in the similarity and predecessor matrixes
H[index] = max;
P[index] = pred;
//Updates maximum score to be used as seed on backtrack
if (max > H[*maxPos]) {
*maxPos = index;
}
} /* End of similarityScore2 */
/*--------------------------------------------------------------------
* Function: backtrack
* Purpose: Modify matrix to print, path change from value to PATH
*/
/* Walk the predecessor matrix from maxPos back towards the alignment start,
 * flipping each visited cell's direction code to its negative (P[pos] *= PATH)
 * so printPredecessorMatrix can highlight the chosen path.
 *
 * BUG FIX: predPos was declared without an initializer; if P[maxPos] were
 * NONE on the first iteration (e.g. an empty/all-zero matrix), none of the
 * branches would assign it and `maxPos = predPos` would read an
 * uninitialized variable (undefined behavior).  It now starts at maxPos,
 * which makes that corner case a harmless self-loop exit. */
void backtrack(int* P, long long int maxPos) {
//hold maxPos value
long long int predPos = maxPos;
//backtrack from maxPos to startPos = 0
do {
if (P[maxPos] == DIAGONAL)
predPos = maxPos - m - 1;
else if (P[maxPos] == UP)
predPos = maxPos - m;
else if (P[maxPos] == LEFT)
predPos = maxPos - 1;
P[maxPos] *= PATH;
maxPos = predPos;
} while (P[maxPos] != NONE);
} /* End of backtrack */
/*--------------------------------------------------------------------
* Function: printMatrix
* Purpose: Print Matrix
*/
/* Print an n-by-m score matrix with the sequence characters of `a` as the
 * column header and the characters of `b` labelling each row; the leading
 * '-' entries mark the zero-padding row/column. */
void printMatrix(int* matrix) {
long long int i, j;
printf("-\t-\t");
for (j = 0; j < m-1; j++) {
printf("%c\t", a[j]);
}
printf("\n-\t");
for (i = 0; i < n; i++) { //Lines
for (j = 0; j < m; j++) {
if (j==0 && i>0) printf("%c\t", b[i-1]);
printf("%d\t", matrix[m * i + j]);
}
printf("\n");
}
} /* End of printMatrix */
/*--------------------------------------------------------------------
* Function: printPredecessorMatrix
* Purpose: Print predecessor matrix
*/
/* Print the predecessor matrix as arrows (↑ ← ↖, '-' for NONE).  Cells that
 * backtrack() negated (the chosen alignment path) are printed in bold red. */
void printPredecessorMatrix(int* matrix) {
long long int i, j, index;
printf("    ");
for (j = 0; j < m-1; j++) {
printf("%c ", a[j]);
}
printf("\n  ");
for (i = 0; i < n; i++) { //Lines
for (j = 0; j < m; j++) {
if (j==0 && i>0) printf("%c ", b[i-1]);
index = m * i + j;
// Negative codes were set by backtrack(): part of the alignment path.
if (matrix[index] < 0) {
printf(BOLDRED);
if (matrix[index] == -UP)
printf("↑ ");
else if (matrix[index] == -LEFT)
printf("← ");
else if (matrix[index] == -DIAGONAL)
printf("↖ ");
else
printf("- ");
printf(RESET);
} else {
if (matrix[index] == UP)
printf("↑ ");
else if (matrix[index] == LEFT)
printf("← ");
else if (matrix[index] == DIAGONAL)
printf("↖ ");
else
printf("- ");
}
}
printf("\n");
}
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
* Function: generate
* Purpose: Generate arrays a and b
*/
void generate() {
//Random seed
srand(time(NULL));
//Generates the values of a
long long int i;
for (i = 0; i < m; i++) {
int aux = rand() % 4;
if (aux == 0)
a[i] = 'A';
else if (aux == 2)
a[i] = 'C';
else if (aux == 3)
a[i] = 'G';
else
a[i] = 'T';
}
//Generates the values of b
for (i = 0; i < n; i++) {
int aux = rand() % 4;
if (aux == 0)
b[i] = 'A';
else if (aux == 2)
b[i] = 'C';
else if (aux == 3)
b[i] = 'G';
else
b[i] = 'T';
}
} /* End of generate */
/*--------------------------------------------------------------------
* External References:
* http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
* http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
* http://baba.sourceforge.net/
*/
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store *x - *y into *result, normalized so tv_usec lands in [0, 1e6).
 * NOTE: *y is used as scratch space and is modified.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec  += borrow;
    }
    /* Carry excess microseconds out of the difference. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }
    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults let the benchmark run with few/no arguments; the originals
     were read uninitialized (undefined behavior) whenever argc <= 3 / 4. */
  int Nx = 34, Ny = 34, Nz = 34;
  int Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;  /* +2 adds a one-cell halo on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* A[0] and A[1] are the two ping-pong buffers of the 7-point stencil. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 2048;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  srand(42);
  /* Initialize every cell of BOTH buffers, boundaries included: the stencil
     below reads index 0 of every dimension, and from t = 1 onward it reads
     the boundary planes of buffer 1. The original loops (starting at 1, and
     touching only A[0]) left those cells indeterminate when read. Buffer 1
     starts as a zero boundary. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            /* 7-point Jacobi update: read buffer t%2, write buffer (t+1)%2 */
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Freeing the arrays is deliberately disabled (causes performance
  // degradation of the measured region on some platforms).
  /* for(i=0; i<Nz; i++){
  for(j=0;j<Ny;j++){
  free(A[0][i][j]);
  free(A[1][i][j]);
  }
  free(A[0][i]);
  free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
6316.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  /* Fill A with the deterministic pattern (row + col) / nj. */
  int row, col;
  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  /* Dump B to stderr (20 values per output line) so live-out data is
     scanned; also usable as a correctness reference. */
  int row, col;
  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[row][col]);
      if ((row * NJ + col) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
       int nj,
       DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
       DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* The original code nested a second `omp target teams distribute parallel
     for` on the inner loop, inside this offloaded loop. A `target` region
     may not be nested inside another `target` region, so that construct was
     non-conforming. Collapsing the perfectly nested loop pair into a single
     offloaded loop keeps all of the parallelism while staying conforming
     (both i and j become predetermined-private loop variables). */
  #pragma omp target teams distribute parallel for schedule(static) collapse(2)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      /* 3x3 convolution of A centered on (i, j). */
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Driver: allocate NI x NJ arrays, initialize A, time the offloaded 2-D
     convolution kernel, then print B via polybench_prevent_dce so the
     computation cannot be optimized away. */
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
  by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
cluster_main.c | /*! \file cluster_main.c this file has the functions used by the compute cluster process.
*This process retrieves problems from the i/o clusters and returns results
*/
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <HAL/hal/hal.h>
#include <mppaipc.h>
#include <mppa/osconfig.h>
#include <omp.h>
#include "shared_defs.h"
#include "settings.h"
#include "OMPResultVector.h"
#include "OMPHitsAndDoublets.h"
#include "OMPCACell.h"
#include "OMPSimpleVector.h"
#include "OMPMixedList.h"
//! The number of cores per cluster
#define TC 16
OMPResultVector results;
//! Retrieves quadruplets out of connected doublets
/*! This function gets the connected doublets and performs a DFS to produce the quadruplets.
\param doublets The data structure of the hit and doublet data
\param layer The current layer in the search
\param idx The identifier within the layer for the current doublet we are visiting
\param ml The lists of the connections between doublets
\param top An array that holds the indices of the start of every successor list
\param foundNtuplets The result returned for our problem
\param tmpNtuplet The current path of the search
\param minHitsPerNtuplet The number of layers involved
*/
void find_ntuplets(const OMPLayerDoublets* doublets, unsigned int layer, unsigned int idx, MixedList* ml, int** top,
  OMPResultVector* foundNtuplets, OMPSimpleVector* tmpNtuplet, const unsigned int minHitsPerNtuplet) {
  int j;  /* NOTE(review): unused in this function */
  int4 found;
  int otherCell;
  if (layer == 0) {
    /* Base case: reached the innermost layer. If the path on the stack is
       long enough, assemble the four hit ids of the quadruplet from the
       three doublets recorded in tmpNtuplet and append it to the results. */
    if (sizesv(tmpNtuplet) >= minHitsPerNtuplet - 1) {
      found.elem[0]=get_inner_hit_id(doublets, tmpNtuplet->m_data[2], 0);
      found.elem[1]=get_outer_hit_id(doublets, tmpNtuplet->m_data[2], 0);
      found.elem[2]=get_outer_hit_id(doublets, tmpNtuplet->m_data[1], 1);
      found.elem[3]=get_outer_hit_id(doublets, tmpNtuplet->m_data[0], 2);
      push_backtsrv(foundNtuplets, &found);  /* push into shared results; presumably the thread-safe variant - confirm */
    }else
      return;
  } else {
    /* Recursive case: walk the successor list of doublet idx on this layer
       (list head in top[layer][idx], links stored in ml) and DFS one layer
       inward, pushing/popping the candidate to backtrack. */
    int ptr = top[layer][idx];
    while (ptr != -1) {
      int otherIdx = fetch_ml (ml, ptr);
      push_backsv(tmpNtuplet, otherIdx);  /* extend current path */
      find_ntuplets(doublets, layer-1, otherIdx, ml, top, foundNtuplets, tmpNtuplet, minHitsPerNtuplet);
      pop_backsv(tmpNtuplet, &otherCell);  /* backtrack */
      ptr = next_ml (ml, ptr);
    }
  }
}
//! everything the CA does is here.
/** The control flow is as follows:
* the process opens the file descriptors for communicating with IO,
* loops so that it can handle the different request (didn't add a termination message) and executes the communication logic.
* It gets info on the size of the problem to receive (communication, also uses sync), allocates space for these layer pairs
* and starts reading. As soon as a layer pair is received, it performs the initialization of its cells. After we get more layer pairs,
* we connect the adjacent ones and finally we find the quadruplets. Then we push the results to IO.
*/
int
main(int argc __attribute__ ((unused)), char *argv[])
{
  int rank = 0;
  /* `status` accumulates mppa_* return codes via |=, so it must start at 0:
     the original declared it uninitialized and the first `status |= ...`
     read an indeterminate value (undefined behavior). */
  int i, j, idx1, idx2;
  int status = 0;
  float fargs[6];
  const int num_a = 6*sizeof(float)+3*(theNumberOfLayers-1)*sizeof(int);
  char args[num_a];
  int iargs[3*(theNumberOfLayers-1)];
  const char *root_sync = argv[1], *d_portal = argv[2], *a_portal = argv[3];
  const char *b_portal = argv[4], *c_portal = argv[5], *e_portal = argv[6];
  /*Each cluster contributes a different bit to the root_sync mask.*/
  long long mask = (long long)1 << rank;
  /*Open the NoC special files.*/
  int root_sync_fd = mppa_open(root_sync, O_WRONLY);
  int d_portal_fd = mppa_open(d_portal, O_RDONLY);
  int a_portal_fd = mppa_open(a_portal, O_RDONLY);
  int b_portal_fd = mppa_open(b_portal, O_RDONLY);
  int c_portal_fd = mppa_open(c_portal, O_WRONLY);
  int e_portal_fd = mppa_open(e_portal, O_RDONLY);
  if (root_sync_fd < 0)
    printf ("Sync open error\n");
  if (a_portal_fd < 0)
    printf ("portal error\n");
  if (b_portal_fd < 0)
    printf ("portal error\n");
  if (c_portal_fd < 0)
    printf ("portal error\n");
  if (d_portal_fd < 0)
    printf ("portal error\n");
  if (e_portal_fd < 0)
    printf ("portal error\n");
  OMPLayerDoublets doublets[theNumberOfLayers-1];
  /*arena to be managed*/
  char* buffer = malloc(1500000);
  int fail = 0;
  int num[3];
  /*initialize statically-sized asynchronous communications*/
  mppa_aiocb_t d_portal_aiocb[1] =
    { MPPA_AIOCB_INITIALIZER(d_portal_fd, args, num_a) };
  mppa_aiocb_t c_portal_aiocb[1] =
    { MPPA_AIOCB_INITIALIZER(c_portal_fd, &results, sizeof(OMPResultVector)) };
  mppa_aiocb_set_pwrite(c_portal_aiocb, &results, sizeof(OMPResultVector), 0);
  mppa_aiocb_set_trigger(d_portal_aiocb, 1);
  status |= mppa_aio_read(d_portal_aiocb);
  for (idx1 = 0; idx1 < 100; idx1++)
    for (idx2 = 0; idx2 < 7; idx2++) {
      char* origin = buffer;
      int left = 1500000;
      int flag = 0;
      unsigned int s;
      /*synchronize with io and gets parameters*/
      status |= mppa_write(root_sync_fd, &mask, sizeof(mask));
      if (idx1 != 0 || idx2 != 0)
        mppa_aio_wait(c_portal_aiocb);  /* previous result write must finish first */
      status |= mppa_aio_wait(d_portal_aiocb);
      memcpy (fargs, args, 6*sizeof(float));
      memcpy (iargs, args+6*sizeof(float), 3*(theNumberOfLayers-1)*sizeof(int));
      /* Per-layer-pair payload sizes: doublet index pairs + 3 floats/hit. */
      num[0] = 2*sizeof(int)*iargs[0]+3*sizeof(float)*(iargs[1]+iargs[2]);
      num[1] = 2*sizeof(int)*iargs[3]+3*sizeof(float)*iargs[5];
      num[2] = 2*sizeof(int)*iargs[6]+3*sizeof(float)*iargs[8];
      char* l0; char* l1; char* l2;
      int* liptr[3];
      /*allocate space (bump allocation out of the fixed arena)*/
      left -= num[0];
      l0 = origin+left; /*allocated at the end of the buffer, is deallocated later*/
      l1 = origin;
      left -= num[1];
      origin += num[1];
      l2 = origin;
      left -= num[2];
      origin += num[2];
      s = iargs[3];
      liptr[1] = (int*) origin;
      left -= s*sizeof(int);
      origin += s*sizeof(int);
      MixedList* ml = (MixedList*) origin;
      left -= sizeof(MixedList);
      origin += sizeof(MixedList);
      if (left >= 0) {
        init_ml(ml, origin, 10000);
        left -= 2*10000*sizeof(int);
        origin += 2*10000*sizeof(int);
        if (left < 0)
          printf ("Out of memory: ml\n");
      } else {
        printf ("Out of memory: ml\n");
      }
      int* outerptr[2];
      MixedList* isOuterHitOfCell[2];
      isOuterHitOfCell[0] = (MixedList*) origin;
      left -= sizeof(MixedList);
      origin += sizeof(MixedList);
      if (left >= 0) {
        int size = iargs[0]+100;
        init_ml(isOuterHitOfCell[0], origin, size);
        left -= 2*size*sizeof(int);
        origin += 2*size*sizeof(int);
        if (left < 0)
          printf ("Out of memory: outer\n");
      } else {
        printf ("Out of memory: outer\n");
      }
      outerptr[0] = (int*) origin;
      left -= iargs[2]*sizeof(int);
      origin += iargs[2]*sizeof(int);
      if (left >= 0) {
        for (i = 0; i < iargs[2]; i++)
          outerptr[0][i] = -1;  /* -1 terminates each per-hit list */
      }
      isOuterHitOfCell[1] = (MixedList*) origin;
      left -= sizeof(MixedList);
      origin += sizeof(MixedList);
      if (left >= 0) {
        int size = iargs[3]+100;
        init_ml(isOuterHitOfCell[1], origin, size);
        left -= 2*size*sizeof(int);
        origin += 2*size*sizeof(int);
        if (left < 0)
          printf ("Out of memory: outer\n");
      } else {
        printf ("Out of memory: outer\n");
      }
      outerptr[1] = (int*) origin;
      left -= iargs[5]*sizeof(int);
      origin += iargs[5]*sizeof(int);
      if (left >= 0) {
        for (i = 0; i < iargs[5]; i++)
          outerptr[1][i] = -1;
      }
      int layer;
      int top, val;
      int thisCell[3];
      int otherCell[3];
      left -= 2*iargs[0]*sizeof(float);
      doublets[0].r = (float*) (origin+left);
      if (left < 0) {
        printf ("Out of memory: r1\n");
      }
      doublets[1].r = (float*) origin;
      left -= 2*iargs[3]*sizeof(float);
      if (left < 0) {
        printf ("Out of memory: r2\n");
      }
      origin += 2*iargs[3]*sizeof(float);
      /*initialize hit and doublet receives
        get each layer in a different communication
        the idea is that we will wait for each layer just before we process it hiding other communications*/
      mppa_aiocb_t l0_portal_aiocb[1] =
        { MPPA_AIOCB_INITIALIZER(a_portal_fd, l0, num[0]) };
      mppa_aiocb_t l1_portal_aiocb[1] =
        { MPPA_AIOCB_INITIALIZER(b_portal_fd, l1, num[1]) };
      mppa_aiocb_t l2_portal_aiocb[1] =
        { MPPA_AIOCB_INITIALIZER(e_portal_fd, l2, num[2]) };
      mppa_aiocb_set_trigger(l0_portal_aiocb, 1);
      status |= mppa_aio_read(l0_portal_aiocb);
      mppa_aiocb_set_trigger(l1_portal_aiocb, 1);
      status |= mppa_aio_read(l1_portal_aiocb);
      mppa_aiocb_set_trigger(l2_portal_aiocb, 1);
      status |= mppa_aio_read(l2_portal_aiocb);
      /*unlock the writes of io*/
      status |= mppa_write(root_sync_fd, &mask, sizeof(mask));
      status |= mppa_aio_wait(l0_portal_aiocb);
      doublets[0].size = iargs[0];
      doublets[0].indices = (int*) l0;
      l0 += 2*sizeof(int)*iargs[0];
      doublets[0].layers[0].size = iargs[1];
      doublets[0].layers[0].p = (float*) l0;
      l0 += 3*sizeof(float)*iargs[1];
      doublets[0].layers[1].size = iargs[2];
      doublets[0].layers[1].p = (float*) l0;
      /*Create layerpair 0-1*/
      if (left >= 0) {
#pragma omp parallel for num_threads(TC)
        for (j = 0; j < doublets[0].size; j++) {
          int in = doublets[0].indices[2*j];
          int out = doublets[0].indices[2*j+1];
          int inner = get_inner_hit_id (doublets, j, 0);
          float x = get_inner_x (doublets, inner, 0);
          float y = get_inner_y (doublets, inner, 0);
          doublets[0].r[2*j] = hypot(x,y);
          int outer = get_outer_hit_id (doublets, j, 0);
          x = get_outer_x (doublets, outer, 0);
          y = get_outer_y (doublets, outer, 0);
          doublets[0].r[2*j+1] = hypot(x,y);
          push_back_mlts (isOuterHitOfCell[0], &outerptr[0][outer], j);
        }
      }
      status |= mppa_aio_wait(l1_portal_aiocb);
      doublets[1].size = iargs[3];
      doublets[1].indices = (int*) l1;
      l1 += 2*sizeof(int)*iargs[3];
      doublets[1].layers[0].size = doublets[0].layers[1].size;
      doublets[1].layers[0].p = doublets[0].layers[1].p;
      doublets[1].layers[1].size = iargs[5];
      doublets[1].layers[1].p = (float*) l1;
      /*Create layerpair 1-2*/
      if (left >= 0) {
#pragma omp parallel for num_threads(TC)
        for (j = 0; j < doublets[1].size; j++) {
          int inner = get_inner_hit_id (doublets, j, 1);
          float x = get_inner_x (doublets, inner, 1);
          float y = get_inner_y (doublets, inner, 1);
          doublets[1].r[2*j] = hypot(x,y);
          int outer = get_outer_hit_id (doublets, j, 1);
          x = get_outer_x (doublets, outer, 1);
          y = get_outer_y (doublets, outer, 1);
          doublets[1].r[2*j+1] = hypot(x,y);
          push_back_mlts (isOuterHitOfCell[1], &outerptr[1][outer], j);
        }
      }
      /*connect 0-1-2*/
      if (left >= 0) {
        s = doublets[1].size;
#pragma omp parallel for num_threads(TC) private(j, top, val, thisCell, otherCell)
        for (i = 0; i < s; i++) {
          top = -1;
          int inner = get_inner_hit_id(doublets, i, 1);
          thisCell[0] = i;
          thisCell[1] = inner;
          thisCell[2] = get_outer_hit_id(doublets, i, 1);
          int optr = outerptr[0][inner];
          /*loop through doublets sharing hit*/
          while (optr != -1) {
            otherCell[0] = fetch_ml (isOuterHitOfCell[0], optr);
            otherCell[1] = get_inner_hit_id(doublets, otherCell[0], 0);
            otherCell[2] = get_outer_hit_id(doublets, otherCell[0], 0);
            if (check_alignment_and_tag(doublets, thisCell, 1, otherCell,
                                        fargs[0], fargs[1], fargs[2],
                                        fargs[3], fargs[4], fargs[5])) {
              val = otherCell[0];
              top = push_back_ml (ml, top, val);
              if (top < 0) {
                printf ("Error: out of space\n");
                results.m_size = -1;
                left = -1;
                break;
              }
            }
            optr = next_ml (isOuterHitOfCell[0], optr);
          }
          liptr[1][i] = top;
        }
      }
      status |= mppa_aio_wait(l2_portal_aiocb);
      doublets[2].size = iargs[6];
      doublets[2].indices = (int*) l2;
      l2 += 2*sizeof(int)*iargs[6];
      doublets[2].layers[0].size = doublets[1].layers[1].size;
      doublets[2].layers[0].p = doublets[1].layers[1].p;
      doublets[2].layers[1].size = iargs[8];
      doublets[2].layers[1].p = (float*) l2;
      if (left < 0) {
        flag = 1;
        results.m_size = -1;
        goto res;
      }
      /* Layer-pair 0's payload and radii at the arena's end can be reused now. */
      left += num[0];
      left += 2*iargs[0]*sizeof(float);
      doublets[2].r = (float*) origin;
      left -= 2*iargs[6]*sizeof(float);
      origin += 2*iargs[6]*sizeof(float);
      s = iargs[6];
      liptr[2] = (int*) origin;
      left -= s*sizeof(int);
      origin += s*sizeof(int);
      if (left < 0) {
        printf ("Out of memory: r3\n");
        flag = 1;
        results.m_size = -1;
        goto res;
      }
      /*Create layerpair 2-3*/
#pragma omp parallel for num_threads(TC)
      for (j = 0; j < doublets[2].size; j++) {
        int inner = get_inner_hit_id (doublets, j, 2);
        float x = get_inner_x (doublets, inner, 2);
        float y = get_inner_y (doublets, inner, 2);
        doublets[2].r[2*j] = hypot(x,y);
        int outer = get_outer_hit_id (doublets, j, 2);
        x = get_outer_x (doublets, outer, 2);
        y = get_outer_y (doublets, outer, 2);
        doublets[2].r[2*j+1] = hypot(x,y);
      }
      if (left < 0) {
        flag = 1;
        results.m_size = -1;
        goto res;
      }
      /*connect 1-2-3*/
      for (layer = 2; layer < theNumberOfLayers-1; layer++) {
        s = doublets[layer].size;
#pragma omp parallel for num_threads(TC) private(j, top, val, thisCell, otherCell)
        for (i = 0; i < s; i++) {
          top = -1;
          int inner = get_inner_hit_id(doublets, i, layer);
          thisCell[0] = i;
          thisCell[1] = inner;
          thisCell[2] = get_outer_hit_id(doublets, i, layer);
          int optr = outerptr[layer-1][inner];
          while (optr != -1) {
            otherCell[0] = fetch_ml (isOuterHitOfCell[layer-1], optr);
            otherCell[1] = get_inner_hit_id(doublets, otherCell[0], layer-1);
            otherCell[2] = get_outer_hit_id(doublets, otherCell[0], layer-1);
            if (check_alignment_and_tag(doublets, thisCell, layer, otherCell,
                                        fargs[0], fargs[1], fargs[2],
                                        fargs[3], fargs[4], fargs[5])) {
              val = otherCell[0];
              top = push_back_ml (ml, top, val);
              if (top < 0) {
                printf ("Error: out of space\n");
                results.m_size = -1;
                flag = 1;
                break;
              }
            }
            optr = next_ml (isOuterHitOfCell[layer-1], optr);
          }
          liptr[layer][i] = top;
        }
        if (flag == 1)
          goto res;
      }
      /* NOTE(review): `numberOfLayers` here vs `theNumberOfLayers` elsewhere
         - confirm both name the same constant. */
      unsigned int lastLayerPairIndex = numberOfLayers - 2;
      resetrv(&results);
      OMPSimpleVector stack;
      /*get the quadruplets*/
      s = doublets[lastLayerPairIndex].size;
#pragma omp parallel for num_threads(TC) private(stack)
      for (i = 0; i < s; i++) {
        resetsv(&stack);
        push_backsv(&stack, i);
        find_ntuplets(doublets, lastLayerPairIndex, i, ml, liptr, &results, &stack, 4);
      }
res:
      if (results.m_size == -1)
        fail++;
      /*starts sending results and getting new parameters*/
      mppa_aiocb_set_trigger(d_portal_aiocb, 1);
      status |= mppa_aio_read(d_portal_aiocb);
      status |= mppa_pwrite(c_portal_fd, &results, sizeof(OMPResultVector), 0);
      mppa_aio_write(c_portal_aiocb);
    }
  printf ("Failed to compute: %d\n", fail);
  mppa_exit(0);
  return 0;
}
|
delete_inf_refcount.c | // RUN: %libomptarget-compile-run-and-check-generic
// fails with error message 'Unable to generate target entries' on amdgcn
// XFAIL: amdgcn-amd-amdhsa
#include <stdio.h>
#include <omp.h>
#pragma omp declare target
int isHost;  /* set inside the target region; mapped explicitly below */
#pragma omp end declare target
int main(void) {
  isHost = -1;  /* sentinel: a negative value means the update never happened */
  #pragma omp target enter data map(to: isHost)
  #pragma omp target
  { isHost = omp_is_initial_device(); }
  #pragma omp target update from(isHost)
  if (isHost < 0) {
    printf("Runtime error, isHost=%d\n", isHost);
  }
  /* delete drops the device copy regardless of its reference count */
  #pragma omp target exit data map(delete: isHost)
  // CHECK: Target region executed on the device
  printf("Target region executed on the %s\n", isHost ? "host" : "device");
  return isHost;
}
|
dz1z2.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
void Usage(char *prog_name);
#define ACCURACY 0.01
/*
* worksharing
*/
/* Estimate pi serially with the Leibniz series: pi = 4 * sum (-1)^i/(2i+1).
   Reads the term count n from argv[1]; exits via Usage() on bad input.
   Returns the estimate. */
double sequential_solution(int argc, char *argv[])
{
    long long n, i;
    /* Must be initialized: the printf below reads `factor` before the loop
       ever assigns it (previously undefined behavior). */
    double factor = 0.0;
    double sum = 0.0;
    if (argc != 2)
        Usage(argv[0]);
    n = strtoll(argv[1], NULL, 10);
    if (n < 1)
        Usage(argv[0]);
    printf("Before for loop, factor = %f.\n", factor);
    for (i = 0; i < n; i++)
    {
        factor = (i % 2 == 0) ? 1.0 : -1.0;  /* alternating sign */
        sum += factor / (2 * i + 1);
    }
    printf("After for loop, factor = %f.\n", factor);
    sum = 4.0 * sum;
    printf("With n = %lld terms\n", n);
    printf(" Our estimate of pi = %.14f\n", sum);
    printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    return sum;
}
/* Same Leibniz-series pi estimate as sequential_solution, but with the loop
   parallelized: sum is a reduction, and lastprivate(factor) preserves the
   serial version's "factor after loop" observable value. */
double parallel_solution(int argc, char *argv[])
{
    long long n, i;
    /* Must be initialized: the printf below reads `factor` before the loop
       ever assigns it (previously undefined behavior). */
    double factor = 0.0;
    double sum = 0.0;
    if (argc != 2)
        Usage(argv[0]);
    n = strtoll(argv[1], NULL, 10);
    if (n < 1)
        Usage(argv[0]);
    printf("Before for loop, factor = %f.\n", factor);
#pragma omp parallel for reduction(+:sum) lastprivate(factor)
    for (i = 0; i < n; i++)
    {
        factor = (i % 2 == 0) ? 1.0 : -1.0;  /* alternating sign */
        sum += factor / (2 * i + 1);
    }
    printf("After for loop, factor = %f.\n", factor);
    sum = 4.0 * sum;
    printf("With n = %lld terms\n", n);
    printf(" Our estimate of pi = %.14f\n", sum);
    printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    return sum;
}
int main(int argc, char *argv[])
{
    /* Run the serial and parallel pi estimators, time each with
       omp_get_wtime(), and compare their results within ACCURACY. */
    double t0, t1;

    printf("---------------------Sequential execution---------------------\n");
    t0 = omp_get_wtime();
    double pi_seq = sequential_solution(argc, argv);
    t1 = omp_get_wtime();
    double elapsed_seq = t1 - t0;

    printf("----------------------Parallel execution----------------------\n");
    t0 = omp_get_wtime();
    double pi_par = parallel_solution(argc, argv);
    t1 = omp_get_wtime();
    double elapsed_par = t1 - t0;

    printf("\nSequential elapsed time: %lfs\n", elapsed_seq);
    printf("Parallel elapsed time: %lfs\n", elapsed_par);

    if (fabs(pi_seq - pi_par) < ACCURACY) {
        printf("Test PASSED\n");
    } else {
        printf("Test FAILED\n");
    }
    return 0;
}
/* Print usage help to stderr and terminate. The program takes exactly one
   argument (n): both solvers require argc == 2, so the old message's extra
   <thread_count> argument was never read. Exit non-zero so callers can
   detect the usage error. */
void Usage(char *prog_name)
{
    fprintf(stderr, "usage: %s <n>\n", prog_name);
    fprintf(stderr, " n is the number of terms and should be >= 1\n");
    exit(EXIT_FAILURE);
}
|
treeio.c | /****************************************************************************/
/* TREEIO.C: I/O routines for hierarchical N-body code. Public routines: */
/* inputdata(), startoutput(), output(), savestate(), restorestate(). */
/* Copyright (c) 2001 by Joshua E. Barnes, Honolulu, Hawai`i. */
/****************************************************************************/
#include "stdinc.h"
#include "mathfns.h"
#include "vectmath.h"
#include "getparam.h"
#include "treecode.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <strings.h>
/*
* Prototypes for local routines.
*/
local void outputdata(void); /* write N-body data */
local void diagnostics(void); /* eval N-body diagnostics */
local void in_int(stream, int *); /* input integer value */
local void in_real(stream, real *); /* input real value */
local void in_vector(stream, vector); /* input vector of reals */
local void out_int(stream, int); /* output integer value */
local void out_real(stream, real); /* output real value */
local void out_vector(stream, vector); /* output vector of reals */
/*
* Diagnositc output variables.
*/
local real mtot; /* total mass of system */
local real etot[3]; /* Etot, KE, PE of system */
local matrix keten; /* kinetic energy tensor */
local matrix peten; /* potential energy tensor */
local vector cmpos; /* center of mass position */
local vector cmvel; /* center of mass velocity */
local vector amvec; /* angular momentum vector */
/*
* INPUTDATA: read initial conditions from input file.
*/
void inputdata(void)
{
    stream instr;
    int ndim;
    bodyptr p;
    /* File layout: nbody, ndim, tnow header, then the mass array, then the
       position vectors, then the velocity vectors. The in_* helpers switch
       between text and binary format via BINARYIO. */
    instr = stropen(infile, "r"); /* open input stream */
    in_int(instr, &nbody); /* read number of bodies */
    if (nbody < 1)
        error("inputdata: nbody = %d is absurd\n", nbody);
    in_int(instr, &ndim); /* read number of dims */
    if (ndim != NDIM)
        error("inputdata: ndim = %d; expected %d\n", ndim, NDIM);
    in_real(instr, &tnow); /* read starting time */
    bodytab = (bodyptr) allocate(nbody * sizeof(body));
    /* allocate body array */
    for (p = bodytab; p < bodytab+nbody; p++) /* loop over all bodies */
        in_real(instr, &Mass(p)); /* read mass of each */
    for (p = bodytab; p < bodytab+nbody; p++)
        in_vector(instr, Pos(p)); /* read position of each */
    for (p = bodytab; p < bodytab+nbody; p++)
        in_vector(instr, Vel(p)); /* read velocity of each */
    fclose(instr); /* close input stream */
    if (scanopt(options, "reset-time")) /* reset starting time? */
        tnow = 0.0; /* then set it to zero */
    for (p = bodytab; p < bodytab+nbody; p++) /* loop over new bodies */
        Type(p) = BODY; /* initialize type field */
}
/*
* STARTOUTPUT: begin output to log file.
*/
void startoutput(void)
{
    /* Print run headline and parameter table; column set varies with the
       USEFREQ (freq/freqout vs dtime/dtout) and QUICKSCAN (theta column)
       compile-time options. Also writes the initial state file if given. */
    printf("\n%s\n", headline); /* print headline, params */
#if defined(USEFREQ)
    printf("\n%8s%10s%10s", "nbody", "freq", "eps");
#else
    printf("\n%8s%10s%10s", "nbody", "dtime", "eps");
#endif
#if !defined(QUICKSCAN)
    printf("%10s", "theta");
#endif
#if defined(USEFREQ)
    printf("%10s%10s%10s\n", "usequad", "freqout", "tstop");
    printf("%8d%10.2f%10.4f", nbody, freq, eps);
#else
    printf("%10s%10s%10s\n", "usequad", "dtout", "tstop");
    printf("%8d%10.5f%10.4f", nbody, dtime, eps);
#endif
#if !defined(QUICKSCAN)
    printf("%10.2f", theta);
#endif
#if defined(USEFREQ)
    printf("%10s%10.2f%10.4f\n", usequad ? "true" : "false", freqout, tstop);
#else
    printf("%10s%10.5f%10.4f\n", usequad ? "true" : "false", dtout, tstop);
#endif
    if (! strnull(options)) /* print options, if any */
        printf("\n\toptions: %s\n", options);
    if (! strnull(savefile)) /* was state file given? */
        savestate(savefile); /* save initial data */
}
/*
 * FORCEREPORT: print statistics on tree construction and force calculation.
*/
void forcereport(void)
{
    /* Print one table of per-step force-calculation statistics.
       ftree = (nbody + ncell - 1) / ncell; presumably a bodies-per-cell
       tree-filling metric - confirm against treecode docs. */
    printf("\n\t%8s%8s%8s%8s%10s%10s%8s\n",
           "rsize", "tdepth", "ftree",
           "actmax", "nbbtot", "nbctot", "CPUfc");
    printf("\t%8.1f%8d%8.3f%8d%10d%10d%8.3f\n",
           rsize, tdepth, (nbody + ncell - 1) / ((real) ncell),
           actmax, nbbcalc, nbccalc, cpuforce);
}
/*
* OUTPUT: compute diagnostics and output body data.
*/
void output(void)
{
    real cmabs, amabs, teff;
    /* Compute diagnostics, print the energy/momentum summary line, then
       decide whether this step is close enough to tout to emit body data
       (teff anticipates slightly so rounding does not skip an output). */
    diagnostics(); /* compute std diagnostics */
    ABSV(cmabs, cmvel); /* find magnitude of cm vel */
    ABSV(amabs, amvec); /* find magnitude of J vect */
    printf("\n %8s%8s%8s%8s%8s%8s%8s%8s\n",
           "time", "|T+U|", "T", "-U", "-T/U", "|Vcom|", "|Jtot|", "CPUtot");
    printf(" %8.3f%8.5f%8.5f%8.5f%8.5f%8.5f%8.5f%8.3f\n",
           tnow, ABS(etot[0]), etot[1], -etot[2], -etot[1]/etot[2],
           cmabs, amabs, cputime());
#if defined(USEFREQ)
    teff = tnow + (freq > 0 ? 0.125/freq : 0); /* anticipate slightly... */
#else
    teff = tnow + dtime/8; /* anticipate slightly... */
#endif
    if (! strnull(outfile) && teff >= tout) /* time for data output? */
        outputdata();
    if (! strnull(savefile)) /* was state file given? */
        savestate(savefile); /* save data for restart */
}
/*
* OUTPUTDATA: output body data.
*/
void outputdata(void)
{
    char namebuf[256];
    struct stat buf;
    stream outstr;
    bodyptr p;
    /* outfile may contain a printf-style %d slot for the step number;
       stat() decides between creating a fresh file and appending. */
    sprintf(namebuf, outfile, nstep); /* construct output name */
    if (stat(namebuf, &buf) != 0) /* no output file exists? */
        outstr = stropen(namebuf, "w"); /* create & open for output */
    else /* else file already exists */
        outstr = stropen(namebuf, "a"); /* reopen and append output */
    /* Same record order as inputdata reads: header, masses, positions,
       velocities, then optional potentials/accelerations. */
    out_int(outstr, nbody); /* write number of bodies */
    out_int(outstr, NDIM); /* number of dimensions */
    out_real(outstr, tnow); /* and current time value */
    for (p = bodytab; p < bodytab+nbody; p++) /* loop over all bodies */
        out_real(outstr, Mass(p)); /* output mass of each */
    for (p = bodytab; p < bodytab+nbody; p++)
        out_vector(outstr, Pos(p)); /* output positions */
    for (p = bodytab; p < bodytab+nbody; p++)
        out_vector(outstr, Vel(p)); /* output velocities */
    if (scanopt(options, "out-phi")) /* potentials requested? */
        for (p = bodytab; p < bodytab+nbody; p++)
            out_real(outstr, Phi(p)); /* output potentials */
    if (scanopt(options, "out-acc")) /* accelerations requested? */
        for (p = bodytab; p < bodytab+nbody; p++)
            out_vector(outstr, Acc(p)); /* output accelerations */
    fclose(outstr); /* close up output file */
    printf("\n\tdata output to file %s at time %f\n", namebuf, tnow);
#if defined(USEFREQ)
    tout += 1.0 / freqout; /* schedule next output */
#else
    tout += dtout; /* schedule next output */
#endif
}
/*
* DIAGNOSTICS: compute set of dynamical diagnostics.
*/
/* DIAGNOSTICS: accumulate total mass, kinetic/potential energy, the KE and
   PE tensors, angular momentum, and the center-of-mass position/velocity
   over all bodies, leaving the results in the file-scope variables. */
local void diagnostics(void)
{
    register bodyptr p;
    real velsq;
    vector tmpv;
    matrix tmpt;

    mtot = 0.0; /* zero total mass */
    etot[1] = etot[2] = 0.0; /* zero total KE and PE */
    CLRM(keten); /* zero ke tensor */
    CLRM(peten); /* zero pe tensor */
    CLRV(amvec); /* zero am vector */
    CLRV(cmpos); /* zero c. of m. position */
    CLRV(cmvel); /* zero c. of m. velocity */
    /* NOTE: the former "#pragma omp parallel for private(p)" was removed.
       Every statement in this loop accumulates into shared state (mtot,
       etot, keten, peten, amvec, cmpos, cmvel) with no reduction clause, so
       running it in parallel was a data race yielding wrong totals; the
       macro-based matrix/vector sums do not fit OpenMP's built-in
       reductions, so the loop runs serially. */
    for (p = bodytab; p < bodytab+nbody; p++) { /* loop over all particles */
        mtot += Mass(p); /* sum particle masses */
        DOTVP(velsq, Vel(p), Vel(p)); /* square vel vector */
        etot[1] += 0.5 * Mass(p) * velsq; /* sum current KE */
        etot[2] += 0.5 * Mass(p) * Phi(p); /* and current PE */
        MULVS(tmpv, Vel(p), 0.5 * Mass(p)); /* sum 0.5 m v_i v_j */
        OUTVP(tmpt, tmpv, Vel(p));
        ADDM(keten, keten, tmpt);
        MULVS(tmpv, Pos(p), Mass(p)); /* sum m r_i a_j */
        OUTVP(tmpt, tmpv, Acc(p));
        ADDM(peten, peten, tmpt);
        CROSSVP(tmpv, Vel(p), Pos(p)); /* sum angular momentum */
        MULVS(tmpv, tmpv, Mass(p));
        ADDV(amvec, amvec, tmpv);
        MULVS(tmpv, Pos(p), Mass(p)); /* sum cm position */
        ADDV(cmpos, cmpos, tmpv);
        MULVS(tmpv, Vel(p), Mass(p)); /* sum cm momentum */
        ADDV(cmvel, cmvel, tmpv);
    }
    etot[0] = etot[1] + etot[2]; /* sum KE and PE */
    DIVVS(cmpos, cmpos, mtot); /* normalize cm coords */
    DIVVS(cmvel, cmvel, mtot);
}
/*
* IN_INT, IN_REAL, IN_VECTOR: low level input routines.
*/
/* Read one int from str into *iptr; text scanf or raw fread per BINARYIO. */
local void in_int(stream str, int *iptr)
{
#if !defined(BINARYIO)
    if (fscanf(str, "%d", iptr) != 1)
        error("in_int: input conversion error\n");
#else
    if (fread((void *) iptr, sizeof(int), 1, str) != 1)
        error("in_int: fread failed\n");
#endif
}
/* Read one real from str into *rptr. Text mode scans into a double first so
   the %lf conversion works whether real is float or double. */
local void in_real(stream str, real *rptr)
{
    double tmp;
#if !defined(BINARYIO)
    if (fscanf(str, "%lf", &tmp) != 1)
        error("in_real: input conversion error\n");
    *rptr = tmp;
#else
    if (fread((void *) rptr, sizeof(real), 1, str) != 1)
        error("in_real: fread failed\n");
#endif
}
/* Read one NDIM-vector of reals from str into vec (text path assumes
   NDIM == 3, matching the three %lf conversions). */
local void in_vector(stream str, vector vec)
{
    double tmpx, tmpy, tmpz;
#if !defined(BINARYIO)
    if (fscanf(str, "%lf%lf%lf", &tmpx, &tmpy, &tmpz) != 3)
        error("in_vector: input conversion error\n");
    vec[0] = tmpx;
    vec[1] = tmpy;
    vec[2] = tmpz;
#else
    if (fread((void *) vec, sizeof(real), NDIM, str) != NDIM)
        error("in_vector: fread failed\n");
#endif
}
/*
* OUT_INT, OUT_REAL, OUT_VECTOR: low level output routines.
*/
#define IFMT " %d" /* output format for ints */
#define RFMT " %14.7E" /* output format for reals */
/* Write one int to str; text IFMT line or raw fwrite per BINARYIO. */
local void out_int(stream str, int ival)
{
#if !defined(BINARYIO)
    if (fprintf(str, IFMT "\n", ival) < 0)
        error("out_int: fprintf failed\n");
#else
    if (fwrite((void *) &ival, sizeof(int), 1, str) != 1)
        error("out_int: fwrite failed\n");
#endif
}
/* Write one real to str; text RFMT line or raw fwrite per BINARYIO. */
local void out_real(stream str, real rval)
{
#if !defined(BINARYIO)
    if (fprintf(str, RFMT "\n", rval) < 0)
        error("out_real: fprintf failed\n");
#else
    if (fwrite((void *) &rval, sizeof(real), 1, str) != 1)
        error("out_real: fwrite failed\n");
#endif
}
/* Write one NDIM-vector of reals to str on a single text line (text path
   assumes NDIM == 3), or as raw binary per BINARYIO. */
local void out_vector(stream str, vector vec)
{
#if !defined(BINARYIO)
    if (fprintf(str, RFMT RFMT RFMT "\n", vec[0], vec[1], vec[2]) < 0)
        error("out_vector: fprintf failed\n");
#else
    if (fwrite((void *) vec, sizeof(real), NDIM, str) != NDIM)
        error("out_vector: fwrite failed\n");
#endif
}
/*
* SAVESTATE: write current state to disk file.
*/
#define safewrite(ptr,len,str) \
if (fwrite((void *) ptr, len, 1, str) != 1) \
error("savestate: fwrite failed\n")
void savestate(string pattern)
{
    char namebuf[256];
    stream str;
    int nchars;
    /* Fields must be written in exactly the order restorestate() reads
       them; the alternating filename (nstep & 1) keeps the previous
       snapshot intact if this write is interrupted. */
    sprintf(namebuf, pattern, nstep & 1); /* construct alternate name */
    str = stropen(namebuf, "w!");
    nchars = strlen(getargv0()) + 1;
    safewrite(&nchars, sizeof(int), str);
    safewrite(getargv0(), nchars * sizeof(char), str);
    nchars = strlen(getversion()) + 1;
    safewrite(&nchars, sizeof(int), str);
    safewrite(getversion(), nchars * sizeof(char), str);
#if defined(USEFREQ)
    safewrite(&freq, sizeof(real), str);
#else
    safewrite(&dtime, sizeof(real), str);
#endif
#if !defined(QUICKSCAN)
    safewrite(&theta, sizeof(real), str);
#endif
    safewrite(&usequad, sizeof(bool), str);
    safewrite(&eps, sizeof(real), str);
    nchars = strlen(options) + 1;
    safewrite(&nchars, sizeof(int), str);
    safewrite(options, nchars * sizeof(char), str);
    safewrite(&tstop, sizeof(real), str);
#if defined(USEFREQ)
    safewrite(&freqout, sizeof(real), str);
#else
    safewrite(&dtout, sizeof(real), str);
#endif
    safewrite(&tnow, sizeof(real), str);
    safewrite(&tout, sizeof(real), str);
    safewrite(&nstep, sizeof(int), str);
    safewrite(&rsize, sizeof(real), str);
    safewrite(&nbody, sizeof(int), str);
    safewrite(bodytab, nbody * sizeof(body), str);
    fclose(str);
}
/*
* RESTORESTATE: restore state from disk file.
*/
#define saferead(ptr,len,str) \
if (fread((void *) ptr, len, 1, str) != 1) \
error("restorestate: fread failed\n")
void restorestate(string file)
{
    stream str;
    int nchars;
    string program, version;
    /* Mirror image of savestate(): reads fields in the identical order.
       A program/version mismatch only warns - the load proceeds. */
    str = stropen(file, "r");
    saferead(&nchars, sizeof(int), str);
    program = (string) allocate(nchars * sizeof(char));
    saferead(program, nchars * sizeof(char), str);
    saferead(&nchars, sizeof(int), str);
    version = (string) allocate(nchars * sizeof(char));
    saferead(version, nchars * sizeof(char), str);
    if (! streq(program, getargv0()) || /* check program, version */
        ! streq(version, getversion()))
        printf("warning: state file may be outdated\n\n");
#if defined(USEFREQ)
    saferead(&freq, sizeof(real), str);
#else
    saferead(&dtime, sizeof(real), str);
#endif
#if !defined(QUICKSCAN)
    saferead(&theta, sizeof(real), str);
#endif
    saferead(&usequad, sizeof(bool), str);
    saferead(&eps, sizeof(real), str);
    saferead(&nchars, sizeof(int), str);
    options = (string) allocate(nchars * sizeof(char));
    saferead(options, nchars * sizeof(char), str);
    saferead(&tstop, sizeof(real), str);
#if defined(USEFREQ)
    saferead(&freqout, sizeof(real), str);
#else
    saferead(&dtout, sizeof(real), str);
#endif
    saferead(&tnow, sizeof(real), str);
    saferead(&tout, sizeof(real), str);
    saferead(&nstep, sizeof(int), str);
    saferead(&rsize, sizeof(real), str);
    saferead(&nbody, sizeof(int), str);
    bodytab = (bodyptr) allocate(nbody * sizeof(body));
    saferead(bodytab, nbody * sizeof(body), str);
    fclose(str);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
  Define declarations.
*/
#define BezierQuantum 200
#define DrawEpsilon (1.0e-10)
#define EllipseEpsilon (0.0001)
/*
  Report a nonconforming drawing primitive and abandon the enclosing loop.
  NOTE: the expansion ends in `break' and assigns the caller's local
  `status', so this macro may only be used directly inside a loop (or
  switch) in a scope that declares `status'.  It is deliberately NOT
  wrapped in do { } while (0): that would capture the `break'.
*/
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
  Typedef declarations.
*/

/*
  One monotonic run of polygon points (all ascending or all descending in
  y), as produced by ConvertPathToPolygon() for the scanline rasterizer.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* bounding box of this edge's points */

  double
    scanline;         /* initialized to -1.0; presumably the last scanline
                         processed — confirm against the rasterizer */

  PointInfo
    *points;          /* vertices, stored ascending in y (reversed if the
                         path ran upward) */

  size_t
    number_points;    /* count of entries in points[] */

  ssize_t
    direction;        /* 1 if the path ran downward (+y), 0 if upward */

  MagickBooleanType
    ghostline;        /* edge synthesized to close an open subpath */

  size_t
    highwater;        /* initialized to 0; presumably a resume index for
                         scanline processing — confirm against caller */
} EdgeInfo;

/*
  Center/axes/rotation of an ellipse-like element (usage not visible in
  this chunk).
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  A polygon in sorted-edge form: the efficient representation used for
  rendering (see ConvertPathToPolygon()).
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/*
  Path opcodes: MoveToCode starts a closed subpath, OpenCode marks a
  subpath that does not return to its start, GhostlineCode begins the
  invisible segment that closes an open subpath, LineToCode extends the
  current subpath, and EndCode terminates the path array.
*/
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/*
  One vector-path element: a point tagged with its opcode.
*/
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(PrimitiveInfo *,const char *,ExceptionInfo *);
static void
TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(PrimitiveInfo *,const size_t),
TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo,
const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo,
PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *fresh_info;

  /*
    AcquireCriticalMemory() is expected to abort rather than return NULL,
    so no allocation check is required; GetDrawInfo() installs the default
    values.
  */
  fresh_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*fresh_info));
  GetDrawInfo((ImageInfo *) NULL,fresh_info);
  return(fresh_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized DrawInfo; a NULL draw_info means the
    caller just wants the defaults.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Fix: test the SOURCE's primitive field.  The original tested
    clone_info->primitive (still at its default), so the primitive string
    was never cloned.
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a heap array terminated by a near-zero entry;
        count it, then deep-copy including the terminator.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /*
        The struct copy above aliased the stops array; replace it with a
        deep copy so both structures own independent storage.
      */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,
        draw_info->gradient.stops,(size_t) number_stops*
        sizeof(*clone_info->gradient.stops));
    }
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  clone_info->bounds=draw_info->bounds;
  clone_info->clip_units=draw_info->clip_units;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->render=draw_info->render;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info,
% const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int CompareEdges(const void *x,const void *y)
{
register const EdgeInfo
*p,
*q;
/*
Compare two edges.
*/
p=(const EdgeInfo *) x;
q=(const EdgeInfo *) y;
if ((p->points[0].y-DrawEpsilon) > q->points[0].y)
return(1);
if ((p->points[0].y+DrawEpsilon) < q->points[0].y)
return(-1);
if ((p->points[0].x-DrawEpsilon) > q->points[0].x)
return(1);
if ((p->points[0].x+DrawEpsilon) < q->points[0].x)
return(-1);
if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)-
(p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0)
return(1);
return(-1);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  LogPolygonInfo() dumps every edge of a PolygonInfo to the draw event
  log: per-edge direction, ghostline flag, and bounds, followed by each
  point.  Debugging aid only; callers gate it behind IsEventLogging().
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    /* One header line per edge... */
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    /* ...then one line per point. */
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
/*
  ReversePoints() reverses a point array in place.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  /*
    Walk inward from both ends, swapping; the pointers meet (or cross) at
    the middle, which also handles the empty and single-point cases.
  */
  for (head=0, tail=(ssize_t) number_points-1; head < tail; head++, tail--)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < DrawEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),CompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/*
  LogPathInfo() dumps a vector path (one line per element: point plus
  decoded opcode) to the draw event log.  Debugging aid only.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
/*
  ConvertPrimitiveToPath() flattens a PrimitiveInfo array into a PathInfo
  array of move-to/line-to opcodes, eliminating consecutive duplicate
  points and synthesizing a ghostline close for subpaths that do not end
  where they began.  Returns NULL for primitives that carry no path
  geometry or on allocation failure; the caller owns the returned array.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,            /* first point of the current subpath */
    q;            /* last point emitted (duplicate elimination) */

  register ssize_t
    i,
    n;            /* next free slot in path_info */

  ssize_t
    coordinates,  /* points remaining in the current primitive */
    start;        /* path_info index of the current subpath's first point */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* These primitive kinds carry no path geometry. */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  /*
    Worst case two path entries per input point, plus the EndCode entry.
  */
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /* First point of a new primitive begins a subpath. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
      }
    coordinates--;
    /*
      Eliminate duplicate points.
    */
    if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= DrawEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= DrawEpsilon))
      {
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;   /* not yet at the end of this subpath */
    if ((fabs(p.x-primitive_info[i].point.x) < DrawEpsilon) &&
        (fabs(p.y-primitive_info[i].point.y) < DrawEpsilon))
      continue;   /* subpath ended at its start: naturally closed */
    /*
      Mark the p point as open if it does not match the q.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo
% structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Fix: validate the handle BEFORE the first dereference.  The original
    read draw_info->debug ahead of the NULL assertion, defeating the
    check.
  */
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Release every owned string, pattern image, and table, then poison the
    signature and free the structure itself.
  */
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  DestroyEdge() frees edge `edge' of the polygon, closes the gap in the
  edge table, and returns the remaining edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  size_t
    remaining;

  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  remaining=--polygon_info->number_edges;
  /* Shift the tail of the table down over the vacated slot. */
  if (edge < remaining)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (remaining-edge)*sizeof(*polygon_info->edges));
  return(remaining);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  DestroyPolygonInfo() releases each edge's point list, then the edge
  table, then the structure itself; always returns NULL.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  size_t
    remaining;

  for (remaining=polygon_info->number_edges; remaining > 0; remaining--)
    polygon_info->edges[remaining-1].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[remaining-1].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AffineEdge() clips the destination scanline segment [edge->x1,edge->x2]
  at height y against the region that maps back inside `image' under the
  (inverse) transform `affine'.  Returns the clipped segment; x2 < x1
  signals that the scanline misses the image entirely.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* z is the mapped x coordinate at destination x == 0 on this scanline. */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= DrawEpsilon)
    {
      /* Positive x scale: solve for x where the mapping hits 0 and columns. */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -DrawEpsilon)
      {
        /* Negative x scale: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* Degenerate x scale: the whole scanline maps to column z. */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;  /* force x2 < x1: empty segment */
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= DrawEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -DrawEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  InverseAffineMatrix() returns the inverse of a 2-D affine transform.
  PerceptibleReciprocal() guards the 1/determinant against a singular
  (or near-singular) matrix.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    det;

  det=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  /* Invert the 2x2 linear part... */
  inverse_affine.sx=det*affine->sy;
  inverse_affine.rx=det*(-affine->rx);
  inverse_affine.ry=det*(-affine->ry);
  inverse_affine.sy=det*affine->sx;
  /* ...then map the translation through it. */
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  /*
    Map the source's four corners into destination space and take their
    axis-aligned bounding box.
  */
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    /*
      Clip this scanline against the transformed source's footprint; skip
      rows the source does not touch (x2 < x1).
    */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /*
        Map destination (x,y) back into the source and composite the
        interpolated source pixel over the destination pixel.  (The
        original also maintained an x_offset counter that was never read;
        it has been removed.)
      */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
% PolygonInfo *polygon_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
  const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *annotate_info;

  PointInfo
    corner_end,
    corner_start,
    resolution;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Render the per-edge and overall bounding rectangles of the polygon;
    a debugging aid for developers working on the rendering algorithm.
  */
  annotate_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) QueryColorCompliance("#000F",AllCompliance,&annotate_info->fill,
    exception);
  /*
    Resolve the rendering resolution (default 96 DPI) from the density.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (annotate_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(annotate_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /*
    Half the effective stroke width; every rectangle is padded by this.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&annotate_info->affine)*
    annotate_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounding boxes.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /*
        Pad by the stroke width and clamp each coordinate to the image area.
      */
      bounds.x1-=mid;
      if (bounds.x1 < 0.0)
        bounds.x1=0.0;
      else
        if (bounds.x1 >= (double) image->columns)
          bounds.x1=(double) image->columns-1;
      bounds.y1-=mid;
      if (bounds.y1 < 0.0)
        bounds.y1=0.0;
      else
        if (bounds.y1 >= (double) image->rows)
          bounds.y1=(double) image->rows-1;
      bounds.x2+=mid;
      if (bounds.x2 < 0.0)
        bounds.x2=0.0;
      else
        if (bounds.x2 >= (double) image->columns)
          bounds.x2=(double) image->columns-1;
      bounds.y2+=mid;
      if (bounds.y2 < 0.0)
        bounds.y2=0.0;
      else
        if (bounds.y2 >= (double) image->rows)
          bounds.y2=(double) image->rows-1;
      /*
        One rectangle per edge: red for non-zero direction, green otherwise.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        const char
          *stroke_color;

        stroke_color=polygon_info->edges[i].direction != 0 ? "red" : "green";
        (void) QueryColorCompliance(stroke_color,AllCompliance,
          &annotate_info->stroke,exception);
        corner_start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        corner_start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        corner_end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        corner_end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,corner_start,corner_end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        (void) DrawPrimitive(image,annotate_info,primitive_info,exception);
      }
    }
  /*
    Finally, the overall bounding rectangle in blue.
  */
  (void) QueryColorCompliance("blue",AllCompliance,&annotate_info->stroke,
    exception);
  corner_start.x=(double) (bounds.x1-mid);
  corner_start.y=(double) (bounds.y1-mid);
  corner_end.x=(double) (bounds.x2+mid);
  corner_end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,corner_start,corner_end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  (void) DrawPrimitive(image,annotate_info,primitive_info,exception);
  annotate_info=DestroyDrawInfo(annotate_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *name,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the name of the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *name,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *value;

  DrawInfo
    *clone_info;

  Image
    *clip_mask;

  MagickStatusType
    status;

  /*
    DrawClipPath() renders the clip path stored under `name' (as an image
    artifact holding MVG) into a white-on-transparent mask image and
    installs it as the image's write mask.  Returns MagickTrue on success,
    MagickFalse if the named clip path does not exist or drawing failed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  (void) FormatLocaleString(filename,MagickPathExtent,"%s",name);
  value=GetImageArtifact(image,filename);
  if (value == (const char *) NULL)
    return(MagickFalse);
  clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  /*
    Start from a fully transparent canvas; the clip path is painted opaque.
  */
  (void) SetImageMask(clip_mask,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  (void) QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      name);  /* log the name argument: draw_info->clip_mask may be NULL
                 here, and passing NULL for %s is undefined behavior */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,value);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=DrawImage(clip_mask,clone_info,exception);
  (void) SetImageMask(image,WritePixelMask,clip_mask,exception);
  clip_mask=DestroyImage(clip_mask);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    length,           /* remaining length of the current dash/gap segment */
    maximum_length,   /* length of the current path segment */
    offset,           /* dash offset still to be consumed */
    scale,
    total_length;     /* distance covered so far along the current segment */

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;    /* scratch polygon holding the dash being built */

  register ssize_t
    i;

  register double
    dx,
    dy;

  size_t
    number_vertices;

  ssize_t
    j,                /* next free slot in dash_polygon */
    n;                /* index into draw_info->dash_pattern; even = dash,
                         odd = gap */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count the vertices of the input path; the list is terminated by an
    UndefinedPrimitive entry.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /*
    Scratch buffer sized for roughly two output vertices per input vertex
    plus slack for the terminator entry.
  */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*(number_vertices+6UL)+6UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  /* first pattern entry is shortened by half a pixel; later entries below
     are lengthened by half a pixel */
  length=scale*(draw_info->dash_pattern[0]-0.5);
  offset=fabs(draw_info->dash_offset) >= DrawEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset by walking through the dash pattern; on exit n
    indexes the pattern entry where drawing resumes and length holds what
    is left of it.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*(draw_info->dash_pattern[n]+0.5);
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each path segment, emitting alternating dashes and gaps.  Even
    pattern indices (n & 0x01 == 0) are drawn; odd indices are gaps.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot((double) dx,dy);
    if (fabs(length) < DrawEpsilon)
      {
        /* current pattern entry exhausted exactly at a vertex: advance,
           wrapping to the start of the pattern at the 0.0 sentinel */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
          n=0;
        length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* end of a gap: restart the dash polygon at this point */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          j=1;
        }
      else
        {
          /* end of a dash: close the polygon and stroke it */
          if ((j+1) > (ssize_t) (2*number_vertices))
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
        n=0;
      length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    }
    /* carry the unconsumed part of the pattern entry into the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    /* mid-dash at the vertex: record it and keep accumulating */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Stroke any partial dash left at the end of the path.
    NOTE(review): unlike the inner loop, these trailing writes to
    dash_polygon[j] (and the ones just above) are not checked against the
    allocation bound 2*(number_vertices+6)+6 -- verify j cannot run past it.
  */
  if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=DrawEpsilon;
      dash_polygon[j].point.y+=DrawEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t EllipsePoints(const PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo stop,const PointInfo degrees)
{
  double
    delta,
    step,
    y;

  PointInfo
    angle;

  size_t
    number_points;

  /*
    Ellipses are just short segmented polys: estimate how many vertices
    tracing this arc will generate.  The count depends only on the angular
    sweep and the step size, so the primitive buffer is not read.  (The
    original walked primitive_info in lock-step via p+=p->coordinates,
    dereferencing entries that never affected the result -- a needless and
    potentially out-of-bounds read.)
  */
  (void) primitive_info;
  (void) start;
  if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon))
    return(1);
  delta=2.0/MagickMax(stop.x,stop.y);
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/(4.0*(MagickPI*PerceptibleReciprocal(delta)/2.0));
  if (step < EllipseEpsilon)
    step=EllipseEpsilon;
  angle.x=DegreesToRadians(degrees.x);
  y=degrees.y;
  while (y < degrees.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  number_points=0;
  for ( ; angle.x < angle.y; angle.x+=step)
    number_points++;
  return(number_points+1);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    numeric;

  /*
    A token is a point if StringToDouble() consumed at least one character;
    a zero value with nothing consumed means "not a point".
  */
  numeric=StringToDouble(point,&end);
  if ((fabs(numeric) < DrawEpsilon) && (end == point))
    return(MagickFalse);
  return(MagickTrue);
}
static inline void TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-vertex primitive at the given point.
  */
  primitive_info->point=point;
  primitive_info->coordinates=1;
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
factor,
points_extent,
primitive_extent;
DrawInfo
**graphic_context;
MagickBooleanType
proceed;
MagickSizeType
number_points;
MagickStatusType
status;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_stops;
ssize_t
defsDepth,
j,
k,
n;
StopInfo
*stops;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
primitive=(char *) NULL;
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
if (*(draw_info->primitive+1) != '-')
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=65536;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
defsDepth=0;
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MagickPathExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("clip-path",keyword) == 0)
{
/*
Create clip mask.
*/
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->clip_mask,token);
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
status=MagickFalse;
else
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha=(MagickRealType) (QuantumRange-
QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
status=MagickFalse;
else
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
status=MagickFalse;
else
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
status=MagickFalse;
else
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
status=MagickFalse;
else
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
status=MagickFalse;
else
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
primitive_type=LinePrimitive;
else
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->alpha=(Quantum) (QuantumRange*(1.0-
(QuantumScale*graphic_context[n]->alpha*(1.0-factor*
StringToDouble(token,&next_token)))));
graphic_context[n]->fill_alpha=QuantumRange*(1.0-(QuantumScale*
graphic_context[n]->fill_alpha*(1.0-factor*StringToDouble(token,
&next_token))));
graphic_context[n]->stroke_alpha=QuantumRange*(1.0-(QuantumScale*
graphic_context[n]->stroke_alpha*(1.0-factor*StringToDouble(token,
&next_token))));
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if (graphic_context[n]->clip_mask != (char *) NULL)
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageMask(image,ReadPixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("pattern",token) == 0)
break;
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MagickPathExtent];
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MagickPathExtent,"%s",token);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((size_t) (q-p-4+1) > 0)
{
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) SetImageArtifact(image,name,token);
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((size_t) (q-p-4+1) > 0)
{
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",
name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,
"%s-geometry",name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
pattern_bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
pattern_bounds.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.width=(size_t) floor(StringToDouble(token,
&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
pattern_bounds.height=(size_t) floor(StringToDouble(token,
&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((size_t) (q-p-4+1) > 0)
{
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,
"%s-geometry",name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width,
(double)pattern_bounds.height,(double)pattern_bounds.x,
(double)pattern_bounds.y);
(void) SetImageArtifact(image,key,geometry);
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
stops[number_stops-1].offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+2UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
status=MagickFalse;
else
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
status=MagickFalse;
else
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha=(MagickRealType) (QuantumRange-
QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
status=MagickFalse;
else
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= DrawEpsilon) ||
(fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) ||
(fabs(affine.sy-1.0) >= DrawEpsilon) ||
(fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
i++;
if (i < (ssize_t) number_points)
continue;
number_points<<=1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if ((primitive_info == (PrimitiveInfo *) NULL) ||
(number_points != (MagickSizeType) ((size_t) number_points)))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
points_extent=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
points_extent*=5;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
coordinates,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates=ceil(MagickPI*MagickPI*radius)+6*BezierQuantum+360;
if (coordinates > 21438)
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
points_extent*=5;
points_extent+=2*coordinates;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates > 107)
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
points_extent=(double) (BezierQuantum*primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
points_extent=1;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
points_extent++;
}
points_extent*=(6*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
{
double
alpha,
beta,
coordinates,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=ceil(MagickPI*MagickPI*radius)+6*BezierQuantum+360;
if (coordinates > 21438)
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
points_extent=2*coordinates;
break;
}
case EllipsePrimitive:
{
double
alpha,
beta,
coordinates,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*ceil(MagickPI*MagickPI*radius)+6*BezierQuantum+360;
if (coordinates > 1048576)
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
points_extent=(double) EllipsePoints(primitive_info+j,
primitive_info[j].point,primitive_info[j+1].point,
primitive_info[j+2].point);
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((double) ((size_t) points_extent)) < points_extent)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
break;
}
if (((MagickSizeType) (i+points_extent)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=points_extent+1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if ((primitive_info == (PrimitiveInfo *) NULL) ||
(number_points != (MagickSizeType) ((size_t) number_points)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
}
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceEllipse(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
break;
case PolygonPrimitive:
{
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(primitive_info+j,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
i=(ssize_t) (j+TracePath(primitive_info+j,token,exception));
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
status=MagickFalse;
else
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
primitive_info[j].text=AcquireString(token);
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
primitive_info[j].text=AcquireString(token);
break;
}
}
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
if (primitive_info->text != (char *) NULL)
primitive_info->text=(char *) RelinquishMagickMemory(
primitive_info->text);
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear or radial gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: order gradient stops by ascending offset.  Note the
    greater-than test is applied first, so only offsets that are not greater
    and within DrawEpsilon of each other compare equal.
  */
  const StopInfo
    *left,
    *right;
  left=(const StopInfo *) x;
  right=(const StopInfo *) y;
  if (left->offset > right->offset)
    return(1);
  if (fabs(left->offset-right->offset) <= DrawEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const GradientInfo
    *gradient;
  const SegmentInfo
    *gradient_vector;
  double
    length;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    point;
  RectangleInfo
    bounding_box;
  ssize_t
    y;
  /*
    Draw linear or radial gradient on image.  Stops are sorted by offset,
    then each pixel in the gradient's bounding box is blended between the
    two stops that bracket its computed offset, honoring the gradient's
    spread method (pad, reflect, or repeat).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Sort stops so the per-pixel bracketing scan below can stop at the first
    stop whose offset exceeds the pixel's offset.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  /* length of the gradient vector; used to normalize linear offsets */
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;
    double
      alpha,
      offset;
    register Quantum
      *magick_restrict q;
    register ssize_t
      i,
      x;
    ssize_t
      j;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the row's offset at x=0; inside the loop the offset is only
      recomputed when (x,y) differs from the gradient vector origin.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first/last stop.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* blend between bracketing stops i-1 and i */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: fold the offset back and forth across [0,1].
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;
          double
            repeat;
          /*
            Repeat spread: tile the gradient by taking the offset modulo the
            gradient length (linear) or radius (radial); antialias blends
            across the seam where one tile meets the next.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* near the seam, blend last stop back into the first */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];
  const char
    *geometry,
    *path,
    *type;
  DrawInfo
    *clone_info;
  ImageInfo
    *image_info;
  MagickBooleanType
    status;
  /*
    Render the pattern registered as an image artifact under "name" into
    *pattern: the "<name>" artifact holds the drawing primitive path, the
    "<name>-geometry" artifact the canvas size, and the optional
    "<name>-type" artifact a gradient type.  Any prior *pattern image is
    destroyed; ownership of the new image passes to the caller.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  /*
    Create the pattern canvas with a fully transparent background.
  */
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* guard: original dereferenced a NULL canvas */
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Draw the pattern primitives with a clone so the caller's draw_info is
    left untouched; clear inherited patterns to avoid recursive fills.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=DrawImage(*pattern,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  /*
    Release every per-thread polygon structure, then the set itself.
    Always returns NULL so callers can write `set=DestroyPolygonThreadSet(set)`.
  */
  register ssize_t
    n;
  assert(polygon_info != (PolygonInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (polygon_info[n] == (PolygonInfo *) NULL)
      continue;
    polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
  }
  return((PolygonInfo **) RelinquishMagickMemory(polygon_info));
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
/*
  Return the fill opacity in [0,1] for pixel (x,y) against the rasterized
  polygon edges, and store the stroke opacity through *stroke_alpha.  "mid"
  is half the effective stroke width; "fill_rule" selects even-odd versus
  non-zero winding.  Relies on each edge's scanline/highwater cache, so rows
  must be visited in non-decreasing y order per polygon_info instance.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;
  PointInfo
    delta;
  register const PointInfo
    *q;
  register EdgeInfo
    *p;
  register ssize_t
    i;
  ssize_t
    j,
    winding_number;
  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are y-sorted: once one starts below this row, the rest do too */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge fully above this scanline; free it, it is never needed again */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume at the cached highwater point for this edge's current scanline */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta: projection of (x,y)-q onto the segment direction */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta < 0.0)
        {
          /* closest to the segment's first endpoint */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta > alpha)
            {
              /* closest to the segment's second endpoint */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular (squared) distance to the segment interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  /* partial coverage on the stroke's antialiased fringe */
                  beta=1.0;
                  if (fabs(distance-1.0) >= DrawEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < DrawEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= DrawEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* point is entirely right of this edge: count a crossing */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product sign decides which side of the segment (x,y) lies on */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Rasterize a polygon, line, or point primitive onto the image.

    A per-thread set of PolygonInfo edge tables is acquired so the
    scanline loop can run under OpenMP with each thread scanning its own
    copy.  For every pixel inside the primitive's stroke-padded bounding
    box, GetFillAlpha() yields fill and stroke coverage which is then
    composited over the existing pixel.  Returns MagickTrue on success,
    MagickFalse if the edge tables cannot be acquired or a pixel region
    cannot be authenticated.
  */
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  /*
    Compute bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates == 0)
    return(MagickTrue);  /* nothing to draw */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  DisableMSCWarning(4127)
  if (0)
    /* debug aid: rendering of edge bounding boxes, compiled out */
    DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  /*
    Flood-fill style methods imply the interior of the shape is filled.
  */
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* half the stroke width, scaled by the current affine transform */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /*
    Union of the bounding boxes of all edges.
  */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /*
    Pad by the stroke half-width (plus one pixel of slack) and clamp to
    the image frame.
  */
  bounds.x1-=(mid+1.0);
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1-=(mid+1.0);
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2+=(mid+1.0);
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2+=(mid+1.0);
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;  /* another row already failed; skip remaining work */
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /*
            Only the single pixel nearest the primitive's point is set.
          */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's edge table */

    double
      fill_alpha,
      stroke_alpha;

    PixelInfo
      fill_color,
      stroke_color;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.
      */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);  /* pixel is write-protected */
          continue;
        }
      /*
        Coverage from the thread-local edge table: fill_alpha for the
        interior, stroke_alpha for the outline.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no antialiasing: threshold coverage to hard on/off */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      fill_alpha=fill_alpha*fill_color.alpha;
      CompositePixelOver(image,&fill_color,fill_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      stroke_alpha=stroke_alpha*stroke_color.alpha;
      CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  /*
    Emit DrawEvent log records describing a primitive.  Simple
    primitives (alpha, color, image, point, text) are logged as one
    line; anything else is treated as a vertex list and each subpath is
    logged point by point, flagging consecutive duplicate points.
  */
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,      /* first point of the current subpath */
    q,      /* previous point logged */
    point;  /* current point */

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);  /* sentinel: no previous point yet */
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /*
          Start of a new subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    /* (a redundant second read of primitive_info[i].point was removed) */
    if ((fabs(q.x-point.x) >= DrawEpsilon) ||
        (fabs(q.y-point.y) >= DrawEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath complete: report whether it closed back on its start */
    if ((fabs(p.x-point.x) >= DrawEpsilon) ||
        (fabs(p.y-point.y) >= DrawEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Draw a single primitive on the image, dispatching on the primitive
    type: alpha/color pixel methods, inlined or file-based images,
    single points, annotated text, and (default case) general paths
    which are rasterized via DrawPolygonPrimitive()/DrawStrokePolygon().
    Returns MagickTrue when every sub-operation succeeded.
  */
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  /*
    Drawing a non-gray color on a grayscale image forces promotion to
    sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  /* integer pixel nearest the primitive's anchor point */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Modify only the alpha channel, by point, replace, floodfill,
        fill-to-border, or reset method.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          /*
            Replace the alpha of every pixel fuzzily matching the seed
            pixel's color.  NOTE: x and y are reused as loop indices
            after the target is read.
          */
          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /* fill until the border color instead of matching seed */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* restrict the floodfill to the alpha channel only */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          /*
            Set the alpha of every writable pixel in the image.
          */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Same methods as AlphaPrimitive, but writing the full pixel
        color rather than just the alpha channel.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      /*
        Read the image named (or inlined with a data: URI) in the
        primitive's text, optionally resize it to the second point,
        then composite it at the anchor under the current affine.
      */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_image=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_image=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_image == (Image *) NULL)
        break;
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      status&=DrawAffineImage(image,composite_image,&affine,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      /* silently ignore points outside the image */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      /*
        Delegate to AnnotateImage() with the primitive's text and a
        geometry derived from the anchor point.
      */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      /*
        General path: rasterize as a polygon; dashed or wide strokes
        are handled by specialized helpers.
      */
      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= DrawEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          /* fill first (without stroke), then overlay the dashes */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
          closed_path=
            (fabs(primitive_info[i-1].point.x-primitive_info[0].point.x) < DrawEpsilon) &&
            (fabs(primitive_info[i-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ?
            MagickTrue : MagickFalse;
          i=(ssize_t) primitive_info[0].coordinates;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* round joins/caps or multi-subpath: plain polygon pass */
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  /*
    Render a round line cap at the given endpoint by drawing a tiny
    four-vertex polygon (a square of side 2*DrawEpsilon) anchored on
    the primitive's point.
  */
  PrimitiveInfo
    cap[5];

  register ssize_t
    j;

  j=0;
  while (j < 4)
  {
    cap[j]=(*primitive_info);  /* clone the endpoint into each vertex */
    j++;
  }
  cap[0].coordinates=4;            /* the cap is one 4-point subpath */
  cap[1].point.x+=2.0*DrawEpsilon; /* offset three corners into a square */
  cap[2].point.x+=2.0*DrawEpsilon;
  cap[2].point.y+=2.0*DrawEpsilon;
  cap[3].point.y+=2.0*DrawEpsilon;
  cap[4].primitive=UndefinedPrimitive;  /* sentinel terminates the list */
  (void) DrawPolygonPrimitive(image,draw_info,cap,exception);
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Draw the stroke of a polygon while respecting the line cap and join
    attributes.  Each subpath is converted by TraceStrokePolygon() into
    a filled outline polygon which is rendered with the stroke color as
    fill; open subpaths additionally get round end caps when requested.
    Returns MagickTrue when every subpath rendered successfully.
  */
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The outline is rendered as a fill: use the stroke color/pattern as
    the fill and disable stroking on the clone.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    stroke_polygon=TraceStrokePolygon(draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        /* trace failed; nothing was allocated, so just bail out */
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=(fabs(q->point.x-p->point.x) < DrawEpsilon) &&
      (fabs(q->point.y-p->point.y) < DrawEpsilon) ? MagickTrue : MagickFalse;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /* open subpath: cap both free ends */
        DrawRoundLinecap(image,draw_info,p,exception);
        DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Initialize the affine matrix to the identity transform: every
    coefficient zeroed, then unit scale on both axes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  /*
    Initialize draw_info to default values, then layer on any settings
    carried by image_info (font, density, antialias, border color) and
    its string options ("fill", "stroke", "gravity", ...).
  */
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent stroke */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit settings from the (cloned) image info.
  */
  draw_info->stroke_antialias=clone_info->antialias;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= DrawEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Apply string-valued image options when present.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept symbolic weight names, falling back to a numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  /*
    Return the binomial coefficient C(n,k): accumulate the falling
    product n!/k! (terms k+1 .. n), then divide out (n-k)!.  The
    arithmetic order matches the classic two-loop formulation.
  */
  double
    result;

  register ssize_t
    term;

  result=1.0;
  term=k+1;
  while (term <= n)
  {
    result*=term;
    term++;
  }
  term=1;
  while (term <= (n-k))
  {
    result/=term;
    term++;
  }
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  /*
    Trace an arc as an ellipse whose center is the midpoint of the
    start/end chord and whose radii span from that center to the start
    point; `degrees` carries the angular parameters for TraceEllipse().
  */
  PointInfo
    center,
    radii;

  center.x=(start.x+end.x)*0.5;
  center.y=(start.y+end.y)*0.5;
  radii.x=fabs(center.x-start.x);
  radii.y=fabs(center.y-start.y);
  TraceEllipse(primitive_info,center,radii,degrees);
}
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  /*
    Trace an elliptical arc from start to end with the given radii
    (arc), x-axis rotation (angle), and large-arc/sweep flags —
    appears to follow the SVG endpoint-to-center arc conversion
    (NOTE(review): matches the published algorithm shape; confirm).
    The arc is emitted as a chain of cubic bezier segments, one per
    quarter turn, via TraceBezier().
  */
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  /* degenerate: coincident endpoints reduce to a single point */
  if ((fabs(start.x-end.x) < DrawEpsilon) &&
      (fabs(start.y-end.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* degenerate: a zero radius reduces the arc to a straight line */
  if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* half-chord vector rotated into the ellipse's coordinate frame */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < DrawEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  if (delta > 1.0)
    {
      /* radii too small to span the endpoints: scale them up uniformly */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* endpoints normalized to the unit circle in the rotated frame */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);  /* flags pick which of the two centers to use */
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /* start angle and signed sweep extent */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one bezier segment per (at most) quarter turn */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+DrawEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    /* gamma: control-point distance for this segment's sweep */
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* each segment starts where the previous one ended */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* map the unit-circle control points back to image space */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the final point exactly on the endpoint */
    TraceBezier(p,4);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* back-fill the primitive type over all generated points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceBezier() replaces the `number_coordinates` control points stored in
  primitive_info[0..number_coordinates-1].point with a polyline that
  approximates the Bezier curve they define.  The caller must ensure
  primitive_info has room for the expanded point list.
*/
static void TraceBezier(PrimitiveInfo *primitive_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.  The sampling density (quantum) scales with the
    largest coordinate span between any pair of control points, capped at
    BezierQuantum.
  */
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points via the Bernstein basis: coefficients[i] holds the
    binomial coefficient C(n-1,i) (via Permutate), and alpha accumulates the
    running weight (1-w)^(n-1-j)*w^j as j advances.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* back-fill the primitive type over every generated point */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}
/*
  TraceCircle() traces a circle centered at `start` passing through `end`.
  A circle is just an ellipse with equal radii swept through a full 360
  degrees; the radius is the distance from center to perimeter point.
*/
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy;

  PointInfo
    radii,
    sweep;

  dx=end.x-start.x;
  dy=end.y-start.y;
  radii.x=hypot((double) dx,(double) dy);
  radii.y=radii.x;
  sweep.x=0.0;
  sweep.y=360.0;
  TraceEllipse(primitive_info,start,radii,sweep);
}
/*
  TraceEllipse() approximates an ellipse centered at `start` with radii
  `stop` (x,y), swept from degrees.x to degrees.y, by sampling points every
  `step` radians.  A zero-radius ellipse degenerates to a single point.
*/
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo stop,const PointInfo degrees)
{
  double
    delta,
    step,
    y;

  PointInfo
    angle,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,start);
      return;
    }
  /* pick an angular step that shrinks as the larger radius grows, clamped
     to the range [EllipseEpsilon, pi/8] */
  delta=2.0/MagickMax(stop.x,stop.y);
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/(4.0*(MagickPI*PerceptibleReciprocal(delta)/2.0));
  if (step < EllipseEpsilon)
    step=EllipseEpsilon;
  angle.x=DegreesToRadians(degrees.x);
  /* normalize the stop angle so the sweep is non-negative */
  y=degrees.y;
  while (y < degrees.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /* land exactly on the stop angle so the arc closes precisely */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* back-fill the primitive type over every generated point */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceLine() traces a line as two points; a zero-length line degenerates
  to a single point primitive.
*/
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  MagickBooleanType
    degenerate;

  TracePoint(primitive_info,start);
  degenerate=((fabs(start.x-end.x) < DrawEpsilon) &&
    (fabs(start.y-end.y) < DrawEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return;
    }
  TracePoint(primitive_info+1,end);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
}
/*
  TracePath() converts an SVG-style path string (e.g. "M10,20 L30,40 Z")
  into point primitives appended at primitive_info, returning the total
  number of coordinates generated.  Uppercase commands use absolute
  coordinates, lowercase relative.  Unrecognized alphabetic commands are
  reported to stderr and skipped.

  NOTE(review): ThrowPointExpectedException appears to be a macro that
  records the error in `exception` and updates `status` -- confirm against
  its definition elsewhere in this file.
*/
static size_t TracePath(PrimitiveInfo *primitive_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle;

        MagickBooleanType
          large_arc,
          sweep;

        PointInfo
          arc;

        /*
          Elliptical arc: rx,ry rotation large-arc-flag sweep-flag x,y.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          TraceArcPath(q,point,end,arc,angle,large_arc,sweep);
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: two control points plus an endpoint; the
          current point supplies the first control point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* horizontal line-to */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          TracePoint(q,point);
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: starts a new subpath; the prior subpath's coordinate
          count is committed before resetting primitive_info to q.
        */
        if (q != primitive_info)
          {
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;
          i++;
          TracePoint(q,point);
          q+=q->coordinates;
          /* NOTE(review): i is always non-zero here (incremented above),
             so every absolute 'M' pair is traced twice -- confirm this
             duplication is intentional */
          if ((i != 0) && (attribute == (int) 'M'))
            {
              TracePoint(q,point);
              q+=q->coordinates;
            }
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: one control point plus an endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point about the
          current point (only when the previous command was C/c/S/s).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous control point about the current point
          (only when the previous command was Q/q/T/t).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath's start point and commit the
          subpath's coordinate count.
        */
        point=start;
        TracePoint(q,point);
        q+=q->coordinates;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        z_count++;
        break;
      }
      default:
      {
        if (isalpha((int) ((unsigned char) attribute)) != 0)
          (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n",
            attribute);
        break;
      }
    }
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* back-fill the primitive type; with multiple closed subpaths switch to
     fill-to-border so interior holes render correctly */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
/*
  TraceRectangle() traces a rectangle as a closed 5-point polyline:
  start -> (start.x,end.y) -> end -> (end.x,start.y) -> start.
*/
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  PointInfo
    corner[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    k;

  corner[0]=start;
  corner[1].x=start.x;
  corner[1].y=end.y;
  corner[2]=end;
  corner[3].x=end.x;
  corner[3].y=start.y;
  corner[4]=start;
  q=primitive_info;
  for (k=0; k < 5; k++)
  {
    TracePoint(q,corner[k]);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  /* back-fill the primitive type over every generated point */
  for (k=0; k < (ssize_t) primitive_info->coordinates; k++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
}
/*
  TraceRoundRectangle() traces the rectangle spanning `start`..`end` with
  corners rounded by elliptical arcs of radii `arc` (clamped to at most
  half the rectangle's width/height).  Four quarter-ellipses are emitted --
  top-right, bottom-right, bottom-left, top-left -- then the path is closed
  back to the first point.
*/
static void TraceRoundRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    offset,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  p=primitive_info;
  offset.x=fabs(end.x-start.x);
  offset.y=fabs(end.y-start.y);
  /* arc radii may not exceed half the rectangle's extent */
  if (arc.x > (0.5*offset.x))
    arc.x=0.5*offset.x;
  if (arc.y > (0.5*offset.y))
    arc.y=0.5*offset.y;
  point.x=start.x+offset.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  point.x=start.x+offset.x-arc.x;
  point.y=start.y+offset.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+offset.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  /* close the path back to the first traced point */
  TracePoint(p,primitive_info->point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* back-fill the primitive type over every generated point */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceSquareLinecap() extends both end segments of an open path by
  `offset` so that a square linecap covers the stroke width beyond the
  terminal vertices.  number_vertices must be >= 1.

  Fixes over the prior revision: when every vertex coincides, `distance`
  is zero and the original code divided by it, producing NaN coordinates;
  additionally, the reverse scan could leave j == -1 (it lacked the clamp
  the forward scan has), reading primitive_info[-1] out of bounds.  Both
  cases are now skipped via the `distance >= DrawEpsilon` guards -- when
  the reverse scan fails, dx/dy are below DrawEpsilon, so the guard also
  prevents the j == -1 access.
*/
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* find the first vertex measurably distinct from vertex 0 */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= DrawEpsilon) ||
        (fabs((double) dy) >= DrawEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  if (distance >= DrawEpsilon)
    {
      /* push the first vertex outward along the segment direction */
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /* find the last vertex measurably distinct from the final vertex */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= DrawEpsilon) ||
        (fabs((double) dy) >= DrawEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  if (distance >= DrawEpsilon)
    {
      /* push the last vertex outward along the segment direction */
      primitive_info[number_vertices-1].point.x=(double) (
        primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double) (
        primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
}
/*
  TraceStrokePolygon() computes the outline polygon of the stroked version
  of `primitive_info`, honoring draw_info's stroke width, linecap,
  linejoin, and miterlimit.  Two side paths are accumulated as the segments
  are walked -- path_p on one side of the stroke, path_q on the other --
  then concatenated (path_q reversed) into one closed polygon.  Returns a
  newly allocated PrimitiveInfo list terminated by UndefinedPrimitive, or
  NULL on a memory allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx,
    dy,
    inverse_slope,
    slope,
    theta;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) ||
      (polygon_primitive == (PrimitiveInfo *) NULL))
    {
      /* release whatever succeeded before bailing out */
      if (path_p != (PointInfo *) NULL)
        path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      if (path_q != (PointInfo *) NULL)
        path_q=(PointInfo *) RelinquishMagickMemory(path_q);
      if (polygon_primitive != (PrimitiveInfo *) NULL)
        polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
          polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  (void) memcpy(polygon_primitive,primitive_info,(size_t)
    number_vertices*sizeof(*polygon_primitive));
  /* the path is closed when its first and last vertices coincide */
  closed_path=
    (fabs(primitive_info[number_vertices-1].point.x-primitive_info[0].point.x) < DrawEpsilon) &&
    (fabs(primitive_info[number_vertices-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ?
    MagickTrue : MagickFalse;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* duplicate the second vertex so the closing joint is shaped too */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.  The scan skips
    leading vertices that coincide with vertex 0.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    n=(ssize_t) number_vertices-1L;
  /* near-vertical and near-horizontal segments get huge finite slopes
     (signed 1/DrawEpsilon) instead of dividing by ~0 */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < DrawEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
    }
  else
    if (fabs(dy.p) < DrawEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is the half stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* offset is the perpendicular displacement of the side paths */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip segments shorter than half a pixel */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < DrawEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
      }
    else
      if (fabs(dy.q) < DrawEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_p[4]/box_q[4]: intersection of the two side lines (the miter
       point); parallel segments reuse the prior endpoint */
    if (fabs((double) (slope.p-slope.q)) < DrawEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    /* grow the side paths when nearly full; ~max_strokes guards the
       size_t addition against overflow */
    if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360))
      {
        if (~max_strokes < (6*BezierQuantum+360))
          {
            path_p=(PointInfo *) RelinquishMagickMemory(path_p);
            path_q=(PointInfo *) RelinquishMagickMemory(path_q);
          }
        else
          {
            max_strokes+=6*BezierQuantum+360;
            path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes,
              sizeof(*path_p));
            path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes,
              sizeof(*path_q));
          }
        if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL))
          {
            if (path_p != (PointInfo *) NULL)
              path_p=(PointInfo *) RelinquishMagickMemory(path_p);
            if (path_q != (PointInfo *) NULL)
              path_q=(PointInfo *) RelinquishMagickMemory(path_q);
            polygon_primitive=(PrimitiveInfo *)
              RelinquishMagickMemory(polygon_primitive);
            return((PrimitiveInfo *) NULL);
          }
      }
    /* the cross product's sign selects which side holds the convex joint */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter too long: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round joint by an arc of radius mid */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the joint logic for the opposite turn direction */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* slide the segment window forward: q becomes the new p */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: path_p forward, then path_q reversed, with
    closure points inserted for closed paths.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_unaryop__minv_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"

#ifndef GBCOMPACT

#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_bool_uint16
// op(A') function:  GB_tran__minv_bool_uint16

// C type:   bool
// A type:   uint16_t
// cast:     ;
// unaryop:  cij = true

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
// empty: boolean MINV ignores the input value entirely (cij = true below)
#define GB_GETA(aij,Ax,pA)  \
    ;

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse over booleans is identically true
#define GB_OP(z, x) \
    z = true ;

// casting: no-op, since no input value is read
#define GB_CASTING(z, aij) \
    ; ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_bool_uint16
(
    bool *Cx,           // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each iteration is independent (Cx [p] is simply set to true, per the
    // GB_OP macro above), so a static schedule parallelizes cleanly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_bool_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work; the GB_* macros defined
    // above specialize it for this type/operator combination
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "ludcmp.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*ludcmp.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
/* Initialize the ludcmp operands: x and y start at zero, b is a simple
   affine ramp, and A is filled lower-triangular with a unit diagonal and
   then replaced by A*A^T to make it positive semi-definite (same scheme as
   the cholesky benchmark).  Loop nesting and evaluation order are kept so
   the floating-point results match bit-for-bit. */
static void init_array(int n, double A[2000][2000], double b[2000], double x[2000], double y[2000]) {
    int row, col;
    double fn = (double) n;

    for (row = 0; row < n; row++) {
        x[row] = 0;
        y[row] = 0;
        b[row] = (row + 1) / fn / 2.0 + 4;
    }
    for (row = 0; row < n; row++) {
        for (col = 0; col <= row; col++)
            A[row][col] = (double) (-col % n) / n + 1;
        for (col = row + 1; col < n; col++) {
            A[row][col] = 0;
        }
        A[row][row] = 1;
    }
    /* A <- A * A^T via a scratch matrix B (accumulated over kk first to
       preserve the original summation order). */
    {
        int rr, cc, kk;
        double (*B)[2000][2000];

        B = (double (*)[2000][2000]) polybench_alloc_data((2000 + 0) * (2000 + 0), sizeof(double));
        for (rr = 0; rr < n; ++rr)
            for (cc = 0; cc < n; ++cc)
                (*B)[rr][cc] = 0;
        for (kk = 0; kk < n; ++kk)
            for (rr = 0; rr < n; ++rr)
                for (cc = 0; cc < n; ++cc)
                    (*B)[rr][cc] += A[rr][kk] * A[cc][kk];
        for (rr = 0; rr < n; ++rr)
            for (cc = 0; cc < n; ++cc)
                A[rr][cc] = (*B)[rr][cc];
        free((void *) B);
    }
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
/*DCE code. Must scan the entire live-out data.
  Can be used also to check the correctness of the output.
  Dumps x to stderr, 20 values per line, between BEGIN/END markers.*/
static void print_array(int n, double x[2000]) {
    int idx;
    fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
    fprintf(stderr, "begin dump: %s", "x");
    for(idx = 0; idx < n; idx++) {
        /*start a fresh line every 20 entries*/
        if(idx % 20 == 0) {
            fprintf(stderr, "\n");
        }
        fprintf(stderr, "%0.2lf ", x[idx]);
    }
    fprintf(stderr, "\nend dump: %s\n", "x");
    fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
/*LU decomposition without pivoting (Doolittle scheme), factoring A in place
  into a unit-lower-triangular L and an upper-triangular U, followed by the
  two triangular solves: L y = b (forward substitution) and U x = y (back
  substitution).  Only the innermost dot products are parallelized — the
  outer loops carry true dependences on A/y/x (flagged by the Clava messages
  below) — and each inner loop is a scalar reduction into w.*/
static void kernel_ludcmp(int n, double A[2000][2000], double b[2000], double x[2000], double y[2000]) {
    int i, j, k;
    double w;  /* running dot-product accumulator */
    /*************** Clava msgError **************
    unsolved dependency for arrayAccess A use : RW
    ****************************************/
    for(i = 0; i < n; i++) {
        /*Columns left of the diagonal: compute the L(i,j) multipliers.*/
        /*************** Clava msgError **************
        unsolved dependency for arrayAccess A use : RW
        ****************************************/
        for(j = 0; j < i; j++) {
            w = A[i][j];
            /* w -= sum_k A[i][k]*A[k][j]; reduction(-:w) merges the
               per-thread partial subtractions back into w */
            #pragma omp parallel for default(shared) private(k) firstprivate(j, i, A) reduction(- : w)
            for(k = 0; k < j; k++) {
                w -= A[i][k] * A[k][j];
            }
            A[i][j] = w / A[j][j];  /* divide by U's diagonal (no pivoting) */
        }
        /*Columns at and right of the diagonal: compute the U(i,j) entries.*/
        /*************** Clava msgError **************
        unsolved dependency for arrayAccess A use : RW
        ****************************************/
        for(j = i; j < n; j++) {
            w = A[i][j];
            #pragma omp parallel for default(shared) private(k) firstprivate(i, j, A) reduction(- : w)
            for(k = 0; k < i; k++) {
                w -= A[i][k] * A[k][j];
            }
            A[i][j] = w;
        }
    }
    /*Forward substitution: solve L y = b (L has a unit diagonal, so no divide).*/
    /*************** Clava msgError **************
    unsolved dependency for arrayAccess y use : RW
    ****************************************/
    for(i = 0; i < n; i++) {
        w = b[i];
        #pragma omp parallel for default(shared) private(j) firstprivate(i, A, y) reduction(- : w)
        for(j = 0; j < i; j++)
            w -= A[i][j] * y[j];
        y[i] = w;
    }
    /*Back substitution: solve U x = y, walking the rows bottom-up.*/
    /*************** Clava msgError **************
    unsolved dependency for arrayAccess x use : RW
    ****************************************/
    for(i = n - 1; i >= 0; i--) {
        w = y[i];
        #pragma omp parallel for default(shared) private(j) firstprivate(i, n, A, x) reduction(- : w)
        for(j = i + 1; j < n; j++)
            w -= A[i][j] * x[j];
        x[i] = w / A[i][i];
    }
}
/*Benchmark driver: allocate the arrays, initialize them, run the timed
  kernel, optionally dump live-out data (DCE guard), and free everything.*/
int main(int argc, char **argv) {
    /*Retrieve problem size.*/
    int n = 2000;
    /*Heap-allocate all benchmark arrays through the polybench allocator;
      the (2000 + 0) factors are the instrumented array extents.*/
    double (*A)[2000][2000] = (double (*)[2000][2000]) polybench_alloc_data((2000 + 0) * (2000 + 0), sizeof(double));
    double (*b)[2000] = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
    double (*x)[2000] = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
    double (*y)[2000] = (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
    /*Initialize array(s).*/
    init_array(n, *A, *b, *x, *y);
    /*Run kernel (the timer instrumentation was stripped by the
      source-to-source tool; its leftover empty statements are dropped).*/
    kernel_ludcmp(n, *A, *b, *x, *y);
    /*Prevent dead-code elimination. All live-out data must be printed
      by the function call in argument. The condition is never true in
      practice, but the compiler cannot prove that at compile time.*/
    if(argc > 42 && !strcmp(argv[0], "")) print_array(n, *x);
    /*Be clean.*/
    free((void *) A);
    free((void *) b);
    free((void *) x);
    free((void *) y);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.