source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unaryop__minv_int8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_fp32
// op(A') function: GB_tran__minv_int8_fp32
// C type: int8_t
// A type: float
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
float
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
int8_t z ; GB_CAST_SIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED (GB_CAST_SIGNED (Ax [p], 8), 8) for all p < anz,
// i.e. cast each float entry to int8_t and apply the integer minv operator.
// Safe when Cx aliases Ax: each entry is read once and written once.
GrB_Info GB_unop__minv_int8_fp32
(
    int8_t *Cx,             // output array; Cx and Ax may be aliased
    float *Ax,              // input array
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // flat parallel loop; GB_CAST_OP expands to cast-then-op per entry
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): the transpose/typecast/apply work itself lives in
// the shared template GB_unaryop_transpose.c, specialized via the GB_*
// macros defined above.
GrB_Info GB_tran__minv_int8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // NOTE(review): presumably per-slice
                                        // row counts from phase 1 -- confirm
                                        // in GB_unaryop_transpose.c
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
electrons.c | /******************************************************************************
* *
* ELECTRONS.C *
* *
* ELECTRON THERMODYNAMICS *
* *
******************************************************************************/
#include "decs.h"
#include <gsl/gsl_sf_bessel.h>
/* TODO: Encapsulate electron equation of state in EOS framework.
 * For now, I've broken encapsulation but made the code
* complain/break if EOS_TYPE is not EOS_TYPE_GAMMA
* when ELECTRONS are active.
*
* The best strategy is probably to make a two-temperature
* electron EOS one of the available EOS's.
*
* Until then, I'm worried that we might hit a maintainability
 * problem because if the implementation in ELECTRONS changes
* the implementation in EOS_TYPE_GAMMA needs to change too.
* ~JMM
*/
#if ELECTRONS
#if EOS == EOS_TYPE_GAMMA
void heat_electrons_zone(int i, int j, int k, double Pi[NVAR], double Ps[NVAR],
double Pf[NVAR], double Dt);
void fixup_electrons_1zone(double P[NVAR]);
// Initialize electron internal energy as the fraction fel0 of the total
// internal energy, then set the total and electron entropies
// (K = (gamma - 1) u / rho^gamma for an ideal gas) in every zone,
// including ghost zones, and synchronize boundaries.
void init_electrons() {
  double uel;
  ZSLOOP(-NG, N1 + NG - 1, -NG, NG + N2 - 1, -NG, NG + N3 - 1) {
    // Set electron internal energy to constant fraction of internal energy
    uel = fel0 * P[i][j][k][UU];
    // Initialize entropies
    P[i][j][k][KTOT] = (gam - 1.) * P[i][j][k][UU] * pow(P[i][j][k][RHO], -gam);
    P[i][j][k][KEL] = (game - 1.) * uel * pow(P[i][j][k][RHO], -game);
  }
  // Fill ghost zones consistently with the freshly-set entropies
  bound_prim(P);
}
// Apply the electron heating update to every active zone.
// Pi/Ps/Pf are the primitive variables at the beginning, middle, and end
// of the time step; Dt is the step size.
void heat_electrons(
    grid_prim_type Pi, grid_prim_type Ps, grid_prim_type Pf, double Dt) {
  timer_start(TIMER_ELECTRON);
#pragma omp parallel for collapse(3) schedule(dynamic)
  ZLOOP {
    heat_electrons_zone(i, j, k, Pi[i][j][k], Ps[i][j][k], Pf[i][j][k], Dt);
  }
  timer_stop(TIMER_ELECTRON);
}
// Update the electron entropy in one zone by assigning the fraction fel of
// the numerically-dissipated heat (difference between the evolved and the
// advected total entropy) to the electrons, per Ressler+ 2015.
// NOTE(review): Pi (beginning-of-step primitives) is currently unused here.
void heat_electrons_zone(int i, int j, int k, double Pi[NVAR], double Ps[NVAR],
    double Pf[NVAR], double Dt) {
  double ktotharm, ktotadv, fel;
  // Calculated (from evolved u) and advected entropy at final time
  ktotharm = (gam - 1.) * Pf[UU] / pow(Pf[RHO], gam);
  ktotadv = Pf[KTOT];
  // Electron heating fraction
  fel = get_fel(i, j, k, Ps);
  // Update final electron entropy according to Ressler+ 2015 Eqn. 27:
  Pf[KEL] += (game - 1.) / (gam - 1.) * pow(Ps[RHO], gam - game) * fel *
             (ktotharm - ktotadv);
  // Diagnostics: record the viscous (dissipative) heating rate
  struct of_geom *geom = &ggeom[i][j][CENT];
  struct of_state q;
  get_state(Ps, geom, &q);
  // Internal energy implied by the advected entropy
  double uadv = ktotadv / (gam - 1.) * pow(Pf[RHO], gam);
  double Qud = q.ucon[0] * q.ucov[0] * (Pf[UU] - uadv) *
               pow(Ps[RHO] / Pf[RHO], gam) / Dt;
  Qvisc[i][j][k] = fel * Qud;
  // Reset total entropy
  Pf[KTOT] = ktotharm;
}
// Electron heating fraction fel as a function of the local plasma beta and
// proton-to-electron temperature ratio.
// NOTE(review): the fit coefficients (c1, c2, c3, mbeta) appear to follow a
// turbulent-heating prescription (cf. Howes 2010 / Ressler+ 2015) --
// confirm the exact reference before citing in papers.
double get_fel(int i, int j, int k, double P[NVAR]) {
#if BETA_HEAT == 0
  return fel0;  // constant heating fraction when beta-dependence is disabled
#endif
  struct of_geom geom;
  double beta, fel, c1, Trat, Tpr, mbeta, qrat;
  double pres, bsq;
  double c2, c3, c22, c32;
  c1 = 0.92;                           // fit coefficient
  Tpr = (gam - 1.) * P[UU] / P[RHO];   // proton temperature (code units)
  // Electron internal energy and temperature from the electron entropy
  double uel = 1. / (game - 1.) * P[KEL] * pow(P[RHO], game);
  double Tel = (game - 1.) * uel / P[RHO];
  // Floor non-positive temperatures before taking ratios
  if (Tel <= 0.)
    Tel = SMALL;
  if (Tpr <= 0.)
    Tpr = SMALL;
  Trat = fabs(Tpr / Tel);
  pres = P[RHO] * Tpr; // Proton pressure
  geom = *get_geometry(i, j, k, CENT);
  bsq = bsq_calc(P, &geom);
  beta = pres / bsq * 2.;  // plasma beta = 2 p / b^2
  if (beta > 1.e20)        // cap beta so pow()/exp() below stay finite
    beta = 1.e20;
  mbeta = 2. - 0.2 * log10(Trat);
  // Fit coefficients depend on whether protons are hotter than electrons
  if (Trat <= 1.) {
    c2 = 1.6 / Trat;
    c3 = 18. + 5. * log10(Trat);
  } else {
    c2 = 1.2 / Trat;
    c3 = 18.;
  }
  c22 = pow(c2, 2.);
  c32 = pow(c3, 2.);
  // qrat = Qp/Qe (proton-to-electron heating ratio); fel = 1/(1+qrat)
  qrat = c1 * (c22 + pow(beta, mbeta)) / (c32 + pow(beta, mbeta)) *
         exp(-1. / beta) * pow(MP / ME * Trat, .5);
  fel = 1. / (1. + qrat);
  return fel;
}
// Modified Bessel function of second kind with safe inputs
// Modified Bessel function of the second kind K_n(x), guarded against
// underflow in the GSL implementation at large argument.
double safe_Kn(int n, double x) {
  // Small/moderate argument: delegate to GSL directly.
  if (x <= 100.) {
    return gsl_sf_bessel_Kn(n, x);
  }
  // Large argument: leading-order asymptotic K_n(x) ~ sqrt(pi/(2x)) e^{-x}
  // (independent of n at leading order).
  return sqrt(M_PI / (2. * x)) * exp(-x);
}
#if RADIATION
// Ion-electron Coulomb collisional energy exchange, applied over Dt to
// update the electron entropy. Works with dimensionless temperatures
// theta = kT/(m c^2).
// NOTE(review): the rate expressions match the standard relativistic
// Coulomb coupling formula (cf. Stepney & Guilbert 1983) -- confirm
// against the code paper.
void coulomb(
    grid_prim_type Pi, grid_prim_type Ps, grid_prim_type Pf, double Dt) {
  timer_start(TIMER_ELECTRON);
#pragma omp parallel for collapse(3)
  ZLOOP {
    double rho = Ps[i][j][k][RHO];
    // Electron temperature from the electron entropy
    double thetae = MP / ME * Ps[i][j][k][KEL] * pow(rho, game - 1.);
    double ue = Ps[i][j][k][KEL] * pow(rho, game) / (game - 1.);
    double up = Ps[i][j][k][UU] - ue;  // ion internal energy = total - electron
    double n = rho * Ne_unit;          // number density (cgs)
    double Ti = up * U_unit * (gamp - 1.) / (n * KBOL);
    double thetai = KBOL * Ti / (MP * CL * CL);
    // Harmonic-mean ("reduced") temperature used in the coupling rate
    double thetam = 1. / (1. / thetae + 1. / thetai);
    double logCoul = COULOMB_LOG;
    double Te = thetae * ME * CL * CL / KBOL;
    struct of_geom *geom;
    struct of_state q;
    // Sanity checks, although electron fixup routine should catch these
    if (!isnan(Te) && !isnan(Ti) && Te > 0. && Ti > 0.) {
      double Qc, term1, term2;
      // Get Coulomb heating rate.
      // Need to handle cases where Thetai < 1e-2, Thetae < 1e-2, and both
      // Thetae and Thetai < 1e-2 separately due to Bessel functions exploding
      double prefac =
          3. / 2. * ME / MP * n * n * logCoul * CL * KBOL * THOMSON * (Ti - Te);
      double thetaCrit = 1.e-2;
      if (thetae < thetaCrit && thetai < thetaCrit) {
        // Both species nonrelativistic: small-argument Bessel limits
        term1 = sqrt(thetam / (M_PI * thetae * thetai / 2.));
        term2 = sqrt(thetam / (M_PI * thetae * thetai / 2.));
      } else if (thetae < thetaCrit) {
        // Only electrons nonrelativistic
        term1 =
            exp(-1. / thetai) / safe_Kn(2, 1. / thetai) * sqrt(thetam / thetae);
        term2 =
            exp(-1. / thetai) / safe_Kn(2, 1. / thetai) * sqrt(thetam / thetae);
      } else if (thetai < thetaCrit) {
        // Only ions nonrelativistic
        term1 =
            exp(-1. / thetae) / safe_Kn(2, 1. / thetae) * sqrt(thetam / thetai);
        term2 =
            exp(-1. / thetae) / safe_Kn(2, 1. / thetae) * sqrt(thetam / thetai);
      } else {
        // Fully relativistic branch: evaluate Bessel ratios directly
        term1 = safe_Kn(1, 1. / thetam) /
                (safe_Kn(2, 1. / thetae) * safe_Kn(2, 1. / thetai));
        term2 = safe_Kn(0, 1. / thetam) /
                (safe_Kn(2, 1. / thetae) * safe_Kn(2, 1. / thetai));
      }
      term1 *= (2. * pow(thetae + thetai, 2) + 1.) / (thetae + thetai);
      term2 *= 2.;
      Qc = prefac * (term1 + term2);
      // Convert to code units
      Qc *= T_unit / U_unit;
      // Update electron internal energy
      geom = &ggeom[i][j][CENT];
      get_state(Ps[i][j][k], geom, &q);
      double ue_f =
          Pf[i][j][k][KEL] * pow(Pf[i][j][k][RHO], game) / (game - 1.);
      // Divide by u^0 to convert coordinate-time Dt to proper-time heating
      ue_f += Qc * Dt / q.ucon[0];
      // Record diagnostic
      Qcoul[i][j][k] = q.ucov[0] * Qc;
      // Update electron entropy
      Pf[i][j][k][KEL] = (game - 1.) * ue_f * pow(Pf[i][j][k][RHO], -game);
    }
  } // ZLOOP
  timer_stop(TIMER_ELECTRON);
}
// Deposit the fluid-frame radiation four-force into the electron internal
// energy (conserved-variable form), tracking "supercooling" zones where
// the update would drive the conserved electron entropy negative.
void apply_rad_force_e(grid_prim_type Prh, grid_prim_type Pr,
    grid_fourvector_type radG, double Dt) {
  // Apply only to active zones for this proc -- ghost zone four-force
  // depositions already communicated over MPI
#pragma omp parallel for collapse(3) schedule(dynamic)
  ZLOOP {
    struct of_geom *geom = &ggeom[i][j][CENT];
    struct of_state q;
    double Uel, Urho;
    double C = 0.;
    // Get fluid state at n + 1/2 where radiation four-force is centered
    get_state(Prh[i][j][k], geom, &q);
    // Contract the four-velocity with the radiation four-force
    for (int mu = 0; mu < NDIM; mu++) {
      C += -q.ucon[mu] * radG[i][j][k][mu];
    }
    // Get fluid state at n+1 for full update
    get_state(Pr[i][j][k], geom, &q);
    // Remove \sqrt{-g} from radG
    C = C / geom->g;
    Urho = Pr[i][j][k][RHO] * q.ucon[0];  // conserved rest-mass density
    Uel = Pr[i][j][k][KEL] * Urho;        // conserved electron entropy
    Uel += Dt * (C * (game - 1.) * pow(Prh[i][j][k][RHO], 1. - game));
    // Supercooling diagnostics
    if (Uel < 0.) {
      double U_1[NVAR], prim_2[NVAR], U_2[NVAR];
      struct of_state q_1, q_2;
      Nsuper[i][j][k]++;
      // (1) Record total energy density after cooling
      get_state(Pr[i][j][k], &ggeom[i][j][CENT], &q_1);
      primtoflux(Pr[i][j][k], &q_1, 0, 0, &ggeom[i][j][CENT], U_1);
      // (2) Calculate total energy density with zero electron energy density
      PLOOP prim_2[ip] = psupersave[i][j][k][ip];
      double ue = prim_2[KEL] * pow(prim_2[RHO], game) / (game - 1.);
      prim_2[UU] -= ue;
      get_state(prim_2, &ggeom[i][j][CENT], &q_2);
      primtoflux(prim_2, &q_2, 0, 0, &ggeom[i][j][CENT], U_2);
      // Subtract (2) from (1); integrated over volume, this is fake energy
      Esuper[i][j][k] += fabs((U_1[UU] - U_2[UU]) * dx[1] * dx[2] * dx[3]);
    } // Uel < 0
    Pr[i][j][k][KEL] = Uel / Urho;
    // Reset total entropy
    Pr[i][j][k][KTOT] =
        (gam - 1.) * Pr[i][j][k][UU] * pow(Pr[i][j][k][RHO], -gam);
  } // ZLOOP
}
#endif // RADIATION
// Enforce electron-entropy bounds (NaN repair and Tp/Te limits) in every
// active zone; see fixup_electrons_1zone for the per-zone logic.
void fixup_electrons(grid_prim_type P) {
  timer_start(TIMER_ELECTRON);
#pragma omp parallel for collapse(3) schedule(dynamic)
  ZLOOP { fixup_electrons_1zone(P[i][j][k]); }
  timer_stop(TIMER_ELECTRON);
}
// Clamp the electron entropy in one zone so the proton-to-electron
// temperature ratio stays within [tptemin, tptemax]; NaNs are replaced by
// the coldest allowed electrons.
void fixup_electrons_1zone(double P[NVAR]) {
  // Common factor in both entropy bounds
  double kscale = P[KTOT] * pow(P[RHO], gam - game);
  double gfac = (gam - 1.) / (game - 1.);
  double kelmax = kscale / (tptemin + gfac);  // hottest allowed electrons
  double kelmin = kscale / (tptemax + gfac);  // coldest allowed electrons
  // Replace NaNs with cold electrons
  if (isnan(P[KEL]))
    P[KEL] = kelmin;
  // Enforce maximum Tp/Te (lower bound on electron entropy)
  if (P[KEL] < kelmin)
    P[KEL] = kelmin;
  // Enforce minimum Tp/Te (upper bound on electron entropy)
  if (P[KEL] > kelmax)
    P[KEL] = kelmax;
}
#endif // EOS == EOS_TYPE_GAMMA
#endif // ELECTRONS
|
lu.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include "nb/math_bot.h"
#include "nb/memory_bot.h"
#include "nb/container_bot.h"
#include "nb/graph_bot.h"
#include "nb/solver_bot.h"
#include "../sparse_struct.h"
#include "cholesky_symbolic.h"
/* Allocate L and U with the symbolic (fill-in) nonzero pattern for the
 * LU factorization of A.
 * Returns 0 on success, 1 on allocation failure (in which case *L and *U
 * are both NULL).  Callers that tested *L for NULL now also get a
 * meaningful status -- the original always returned 0. */
int nb_sparse_alloc_LU(const nb_sparse_t *const restrict A,
		       nb_sparse_t** L, nb_sparse_t** U)
{
	*L = nb_sparse_allocate(A->N);
	*U = nb_sparse_allocate(A->N);
	if (NULL == *L || NULL == *U) {
		/* NOTE(review): assumes nb_sparse_allocate returns NULL on
		 * failure -- consistent with callers' NULL checks. */
		nb_sparse_destroy(NULL != *L ? *L : *U);
		*L = NULL;
		*U = NULL;
		return 1;
	}
	nb_sparse_cholesky_symbolic(A, *L, *U, A->N);
	return 0;
}
/* Numeric phase of a sparse Doolittle-style LU factorization: Ar = L U,
 * with the unit diagonal stored explicitly in L.  L and U must already
 * carry the symbolic fill-in pattern created by nb_sparse_alloc_LU().
 * NOTE(review): the pattern comes from a Cholesky-style symbolic analysis,
 * so this presumably assumes a structurally symmetric Ar -- confirm. */
void nb_sparse_decompose_LU(const nb_sparse_t *const Ar,
			    nb_sparse_t *L, nb_sparse_t* U,
			    uint32_t omp_parallel_threads)
{
	/* Create Ut to compute faster the decomposition: Ut mirrors U's
	 * columns row-wise so the inner products below are unit-stride */
	nb_sparse_t* Ut = nb_sparse_clone(L);
	/* Compute the decomposition */
	for (uint32_t j = 0; j < Ar->N; j++) {
		/* Unit diagonal of L; U_jj starts from A_jj */
		L->rows_values[j][L->rows_size[j]-1] = 1.0;
		U->rows_values[j][0] = nb_sparse_get(Ar, j, j);
		/* U_jj -= sum_q L_jq * U_qj (column of U read from Ut) */
		double sum = 0;
#pragma omp parallel for schedule(guided) reduction(+:sum) num_threads(omp_parallel_threads)
		for (uint32_t q = 0; q < L->rows_size[j]-1; q++)
			sum += L->rows_values[j][q] * Ut->rows_values[j][q];
		U->rows_values[j][0] -= sum;
		Ut->rows_values[j][Ut->rows_size[j]-1] = U->rows_values[j][0];
		/* Fill column j of L and row j of U for each off-diagonal
		 * entry i in U's row j.
		 * NOTE(review): iterations write distinct rows i of L but also
		 * read other rows of L/Ut; verify the symbolic structure rules
		 * out cross-iteration hazards before trusting this pragma. */
#pragma omp parallel for schedule(guided) num_threads(omp_parallel_threads)
		for (uint32_t q = 1; q < U->rows_size[j]; q++) {
			uint32_t i = U->rows_index[j][q];
			/* L_ij <- A_ij */
			uint32_t L_jindex = nb_sparse_bsearch_row(L, i, j, 0, L->rows_size[i]-1);
			L->rows_values[i][L_jindex] = nb_sparse_get(Ar, i, j);
			/* U_ji <- A_ji */
			U->rows_values[j][q] = nb_sparse_get(Ar, j, i);
			/* Merge-walk the common sparsity of rows i and j to
			 * subtract the contributions of earlier columns */
			register uint32_t r = 0;
			register uint32_t s = 0;
			register uint32_t _ro = L->rows_index[i][r];
			register uint32_t _sigma = L->rows_index[j][s];
			bool flag = true; /* Flag to know when to stop the cycle */
			while (flag) {
				while (_ro < _sigma)
					_ro = L->rows_index[i][++r];
				while (_ro > _sigma)
					_sigma = L->rows_index[j][++s];
				while (_ro == _sigma) {
					if (_ro == j) {
						flag = false; /* Finish the cycle */
						break;
					}
					double vir = L->rows_values[i][r];
					double vjs = Ut->rows_values[j][s];
					L->rows_values[i][L_jindex] -= vir*vjs;
					vjs = L->rows_values[j][s];
					vir = Ut->rows_values[i][r];
					U->rows_values[j][q] -= vir * vjs;
					_ro = L->rows_index[i][++r];
					_sigma = L->rows_index[j][++s];
				}
			}
			/* Finish L_ij and mirror U_ji into Ut */
			L->rows_values[i][L_jindex] /= U->rows_values[j][0];
			Ut->rows_values[i][L_jindex] = U->rows_values[j][q];
		}
	}
	/* Free memory */
	nb_sparse_destroy(Ut);
}
/* Solve A x = b given the factorization A = L U: forward-substitute
 * L y = b, then back-substitute U x = y. */
void nb_sparse_solve_LU(const nb_sparse_t *const L,
			const nb_sparse_t *const U,
			const double *const b,
			double* _x /* Out */)
{
	double *y = nb_allocate_zero_mem(L->N * sizeof(*y));
	nb_sparse_forward_solve(L, b, y);
	nb_sparse_backward_solve(U, y, _x);
	nb_free_mem(y);
}
/* Factor A = L U and solve A x = b.
 * Returns 0 on success, 1 if the factors could not be allocated.
 * Fix: the original leaked U (never destroyed) when only L came back
 * NULL, and ignored the status of nb_sparse_alloc_LU. */
int nb_sparse_solve_using_LU(const nb_sparse_t *const A,
			     const double *const b,
			     double* x, /* Out */
			     uint32_t omp_parallel_threads)
{
	nb_sparse_t *L = NULL;
	nb_sparse_t *U = NULL;
	int alloc_status = nb_sparse_alloc_LU(A, &L, &U);
	if (0 != alloc_status || NULL == L || NULL == U) {
		/* Release whichever factor was actually allocated */
		if (NULL != L)
			nb_sparse_destroy(L);
		if (NULL != U)
			nb_sparse_destroy(U);
		return 1;
	}
	nb_sparse_decompose_LU(A, L, U, omp_parallel_threads);
	nb_sparse_solve_LU(L, U, b, x);
	nb_sparse_destroy(L);
	nb_sparse_destroy(U);
	return 0;
}
/* Solve A x = b after relabeling (permuting) A to reduce fill-in:
 * compute a permutation, permute A and b, solve the permuted system via
 * LU, and permute the solution back to the original ordering.
 * Returns the status of nb_sparse_solve_using_LU (0 on success). */
int nb_sparse_relabel_and_solve_using_LU(const nb_sparse_t *const A,
					 const double *const b,
					 double* x, /* Out */
					 uint32_t omp_parallel_threads)
{
	uint32_t N = nb_sparse_get_size(A);
	/* One soft-allocated block laid out as: perm | iperm | br | xr.
	 * The doubles start at offset 2*N*4 bytes, which is 8-aligned. */
	uint32_t memsize = 2 * N * (sizeof(uint32_t) + sizeof(double));
	char *memblock = nb_soft_allocate_mem(memsize);
	uint32_t *perm = (void*) memblock;
	uint32_t *iperm = (void*) (memblock + N * sizeof(uint32_t));
	double *br = (void*) (memblock + 2 * N * sizeof(uint32_t));
	double *xr = (void*) (memblock + 2 * N * sizeof(uint32_t) +
			      N * sizeof(double));
	nb_sparse_calculate_permutation(A, perm, iperm);
	nb_sparse_t *Ar = nb_sparse_create_permutation(A, perm, iperm);
	nb_vector_permutation(N, b, perm, br);
	int status = nb_sparse_solve_using_LU(Ar, br, xr,
					      omp_parallel_threads);
	/* Map the permuted solution back to the original ordering */
	nb_vector_permutation(N, xr, iperm, x);
	nb_sparse_destroy(Ar);
	nb_soft_free_mem(memsize, memblock);
	return status;
}
/* Sign of det(A), computed from the U factor of a relabeled LU
 * factorization.  Returns 0 when the factors cannot be allocated.
 * Fix: the original early return on Lr == NULL leaked memblock, Ar, and
 * (potentially) Ur. */
double nb_sparse_relabel_and_get_det_sign_using_LU(const nb_sparse_t *A)
{
	uint32_t N = nb_sparse_get_size(A);
	/* Soft-allocated block laid out as: perm | iperm */
	uint32_t memsize = 2 * N * (sizeof(uint32_t));
	char *memblock = nb_soft_allocate_mem(memsize);
	uint32_t *perm = (void*) memblock;
	uint32_t *iperm = (void*) (memblock + N * sizeof(uint32_t));
	nb_sparse_calculate_permutation(A, perm, iperm);
	nb_sparse_t *Ar = nb_sparse_create_permutation(A, perm, iperm);
	nb_sparse_t *Lr = NULL;
	nb_sparse_t *Ur = NULL;
	nb_sparse_alloc_LU(Ar, &Lr, &Ur);
	if (NULL == Lr) {
		/* Allocation failed: release everything acquired so far */
		if (NULL != Ur)
			nb_sparse_destroy(Ur);
		nb_sparse_destroy(Ar);
		nb_soft_free_mem(memsize, memblock);
		return 0;
	}
	nb_sparse_decompose_LU(Ar, Lr, Ur, 1);
	double det = nb_sparse_triangular_get_det_sign(Ur);
	nb_sparse_destroy(Ar);
	nb_sparse_destroy(Lr);
	nb_sparse_destroy(Ur);
	nb_soft_free_mem(memsize, memblock);
	return det;
}
|
cleaned_t_b_all.c | /**
F.H.P.C. Assingment 2
@file cleaned_t_b_all_option_2.cc
@brief All threads fill the array and perform the sum.
@author Pietro Morichetti
@date 17/12/2019
@version 1.1
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#define _GNU_SOURCE
#define N 10000000 // size of the problem
/* Fill an N-element array in parallel, then sum it with an OpenMP
 * reduction. argv[1] (optional) sets the thread count.
 * Fix: the ~40 MB malloc was never checked for failure. */
int main(int argc, char **argv){
  long int S = 0;
  int* array = malloc(N * sizeof(*array));
  if (array == NULL) {
    fprintf(stderr, "Failed to allocate %d ints\n", N);
    return EXIT_FAILURE;
  }
#if defined(_OPENMP)
  if(argc > 1){
    omp_set_num_threads(atoi(argv[1])); // set the number of threads
  }
#endif
#pragma omp parallel for // everybody fills the array
  for(int ii = 0; ii < N; ++ii){
    array[ii] = ii;
  }
#pragma omp parallel for reduction(+:S) // everyone adds up; reduce on S
  for(int ii = 0; ii < N; ++ii){
    S += array[ii];
  }
  (void)S; // timing exercise: the sum itself is intentionally unused
  free(array);
  return EXIT_SUCCESS;
}
|
tetrahedron.c | #include "tetrahedron_method.h"
#include "kgrid.h"
#include <stdio.h>
#include <stdlib.h>
static void test_tetrahedron_method(void);
static void mat_copy_matrix_d3(double a[3][3], double b[3][3]);
static double mat_get_determinant_d3(double a[3][3]);
static int mat_inverse_matrix_d3(double m[3][3],
double a[3][3],
const double precision);
// Entry point: run the NaCl tetrahedron-method DOS example.
int main(void)
{
  test_tetrahedron_method();
  return 0;
}
/* frequency.dat is in the example directory. */
/* The values in this file are the phonon frequencies of NaCl */
/* with 20x20x20 mesh. Calculation was done with reducing */
/* k-points to the irreducible k-points using phonopy. */
/* (https://github.com/phonopy/phonopy) */
/* Compute the phonon DOS and integrated DOS of NaCl on a 20x20x20 mesh
 * with the tetrahedron method; reads "frequency-202020.dat", writes
 * "dos.dat".
 * Fixes: both fopen() results were used unchecked (NULL dereference if a
 * file is missing), the getline() buffer was leaked, and the 10x10x10
 * comment wrongly repeated the 20x20x20 parameters. */
static void test_tetrahedron_method(void)
{
  printf("*** Example of tetrahedron method of NaCl to calculate DOS ***:\n");
  printf("Read data from frequency.dat and write DOS to dos.dat.\n");

  int i, j, k, l, q, r;

  /* NaCl 20x20x20 gamma-centre mesh (m=20 and "frequency-202020.dat" file) */
  /* NaCl 10x10x10 gamma-centre mesh (m=10 and "frequency-101010.dat" file) */
  double lattice[3][3] = {
    {0.000000000000000, 2.845150738087836, 2.845150738087836},
    {2.845150738087836, 0.000000000000000, 2.845150738087836},
    {2.845150738087836, 2.845150738087836, 0.000000000000000}
  };
  int num_atom = 2;
  int m = 20; /* m = 10 for 10x10x10 mesh */
  int mesh[3] = {m, m, m};
  size_t num_gp = mesh[0] * mesh[1] * mesh[2];
  int is_shift[3] = {0, 0, 0};
  int grid_address[num_gp][3];
  int relative_grid_address[24][4][3];
  double rec_lat[3][3];
  FILE *fp;
  char *line = NULL;
  size_t len = 0;
  ssize_t read;
  /* NOTE: VLA of num_gp*num_atom*3 doubles (~375 KiB for m=20) lives on
   * the stack; switch to malloc if the mesh grows. */
  double frequency[num_gp * num_atom * 3];
  double max_f, min_f;
  double t_omegas[24][4];
  int g_addr[3];
  int g_addr_double[3];
  size_t gp;
  int num_freqs = 201;
  double dos[num_freqs];
  double integral_dos[num_freqs];
  double omegas[num_freqs];
  double iw;

  kgd_get_all_grid_addresses(grid_address, mesh);
  mat_inverse_matrix_d3(rec_lat, lattice, 1e-5);
  thm_get_relative_grid_address(relative_grid_address, rec_lat);

  /* "frequency-101010.dat" for 10x10x10 mesh */
  fp = fopen("frequency-202020.dat", "r");
  if (fp == NULL) { /* previously dereferenced a NULL stream */
    fprintf(stderr, "Could not open frequency-202020.dat\n");
    return;
  }
  for (i = 0; i < num_gp * num_atom * 3; i++) {
    read = getline(&line, &len, fp);
    if (read == -1) {
      break;
    }
    frequency[i] = strtod(line, NULL);
  }
  fclose(fp);
  free(line); /* getline allocates; this was leaked before */

  /* Frequency range spanned by the sampled DOS */
  max_f = frequency[0];
  min_f = frequency[0];
  for (i = 0; i < num_gp * num_atom * 3; i++) {
    if (max_f < frequency[i]) {
      max_f = frequency[i];
    }
    if (min_f > frequency[i]) {
      min_f = frequency[i];
    }
  }

  /* For each sampling frequency, accumulate tetrahedron weights for the
   * DOS ('J') and the integrated DOS ('I') over all grid points/bands. */
#pragma omp parallel for private(j, k, l, q, r, g_addr, g_addr_double, gp, t_omegas, iw)
  for (i = 0; i < num_freqs; i++) {
    dos[i] = 0;
    integral_dos[i] = 0;
    omegas[i] = min_f + (max_f - min_f) / (num_freqs - 1) * i;
    for (j = 0; j < num_gp; j++) {
      for (k = 0; k < num_atom * 3; k++) {
        for (l = 0; l < 24; l++) {
          for (q = 0; q < 4; q++) {
            for (r = 0; r < 3; r++) {
              g_addr[r] = grid_address[j][r] + relative_grid_address[l][q][r];
            }
            kgd_get_grid_address_double_mesh(g_addr_double,
                                             g_addr,
                                             mesh,
                                             is_shift);
            gp = kgd_get_grid_point_double_mesh(g_addr_double, mesh);
            t_omegas[l][q] = frequency[gp * num_atom * 3 + k];
          }
        }
        iw = thm_get_integration_weight(omegas[i], t_omegas, 'J');
        dos[i] += iw;
        iw = thm_get_integration_weight(omegas[i], t_omegas, 'I');
        integral_dos[i] += iw;
      }
    }
  }

  fp = fopen("dos.dat", "w");
  if (fp == NULL) {
    fprintf(stderr, "Could not open dos.dat for writing\n");
    return;
  }
  for (i = 0; i < num_freqs; i++) {
    fprintf(fp, "%f %f\n", omegas[i], dos[i] / num_gp);
  }
  fprintf(fp, "\n\n");
  for (i = 0; i < num_freqs; i++) {
    fprintf(fp, "%f %f\n", omegas[i], integral_dos[i] / num_gp);
  }
  fclose(fp);
}
/* Copy the 3x3 matrix b into a, element by element. */
static void mat_copy_matrix_d3(double a[3][3], double b[3][3])
{
  int row, col;
  for (row = 0; row < 3; row++) {
    for (col = 0; col < 3; col++) {
      a[row][col] = b[row][col];
    }
  }
}
/* Determinant of a 3x3 matrix via cofactor expansion along the first row. */
static double mat_get_determinant_d3(double a[3][3])
{
  double cof0 = a[1][1] * a[2][2] - a[1][2] * a[2][1];
  double cof1 = a[1][2] * a[2][0] - a[1][0] * a[2][2];
  double cof2 = a[1][0] * a[2][1] - a[1][1] * a[2][0];
  return a[0][0] * cof0 + a[0][1] * cof1 + a[0][2] * cof2;
}
/* Invert the 3x3 matrix a into m (a and m may alias: the result is staged
 * in a local buffer before copying).
 * Returns 1 on success, 0 when |det(a)| < precision (near-singular).
 * Fix: the `precision` parameter was entirely unused and the code divided
 * by a possibly-zero determinant. */
static int mat_inverse_matrix_d3(double m[3][3],
                                 double a[3][3],
                                 const double precision)
{
  double det;
  double c[3][3];
  det = mat_get_determinant_d3(a);
  /* Reject near-singular input instead of dividing by ~0 */
  if (det < precision && det > -precision) {
    return 0;
  }
  /* Adjugate divided by the determinant */
  c[0][0] = (a[1][1] * a[2][2] - a[1][2] * a[2][1]) / det;
  c[1][0] = (a[1][2] * a[2][0] - a[1][0] * a[2][2]) / det;
  c[2][0] = (a[1][0] * a[2][1] - a[1][1] * a[2][0]) / det;
  c[0][1] = (a[2][1] * a[0][2] - a[2][2] * a[0][1]) / det;
  c[1][1] = (a[2][2] * a[0][0] - a[2][0] * a[0][2]) / det;
  c[2][1] = (a[2][0] * a[0][1] - a[2][1] * a[0][0]) / det;
  c[0][2] = (a[0][1] * a[1][2] - a[0][2] * a[1][1]) / det;
  c[1][2] = (a[0][2] * a[1][0] - a[0][0] * a[1][2]) / det;
  c[2][2] = (a[0][0] * a[1][1] - a[0][1] * a[1][0]) / det;
  mat_copy_matrix_d3(m, c);
  return 1;
}
|
clonedetector.h | #if ! defined CLONEDETECTOR_H
#define CLONEDETECTOR_H
#include <cassert>
#include <vector>
#include <map>
#include "../common/hash_map_includer.h"
#include <algorithm>
#include <limits>
#include <iterator>
#include <boost/cstdint.hpp>
#include <boost/array.hpp>
#include <boost/thread.hpp>
#include "../ccfx/ccfxcommon.h"
#include "../threadqueue/threadqueue.h"
#if defined _MSC_VER
#undef max
#undef min
#endif
template<typename ElemType, typename HashValueType>
class CloneDetector {
private:
class SubSequence {
private:
size_t begin;
size_t end;
public:
SubSequence(size_t beginPos_, size_t endPos_)
: begin(beginPos_), end(endPos_)
{
assert(beginPos_ <= endPos_);
}
SubSequence()
: begin(), end()
{
}
SubSequence(const SubSequence &right)
: begin(right.begin), end(right.end)
{
}
public:
//bool operator==(const SubSequence &right) const
//{
// if (end - begin != right.end - right.begin) {
// return false;
// }
// const std:: vector<ElemType> &seq = *pSeq;
// size_t li = begin;
// size_t ri = right.begin;
// while (li != end) {
// ElemType lt = to_compared(seq, li, begin);
// ElemType rt = to_compared(*right.pSeq, ri, right.begin);
// if (lt != rt) {
// return false;
// }
// ++li;
// ++ri;
// }
// return true;
//}
//bool operator<(const SubSequence &right) const
//{
// const std:: vector<ElemType> &seq = *pSeq;
// size_t li = begin;
// size_t ri = right.begin;
// while (li < end && ri < right.end) {
// ElemType lt = to_compared(seq, li, begin);
// ElemType rt = to_compared(*right.pSeq, ri, right.begin);
// if (lt != rt) {
// break; // while
// }
// ++li;
// ++ri;
// }
//
// if (li < end) {
// if (ri < right.end) {
// ElemType lt = to_compared(seq, li, begin);
// ElemType rt = to_compared(*right.pSeq, ri, right.begin);
// if (lt < rt) {
// return true;
// }
// else {
// return false;
// }
// }
// else {
// assert(ri == right.end);
// return false;
// }
// }
// else {
// assert(li == end);
// if (ri < right.end) {
// return true;
// }
// else {
// assert(ri == right.end);
// return false;
// }
// }
//}
//const ElemType &operator[](size_t index) const
//{
// return *(li + index);
//}
void swap(SubSequence &right)
{
std:: swap(this->begin, right.begin);
std:: swap(this->end, right.end);
}
inline size_t size() const
{
return end - begin;
}
inline size_t getBegin() const
{
return this->begin;
}
inline size_t getEnd() const
{
return this->end;
}
public:
class SequencePrevComparator {
private:
size_t unitLength;
const typename std:: vector<ElemType> *pSeq;
public:
SequencePrevComparator(size_t unitLength_, const std:: vector<ElemType> *pSeq_)
: unitLength(unitLength_), pSeq(pSeq_)
{
}
SequencePrevComparator()
: unitLength(0), pSeq(NULL)
{
}
SequencePrevComparator(const SequencePrevComparator &right)
: unitLength(right.unitLength), pSeq(right.pSeq)
{
}
bool operator()(size_t posLeft, size_t posRight) const
{
assert(posLeft + unitLength <= (*pSeq).size());
assert(posRight + unitLength <= (*pSeq).size());
size_t i;
for (i = 0; i < unitLength; ++i) {
const ElemType &li = to_compared(*pSeq, posLeft + i, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + i, posRight);
assert(li != 0);
assert(ri != 0);
if (li != ri) {
break; // for i
}
}
if (i != unitLength) {
const ElemType &li = to_compared(*pSeq, posLeft + i, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + i, posRight);
return li < ri;
}
else {
const ElemType &lp = to_reversereference_compared(*pSeq, posLeft - 1, posLeft, posLeft + unitLength);
const ElemType &rp = to_reversereference_compared(*pSeq, posRight - 1, posRight, posRight + unitLength);
return lp < rp;
}
}
};
class ExtensionPrevComparator {
private:
size_t baseLength;
const typename std:: vector<ElemType> *pSeq;
public:
ExtensionPrevComparator(size_t baseLength_, const std:: vector<ElemType> *pSeq_)
: baseLength(baseLength_), pSeq(pSeq_)
{
}
ExtensionPrevComparator()
: baseLength(0), pSeq(NULL)
{
}
ExtensionPrevComparator(const ExtensionPrevComparator &right)
: baseLength(right.baseLength), pSeq(right.pSeq)
{
}
bool operator()(size_t posLeft, size_t posRight) const
{
assert(posLeft + baseLength < (*pSeq).size());
assert(posRight + baseLength < (*pSeq).size());
const ElemType &li = to_compared(*pSeq, posLeft + baseLength, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + baseLength, posRight);
if (li == ri) {
const ElemType &lp = to_reversereference_compared(*pSeq, posLeft - 1, posLeft, posLeft + baseLength);
const ElemType &rp = to_reversereference_compared(*pSeq, posRight - 1, posRight, posRight + baseLength);
return lp < rp;
}
else {
return li < ri;
}
}
};
class PrevExtensionComparator {
private:
size_t baseLength;
const typename std:: vector<ElemType> *pSeq;
public:
PrevExtensionComparator(size_t baseLength_, const std:: vector<ElemType> *pSeq_)
: baseLength(baseLength_), pSeq(pSeq_)
{
}
PrevExtensionComparator()
: baseLength(0), pSeq(NULL)
{
}
PrevExtensionComparator(const ExtensionPrevComparator &right)
: baseLength(right.baseLength), pSeq(right.pSeq)
{
}
bool operator()(size_t posLeft, size_t posRight) const
{
const ElemType &lp = to_reversereference_compared(*pSeq, posLeft - 1, posLeft, posLeft + baseLength);
const ElemType &rp = to_reversereference_compared(*pSeq, posRight - 1, posRight, posRight + baseLength);
if (lp == rp) {
assert(posLeft + baseLength < (*pSeq).size());
assert(posRight + baseLength < (*pSeq).size());
const ElemType &li = to_compared(*pSeq, posLeft + baseLength, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + baseLength, posRight);
return li < ri;
}
else {
return lp < rp;
}
}
};
};
	// Element-wise equality of two subsequences of *pSeq.
	// NOTE(review): elements are mapped through to_compared() relative to
	// each subsequence's start before comparing -- presumably to
	// normalize back-references; confirm against to_compared's contract.
	static bool subsequenceEqual(const typename std:: vector<ElemType> *pSeq, const SubSequence &left, const SubSequence &right)
	{
		// Differing lengths can never match
		if (left.size() != right.size()) {
			return false;
		}
		size_t li = left.getBegin();
		size_t ri = right.getBegin();
		while (li != left.getEnd()) {
			ElemType lt = to_compared(*pSeq, li, left.getBegin());
			ElemType rt = to_compared(*pSeq, ri, right.getBegin());
			if (lt != rt) {
				return false;
			}
			++li;
			++ri;
		}
		return true;
	}
public:
class SequenceHashFunction {
public:
virtual ~SequenceHashFunction()
{
}
virtual HashValueType operator()(const typename std:: vector<ElemType> &seq, size_t begin, size_t end) = 0;
};
public:
struct CloneSetItem {
public:
ElemType prev;
ElemType extension;
std::vector<size_t/* pos */> poss;
public:
CloneSetItem(const CloneSetItem &right)
: prev(right.prev), extension(right.extension), poss(right.poss)
{
}
CloneSetItem()
: prev(0), extension(0), poss()
{
}
};
class CloneSetListener {
private:
const std:: vector<ElemType> *pSeq;
size_t unitLength;
public:
virtual ~CloneSetListener()
{
}
CloneSetListener()
: pSeq(NULL), unitLength(0)
{
}
CloneSetListener(const CloneSetListener &right)
: pSeq(right.pSeq), unitLength(right.unitLength)
{
}
public:
virtual void attachSeq(const std:: vector<ElemType> *pSeq_)
{
pSeq = pSeq_;
const std:: vector<ElemType> &seq = *pSeq;
if (! seq.empty()) {
assert(seq[0] == 0);
assert(seq.back() == 0);
}
}
virtual void setUnitLength(size_t unitLength_)
{
unitLength = unitLength_;
}
public:
virtual bool rangeCheck(const std:: vector<CloneSetItem>& UNUSED(cloneSet))
{
return true;
}
virtual bool codeCheck(size_t UNUSED(pos), size_t UNUSED(length))
{
return true;
}
virtual void found(
const std:: vector<CloneSetItem>& UNUSED(cloneSet),
size_t UNUSED(baseLength),
boost::uint64_t UNUSED(cloneSetReferenceNumber))
{
}
protected:
const std:: vector<ElemType> &refSeq() const
{
return *pSeq;
}
size_t getUnitLength() const
{
return unitLength;
}
};
class ClonePairListener {
private:
const typename std:: vector<ElemType> *pSeq;
size_t unitLength;
public:
virtual ~ClonePairListener()
{
}
ClonePairListener()
: pSeq(NULL), unitLength(0)
{
}
ClonePairListener(const CloneSetListener &right)
: pSeq(right.pSeq), unitLength(right.unitLength)
{
}
public:
virtual void attachSeq(const std:: vector<ElemType> *pSeq_)
{
pSeq = pSeq_;
}
virtual void setUnitLength(size_t unitLength_)
{
unitLength = unitLength_;
}
public:
virtual bool codeCheck(size_t UNUSED(pos), size_t UNUSED(length))
{
return true;
}
virtual bool rangeCheck(const std:: vector<CloneSetItem>& UNUSED(cloneSet))
{
return true;
}
virtual void found(
size_t pos1,
size_t pos2,
size_t UNUSED(baseLength),
boost::uint64_t UNUSED(cloneSetReferenceNumber))
{
assert(pos1 < pos2);
}
protected:
const typename std:: vector<ElemType> &refSeq() const
{
return *pSeq;
}
size_t getUnitLength() const
{
return unitLength;
}
};
class ClonePairListenerWithScope : public ClonePairListener {
private:
size_t barrior;
enum mode_t { mode_all, mode_left_and_cross, mode_cross } mode;
public:
virtual ~ClonePairListenerWithScope()
{
}
ClonePairListenerWithScope()
: ClonePairListener(), barrior(0), mode(mode_all)
{
}
ClonePairListenerWithScope(const CloneSetListener &right)
: ClonePairListener(right), barrior(right.barrior), mode(right.mode)
{
}
public:
void setAllMode()
{
mode = mode_all;
}
void setCrossMode(size_t barrior_)
{
mode = mode_cross;
barrior = barrior_;
}
void setLeftAndCrossMode(size_t barrior_)
{
mode = mode_left_and_cross;
barrior = barrior_;
}
public:
virtual bool rangeCheck(const std:: vector<CloneSetItem> &cloneSet)
{
switch (mode) {
case mode_all:
{
return true;
}
break;
case mode_left_and_cross:
{
for (size_t i = 0; i < cloneSet.size(); ++i) {
const std::vector<size_t> &poss = cloneSet[i].poss;
for (std:: vector<size_t/* pos */>::const_iterator j = poss.begin(); j != poss.end(); ++j) {
if (*j < barrior) {
return true;
}
}
}
}
break;
case mode_cross:
{
bool leftFound = false;
bool rightFound = false;
for (size_t i = 0; i < cloneSet.size(); ++i) {
const std::vector<size_t> &poss = cloneSet[i].poss;
for (std:: vector<size_t/* pos */>::const_iterator j = poss.begin(); j != poss.end(); ++j) {
if (*j < barrior) {
leftFound = true;
}
else {
rightFound = true;
}
if (leftFound && rightFound) {
return true;
}
}
}
}
break;
default:
assert(false);
break;
}
return false;
}
virtual void found(size_t posA, size_t posB, size_t baseLength, boost::uint64_t cloneSetReferenceNumber)
{
assert(posA < posB);
switch (mode) {
case mode_all:
break;
case mode_left_and_cross:
if (posA >= barrior && posB >= barrior) {
return;
}
break;
case mode_cross:
if (
((posA >= barrior) && (posB >= barrior)) ||
((posA < barrior) && (posB < barrior))
)
{
return;
}
break;
default:
assert(false);
break;
}
found_scoped(posA, posB, baseLength, cloneSetReferenceNumber);
}
protected:
virtual void found_scoped(
size_t pos1,
size_t pos2,
size_t UNUSED(baseLength),
boost::uint64_t UNUSED(cloneSetReferenceNumber))
{
assert(pos1 < pos2);
}
};
private:
// Adapts a ClonePairListener to the CloneSetListener interface: every clone
// set reported by the detector is expanded into its individual clone pairs.
class ClonePairListenerAdapter : public CloneSetListener {
private:
	ClonePairListener *pListener; // not owned
public:
	ClonePairListenerAdapter(ClonePairListener *pRight)
		: pListener(pRight)
	{
	}
public:
	// Forward sequence attachment to both the base class and the wrapped
	// pair listener.
	virtual void attachSeq(const typename std:: vector<ElemType> *pSeq_)
	{
		CloneSetListener::attachSeq(pSeq_);
		(*pListener).attachSeq(pSeq_);
	}
	virtual void setUnitLength(size_t unitLength_)
	{
		CloneSetListener::setUnitLength(unitLength_);
		(*pListener).setUnitLength(unitLength_);
	}
public:
	// Delegated filters.
	virtual bool codeCheck(size_t pos, size_t length)
	{
		return (*pListener).codeCheck(pos, length);
	}
	virtual bool rangeCheck(const std:: vector<CloneSetItem> &cloneSet)
	{
		return (*pListener).rangeCheck(cloneSet);
	}
	// Expand the clone set into pairs, always reporting the smaller position
	// first.
	virtual void found(const std:: vector<CloneSetItem> &cloneSet, size_t baseLength,
		boost::uint64_t cloneSetReferenceNumber)
	{
		//const typename std:: vector<ElemType> &seq = refSeq();
		//size_t unitLength = getUnitLength(); //unused variable
		for (size_t csi = 0; csi < cloneSet.size(); ++csi) {
			const CloneSetItem &cs = cloneSet[csi];
			for (size_t csj = csi; csj < cloneSet.size(); ++csj) { // 2008/02/13
				const CloneSetItem &right = cloneSet[csj];
				// NOTE(review): item pairs sharing a non-zero prev or
				// extension are skipped — presumably because those pairs
				// are reported as part of a longer clone; confirm against
				// the CloneSetItem producer.
				if ((cs.prev == 0 || cs.prev != right.prev) && (cs.extension == 0 || cs.extension != right.extension)) {
					const std::vector<size_t> &poss = cs.poss;
					for (std:: vector<size_t/* pos */>::const_iterator a = poss.begin(); a != poss.end(); ++a) {
						const std::vector<size_t> &possRight = right.poss;
						// For pairs within the same item, start at a + 1 to
						// avoid self-pairs and duplicates.
						for (std:: vector<size_t/* pos */>::const_iterator b = (&cs == &right) ? a + 1 : possRight.begin(); b != possRight.end(); ++b) {
							size_t posA = *a;
							size_t posB = *b;
							assert(posA != posB);
							if (posA < posB) {
								(*pListener).found(posA, posB, baseLength, cloneSetReferenceNumber);
							}
							else {
								(*pListener).found(posB, posA, baseLength, cloneSetReferenceNumber);
							}
						}
					}
				}
			}
		}
	}
};
private:
const typename std:: vector<ElemType> *pSeq;
size_t bottomUnitLength;
size_t multiply;
std:: vector<HashValueType> hashSeq;
//bool optionVerbose;
boost::uint64_t cloneSetReferenceNumber;
size_t numThreads;
public:
// Default constructor: no sequence attached, unit length 0, multiplier 1,
// reference counter 0, single-threaded.
CloneDetector()
	: pSeq(NULL), bottomUnitLength(0), multiply(1), hashSeq()/*, optionVerbose(false)*/, cloneSetReferenceNumber(0), numThreads(1)
{
}
CloneDetector(const CloneDetector &right)
: pSeq(right.pSeq), bottomUnitLength(right.bottomUnitLength), multiply(right.multiply), hashSeq(right.hashSeq)/*, optionVerbose(right.optionVerbose)*/, numThreads(1)
{
}
private:
// Never called at runtime (asserts false).  Exists only so the template
// methods to_compared() / to_reversereference_compared() are referenced and
// therefore type-checked for the instantiated template arguments.
CloneDetector(size_t dummy) // dummy to ensure methods
{
	std::vector<ElemType> seqDummy;
	size_t pos = 0;
	size_t begin = 0;
	size_t end = 0;
	ElemType t1 = to_compared(&seqDummy, pos, begin);
	ElemType t2 = to_reversereference_compared(&seqDummy, pos, begin, end);
	assert(false);
}
public:
// Set the number of worker threads used by findCloneSet().
void setThreads(size_t numThreads_)
{
	numThreads = numThreads_;
}
// Attach the token sequence to analyze; the caller retains ownership and
// must keep it alive until detachSequence().
void attachSequence(const std:: vector<ElemType> *pSeq_)
{
	assert(pSeq_ != NULL);
	pSeq = pSeq_;
}
// Return the attached sequence; requires attachSequence() to have been
// called (dereferences pSeq unchecked).
const typename std:: vector<ElemType> &refSeq() const
{
	return *pSeq;
}
// Forget the attached sequence (does not free it).
void detachSequence()
{
	pSeq = NULL;
}
// Set the base window length used for hashing.
void setBottomUnitLength(size_t bottomUnitLength_)
{
	bottomUnitLength = bottomUnitLength_;
}
// Set the multiplier applied to the bottom unit length.
void setMultiply(size_t multiply_)
{
	multiply = multiply_;
}
// Effective window length: bottomUnitLength * multiply.
size_t getUnitLength() const
{
	return bottomUnitLength * multiply;
}
// void setOptionVerbose(bool ov)
// {
// optionVerbose = ov;
// }
// bool getOptionVerbose() const
// {
// return optionVerbose;
// }
// Reset the running clone-set id counter used when reporting clone sets.
void clearCloneSetReferenceNumber()
{
	cloneSetReferenceNumber = 0;
}
// Find clone pairs: runs the clone-set search and expands each clone set
// into individual pairs via ClonePairListenerAdapter.
void findClonePair(ClonePairListener *pListener, SequenceHashFunction &hashFunc)
{
	ClonePairListenerAdapter a(pListener);
	findCloneSet(&a, hashFunc);
}
public:
// Debug helper: print `len` elements of the attached sequence starting at
// beginPos, as ints, ten per line separated by spaces.
// NOTE(review): no trailing newline is emitted unless the count is an exact
// multiple of ten.
void print_seq(size_t beginPos, size_t len)
{
	const std:: vector<ElemType> &seq = *pSeq;
	size_t endPos = beginPos + len;
	size_t count = 0;
	for (size_t i = beginPos; i < endPos; ++i) {
		if (count > 0) {
			std::cout << " ";
		}
		std::cout << (int)(seq[i]);
		++count;
		if (count == 10) {
			std::cout << std::endl;
			count = 0;
		}
	}
}
private:
// A detected clone set together with the token length its members share.
struct CloneSetData {
	std::vector<CloneSetItem> cloneSet; // the set's members, grouped by (prev, extension)
	size_t baseLength;                  // common subsequence length in tokens
};
// Consumer-thread body: pop batches of clone sets from the queue and forward
// them to the listener, assigning each set the next cloneSetReferenceNumber.
// A NULL batch terminates the loop; each popped batch is deleted here (this
// function takes ownership).
void send_clone_set_data_to_listener(ThreadQueue<std::vector<std::vector<CloneSetData> > *> *pQue, CloneSetListener *pListener) {
	std::vector<std::vector<CloneSetData> > *pFoundCloneSetsForThreads;
	while ((pFoundCloneSetsForThreads = (*pQue).pop()) != NULL) {
		std::vector<std::vector<CloneSetData> > &foundCloneSetsForThreads = *pFoundCloneSetsForThreads;
		for (size_t cii = 0; cii < foundCloneSetsForThreads.size(); ++cii) {
			std::vector<CloneSetData> &foundCloneSets = foundCloneSetsForThreads[cii];
			for (size_t csi = 0; csi < foundCloneSets.size(); ++csi) {
				++cloneSetReferenceNumber;
				const CloneSetData &cloneSetData = foundCloneSets[csi];
				(*pListener).found(cloneSetData.cloneSet, cloneSetData.baseLength, cloneSetReferenceNumber);
			}
		}
		delete pFoundCloneSetsForThreads;
	}
}
public:
// Detect all clone sets in the attached sequence and report them to
// pListener.  A hash is computed for every window of getUnitLength() tokens;
// windows sharing a hash bucket are verified token-by-token, extended, and
// the resulting clone sets are handed to a consumer thread through a queue
// so listener callbacks stay serialized while buckets are processed in
// parallel (OpenMP).
void findCloneSet(CloneSetListener *pListener, SequenceHashFunction &hashFunc)
{
	const std:: vector<ElemType> &seq = *pSeq;
	const size_t unitLength = getUnitLength();
	calc_hash_seq(hashFunc);
	(*pListener).attachSeq(&seq);
	(*pListener).setUnitLength(unitLength);
	if (seq.size() < unitLength) {
		return;
	}
	std::vector<std:: vector<size_t/* pos */> > cloneFragments;
	{
		// First pass: count occurrences per hash bucket so the second pass
		// can reserve exact capacity.  Hash value 0 marks delimiter regions
		// and is skipped.
		std:: vector<size_t> cloneCounts;
		cloneCounts.resize((size_t)(std:: numeric_limits<HashValueType>::max()) + 1, 0);
		size_t pos = 1;
		while (pos < seq.size() - unitLength) {
			HashValueType h = hashSeq[pos];
			if (h != 0) {
				assert(cloneCounts[h] < std::numeric_limits<size_t>::max());
				++cloneCounts[h];
				++pos;
			}
			else {
				if (pos + unitLength < hashSeq.size()) {
					if (pos > unitLength) {
						assert(hashSeq[pos + unitLength - 1] == 0);
						pos += unitLength;
					}
					while (pos < seq.size() - unitLength && hashSeq[pos] == 0) {
						++pos;
					}
				}
				else {
					break; // while pos
				}
			}
		}
		// Second pass: record the positions of every bucket that occurs at
		// least twice (only those can form clones).
		cloneFragments.resize((size_t)(std:: numeric_limits<HashValueType>::max()) + 1);
		pos = 1;
		while (pos < seq.size() - unitLength) {
			HashValueType h = hashSeq[pos];
			if (h != 0) {
				if (cloneCounts[h] >= 2) {
					cloneFragments[h].reserve(cloneCounts[h]);
					cloneFragments[h].push_back(pos);
				}
				++pos;
			}
			else {
				if (pos + unitLength < hashSeq.size()) {
					if (pos > unitLength) {
						assert(hashSeq[pos + unitLength - 1] == 0);
						pos += unitLength;
					}
					while (pos < seq.size() - unitLength && hashSeq[pos] == 0) {
						++pos;
					}
				}
				else {
					break; // while pos
				}
			}
		}
	}
	// The "eater" thread drains the queue and invokes the listener.
	ThreadQueue<std::vector<std::vector<CloneSetData> > *> que(10);
	boost::thread eater(boost::bind(&CloneDetector::send_clone_set_data_to_listener, this, &que, pListener));
	size_t worker = std::max((size_t)1, (size_t)numThreads);
	std::vector<size_t> validCis;
	validCis.reserve(numThreads);
	size_t ci = 1;
	while (ci < cloneFragments.size()) {
		// Collect up to `worker` non-empty buckets per batch.
		validCis.clear();
		while (ci < cloneFragments.size() && validCis.size() < worker) {
			if (cloneFragments[ci].size() > 0) {
				validCis.push_back(ci);
			}
			++ci;
		}
		std::vector<std::vector<CloneSetData> > *pFoundCloneSetsForThreads = new std::vector<std::vector<CloneSetData> >();
		std::vector<std::vector<CloneSetData> > &foundCloneSetsForThreads = *pFoundCloneSetsForThreads;
		foundCloneSetsForThreads.resize(worker);
		size_t validCiCount = validCis.size();
#pragma omp parallel for schedule(dynamic)
		for (size_t cii = 0; cii < validCiCount; ++cii) {
			const size_t threadNum = cii;
			size_t tci = validCis[cii];
			std::vector<CloneSetData> &foundCloneSets = foundCloneSetsForThreads[threadNum];
			foundCloneSets.clear();
			std:: vector<size_t/* pos */> &poss = cloneFragments[tci];
			if (poss.size() > 1) {
				// Sort bucket members so equal subsequences become adjacent,
				// then walk runs [j, k) of identical subsequences.
				typename SubSequence::SequencePrevComparator spc(unitLength, pSeq);
				std:: sort(poss.begin(), poss.end(), spc);
				size_t j = 0;
				while (j < poss.size()) {
					size_t pj = poss[j];
					SubSequence ssj(pj, pj + unitLength);
					SubSequence ssk;
					size_t k = j + 1;
					while (k < poss.size() && subsequenceEqual(pSeq, (ssk = SubSequence(poss[k], poss[k] + unitLength)), ssj)) {
						++k;
					}
					// here, subsequence begining at j, ..., subsequence begining at k - 1 have the same subsequence
					assert(k == poss.size() || ! subsequenceEqual(pSeq, ssj, ssk));
					size_t size = k - j;
					if (size <= 1) {
						NULL;
					}
					else {
						const ElemType &firstPrev = to_reversereference_compared(*pSeq, poss[j] - 1, poss[j], poss[j] + unitLength);
						const ElemType &lastPrev = to_reversereference_compared(*pSeq, poss[k - 1] - 1, poss[k - 1], poss[k - 1] + unitLength);
						if (firstPrev != 0 && firstPrev != -1 && firstPrev == lastPrev) { // 2007/10/29 //if (firstPrev != 0 && firstPrev == lastPrev) {
							// All occurrences share the same non-delimiter
							// predecessor — skipped; presumably covered by a
							// longer clone (confirm against the comparators).
							NULL;
						}
						else {
							size_t maxExtend = calc_max_extend(poss, j, k, unitLength);
							typename SubSequence::PrevExtensionComparator pec(unitLength + maxExtend, pSeq);
							std:: sort(poss.begin() + j, poss.begin() + k, pec);
							output_clone_set(poss, j, k, unitLength + maxExtend, pListener, &foundCloneSets);
							find_clone_set_i(&poss, j, k, unitLength + maxExtend, pListener, &foundCloneSets);
						}
					}
					j = k;
					ssj = ssk;
				}
			}
		}
		que.push(pFoundCloneSetsForThreads);
	}
	que.push(NULL); // sentinel: terminates the eater thread
	eater.join();
	hashSeq.clear();
}
private:
// Recursively refine a group of occurrences poss[begin..end) that share the
// same baseLength-token subsequence: group them by the element that follows
// each occurrence ("extension"), extend each subgroup as far as possible,
// and report every refined subgroup through output_clone_set().
void find_clone_set_i(std:: vector<size_t/* pos */> *pPoss, size_t begin, size_t end,
	size_t baseLength, CloneSetListener *pListener, std::vector<CloneSetData> *pFoundCloneSets)
{
	if (end - begin <= 1) {
		return;
	}
	std:: vector<size_t/* pos */> &poss = *pPoss;
	typename SubSequence::ExtensionPrevComparator epc(baseLength, pSeq);
	std:: sort(poss.begin() + begin, poss.begin() + end, epc);
	// Skip occurrences that cannot be extended: end of sequence, or a
	// delimiter (0) immediately follows.
	size_t nnBegin = begin;
	while (nnBegin < end && (poss[nnBegin] + baseLength >= (*pSeq).size() || (*pSeq)[poss[nnBegin] + baseLength] == 0)) {
		++nnBegin;
	}
	begin = nnBegin;
	if (end - nnBegin <= 1) {
		return;
	}
	size_t j = nnBegin;
	while (j < end) {
		// Run [j, k): occurrences sharing the same extension element.
		size_t k = j + 1;
		while (k < end && to_compared(*pSeq, poss[k] + baseLength, poss[k]) == to_compared(*pSeq, poss[j] + baseLength, poss[j])) {
			++k;
		}
		// here, subsequence begining at j, ..., subsequence begining at k - 1 have the same subsequence
		assert(k == end || ! (to_compared(*pSeq, poss[k] + baseLength, poss[k]) == to_compared(*pSeq, poss[j] + baseLength, poss[j])));
		size_t size = k - j;
		if (size <= 1) {
			NULL;
		}
		else {
			size_t pj = poss[j];
			const ElemType &firstPrev = to_reversereference_compared(*pSeq, pj - 1, pj, pj + baseLength);
			const ElemType &lastPrev = to_reversereference_compared(*pSeq, poss[k - 1] - 1, poss[k - 1], poss[k - 1] + baseLength);
			if (firstPrev != 0 && firstPrev != -1 && firstPrev == lastPrev) { // 2007/11/02 //if (firstPrev != 0 && firstPrev == lastPrev) {
				// All occurrences share the same non-delimiter predecessor —
				// skipped; presumably covered by a longer clone.
				NULL;
			}
			else {
				size_t maxExtend = calc_max_extend(poss, j, k, baseLength);
				typename SubSequence::PrevExtensionComparator pec(baseLength + maxExtend, pSeq);
				std:: sort(poss.begin() + j, poss.begin() + k, pec);
				output_clone_set(poss, j, k, baseLength + maxExtend, pListener, pFoundCloneSets);
				find_clone_set_i(&poss, j, k, baseLength + maxExtend, pListener, pFoundCloneSets);
			}
		}
		j = k;
	}
}
// Build a clone set from occurrences poss[begin..end) that share the same
// baseLength-token subsequence, grouping members by (previous element,
// extension element), and append it to *pFoundCloneSets if the listener's
// codeCheck and rangeCheck both accept it.
void output_clone_set(const std:: vector<size_t/* pos */> &poss, size_t begin, size_t end, size_t baseLength, CloneSetListener *pListener,
	std::vector<CloneSetData> *pFoundCloneSets)
{
	if (end - begin == 0 || ! (*pListener).codeCheck(poss[begin], baseLength)) {
		return;
	}
	std:: vector<CloneSetItem> cloneSet;
	size_t p = begin;
	while (p < end) {
		// Run [p, q): occurrences sharing the same previous element.
		const ElemType &prevp = to_reversereference_compared(*pSeq, poss[p] - 1, poss[p], poss[p] + baseLength);
		size_t q = p + 1;
		while (q < end && to_reversereference_compared(*pSeq, poss[q] - 1, poss[q], poss[q] + baseLength) == prevp) {
			++q;
		}
		// here, subsequence begining at p, ..., subsequence begining at q - 1 have the same prev
		assert(q == end || prevp != to_reversereference_compared(*pSeq, poss[q] - 1, poss[q], poss[q] + baseLength));
		size_t i = p;
		while (i < q) {
			// Run [i, j): occurrences sharing the same extension element.
			const ElemType &extensioni = to_compared(*pSeq, poss[i] + baseLength, poss[i]);
			size_t j = i + 1;
			while (j < q && to_compared(*pSeq, poss[j] + baseLength, poss[j]) == extensioni) {
				++j;
			}
			// here, subsequence begining at i, ..., subsequence begining at j - 1 have the same extension
			assert(j == q || extensioni != to_compared(*pSeq, poss[j] + baseLength, poss[j]));
			cloneSet.resize(cloneSet.size() + 1);
			CloneSetItem &cs = cloneSet.back();
			cs.prev = prevp;
			cs.extension = extensioni;
			cs.poss.insert(cs.poss.end(), poss.begin() + i, poss.begin() + j);
			i = j;
		}
		p = q;
	}
	if ((*pListener).rangeCheck(cloneSet)) {
		std::vector<CloneSetData> &foundCloneSets = *pFoundCloneSets;
		foundCloneSets.resize(foundCloneSets.size() + 1);
		CloneSetData &cloneSetData = foundCloneSets.back();
		cloneSetData.cloneSet.swap(cloneSet); // avoid copying the member lists
		cloneSetData.baseLength = baseLength;
	}
}
// Return the largest `extend` such that every occurrence in
// poss[begin..end) has the same non-delimiter element at offset
// baseLength + extend, i.e. how far all occurrences can be extended to the
// right in lockstep.  Requires at least two occurrences.
size_t calc_max_extend(const std:: vector<size_t/* pos */> &poss, size_t begin, size_t end, size_t baseLength)
{
	assert(end - begin >= 2);
	size_t extend = 0;
	while (true) {
		size_t posj = poss[begin];
		const ElemType &ej = to_compared(*pSeq, posj + baseLength + extend, posj);
		if (ej == 0) {
			// Delimiter reached: cannot extend further.
			return extend;
		}
		for (size_t p = begin + 1; p < end; ++p) {
			size_t pos = poss[p];
			const ElemType &ep = to_compared(*pSeq, pos + baseLength + extend, pos);
			if (ep != ej) {
				return extend;
			}
		}
		++extend;
	}
	return extend; // unreachable: the loop above exits only via return
}
// Fill hashSeq with one hash per window of bottomUnitLength * multiply
// tokens; positions inside or too close to a delimiter (element value 0)
// get hash 0.  Composite unit lengths are built incrementally from their
// factors via multiple_hash_sequence().
void calc_hash_seq(SequenceHashFunction &hashFunc)
{
	const std::vector<ElemType> &seq = *pSeq;
	hashSeq.clear();
	hashSeq.resize(seq.size(), 0);
	size_t num = bottomUnitLength * multiply;
	std:: vector<size_t> factors0;
	factorize(&factors0, num);
	if (factors0.size() == 0) {
		// factorize() returns empty only for num <= 1: hash directly.
		make_bottom_level_hash_sequence(seq, hashFunc, &hashSeq, bottomUnitLength * multiply);
	}
	else {
		// Process each delimiter-terminated block of the sequence.
		size_t beginPos = 0;
		assert(seq.size() == 0 || seq.back() == 0);
		// NOTE(review): if seq is empty, seq.size() - 1 wraps to SIZE_MAX
		// and the loop body runs — confirm callers never reach this branch
		// with an empty sequence.
		while (beginPos < seq.size() - 1) {
			typename std::vector<ElemType>::const_iterator j = std::find(seq.begin() + beginPos + 1, seq.end(), 0);
			size_t nextPos = j - seq.begin();
			assert(seq[nextPos] == 0);
			size_t endPos = nextPos + 1;
			assert(endPos <= seq.size());
			size_t blockSize = endPos - beginPos;
			if (blockSize < num) {
				// hashSeq[beginPos ... endPos] has been zero-filled already.
			}
			else {
				// Hash with the last factor first, then fold in the
				// remaining factors until the full unit length is reached.
				int fi = factors0.size() - 1;
				size_t f = factors0[fi];
				make_bottom_level_hash_sequence(seq, hashFunc, &hashSeq, f, beginPos, endPos);
				size_t curUnitLength = f;
				while (--fi >= 0) {
					size_t f = factors0[fi];
					multiple_hash_sequence(hashFunc, &hashSeq, curUnitLength, f, beginPos, endPos);
					curUnitLength *= f;
				}
				assert(curUnitLength == bottomUnitLength * multiply);
			}
			beginPos = nextPos;
		}
	}
}
private:
// Convenience overload: combine hashes across the whole hash sequence.
static inline void multiple_hash_sequence(SequenceHashFunction &hashFunc,
	std:: vector<HashValueType> *pHashSeq, size_t unitLength, size_t multiply)
{
	std:: vector<HashValueType> &hashSeq = *pHashSeq;
	if (! hashSeq.empty()) {
		multiple_hash_sequence(hashFunc, pHashSeq, unitLength, multiply, 0, hashSeq.size());
	}
}
// Combine `multiply` consecutive unit hashes (stride `unitLength`) into one
// hash per position within the delimiter-bounded range [beginPos, endPos).
// Positions too close to endPos to hold a full combined window are
// zero-filled.
static void multiple_hash_sequence(
	SequenceHashFunction& UNUSED(hashFunc),
	std:: vector<HashValueType> *pHashSeq,
	size_t unitLength,
	size_t multiply,
	size_t beginPos,
	size_t endPos)
{
	std:: vector<HashValueType> &hashSeq = *pHashSeq;
	assert(beginPos < hashSeq.size());
	assert(hashSeq[beginPos] == 0);
	assert(endPos <= hashSeq.size());
	assert(hashSeq[endPos - 1] == 0);
	assert(unitLength >= 1);
	assert(endPos > unitLength * multiply);
	for (size_t i = beginPos + 1; i < endPos - unitLength * multiply; ++i) {
		HashValueType value = 0;
		for (size_t j = 0; j < multiply; ++j) {
			HashValueType h = hashSeq[i + j * unitLength];
			/*
			** when i == endPos - unitLength * multiply - 1 and j == multiply - 1,
			** i + j * unitLength
			** = endPos - unitLength * multiply - 1 + unitLength * (multiply - 1)
			** = endPos - unitLength * multiply - 1 + unitLength * multiply - unitLength
			** = endPos - unitLength - 1
			** Therefore, i + j * unitLength < endPos - 1
			*/
			assert(h != 0);
			value += h;
		}
		// 0 is reserved as the delimiter mark, so it must never be used as a
		// hash value.  (Translated from the original Japanese comment.)
		hashSeq[i] = value == 0 ? 1 : value;
	}
	std::fill(hashSeq.begin() + endPos - unitLength * multiply, hashSeq.begin() + endPos, 0);
}
// Convenience overload: hash the whole sequence as one range.
static inline void make_bottom_level_hash_sequence(const std:: vector<ElemType> &seq, SequenceHashFunction &hashFunc,
	std:: vector<HashValueType> *pHashSeq, size_t unitLength)
{
	std:: vector<HashValueType> &hashSeq = *pHashSeq;
	if (! hashSeq.empty()) {
		make_bottom_level_hash_sequence(seq, hashFunc, pHashSeq, unitLength, 0, seq.size());
	}
}
// Compute a hash for every window of `unitLength` tokens within the
// delimiter-bounded range [beginPos, endPos) of seq; trailing positions
// that cannot hold a full window are zero-filled.
static void make_bottom_level_hash_sequence(const std:: vector<ElemType> &seq, SequenceHashFunction &hashFunc,
	std:: vector<HashValueType> *pHashSeq, size_t unitLength, size_t beginPos, size_t endPos)
{
	assert(beginPos < seq.size());
	assert(seq[beginPos] == 0);
	assert(endPos <= seq.size());
	assert(seq[endPos - 1] == 0);
	assert(unitLength >= 1);
	std:: vector<HashValueType> &hashSeq = *pHashSeq;
	assert(hashSeq.size() == seq.size());
	size_t i = beginPos + 1;
	if (endPos - beginPos >= unitLength) {
		typename std::vector<ElemType>::const_iterator range_begin = seq.begin() + beginPos + 1;
		typename std::vector<ElemType>::const_iterator range_end = seq.begin() + endPos - unitLength; // value of i at the last repetition of the following 'for' loop
		//assert(std::find(range_begin, range_end, 0) == range_end);
		for (; i < endPos - unitLength; ++i) {
			HashValueType hashValue = hashFunc(seq, i, i + unitLength);
			// 0 is reserved as the delimiter mark, so it must never be used
			// as a hash value.  (Translated from the original Japanese
			// comment.)
			hashSeq[i] = hashValue == 0 ? 1 : hashValue;
		}
	}
	std::fill(hashSeq.begin() + i, hashSeq.begin() + endPos, 0);
}
// Split number0 into a list of factors whose product equals number0; the
// result is empty for number0 <= 1.
// NOTE(review): the trial-division bound `i < number / 2` misses divisor 2
// when number == 4, so e.g. 4 yields {4} rather than {2, 2}.  The product
// is still correct, which appears to be all calc_hash_seq() relies on —
// confirm before tightening the bound (e.g. to i * i <= number).
static void factorize(std:: vector<size_t> *pFactors, size_t number0)
{
	std:: vector<size_t> &factors = *pFactors;
	factors.clear();
	size_t number = number0;
	while (number > 1) {
		bool found = false;
		for (size_t i = 2; i < number / 2; ++i) {
			if (number % i == 0) {
				factors.push_back(i);
				number /= i;
				found = true;
				break; // for i
			}
		}
		if (! found) {
			// No divisor found: the remainder is pushed as a single factor.
			factors.push_back(number);
			return;
		}
	}
}
//static void fill_zero(const std:: vector<ElemType> &seq, std:: vector<HashValueType> *pHashSeq, size_t unitLength)
//{
// assert(seq.size() == (*pHashSeq).size());
//
// if ((*pHashSeq).size() == 0) {
// return;
// }
// size_t i = (*pHashSeq).size() - 1;
// while (true) {
// if (seq[i] == 0) {
// for (size_t j = 0; j < unitLength; ++j) {
// (*pHashSeq)[i] = 0;
// if (--i == 0) {
// return;
// }
// }
// }
// if (--i == 0) {
// return;
// }
// }
//}
};
#endif // CLONEDETECTOR_H
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extract the 'mean' from the image and adjust the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AutoGammaImage(): choose a gamma so the image mean maps toward mid-gray
  (0.5), either once across all channels or per updatable channel.
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    /* restrict subsequent operations to this single channel */
    channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
    status=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AutoLevelImage(): stretch channel minimum/maximum to the full quantum
  range by delegating to MinMaxStretchImage() with no black/white point
  clipping.
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  BrightnessContrastImage(): convert brightness/contrast percentages
  (-100..100) into the slope and intercept of a linear transfer function and
  apply it as a degree-1 polynomial via FunctionImage().
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    alpha,
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  /* contrast 0 -> slope 1; +100 -> tan(pi/2) (clamped below at 0) */
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Intensity range [min,max] over which the CLAHE lookup table is built.
*/
typedef struct _RangeInfo
{
  unsigned short
    min,
    max;
} RangeInfo;
/*
  ClipCLAHEHistogram(): clip each histogram bin at clip_limit and
  redistribute the clipped pixel counts evenly over the remaining bins,
  limiting contrast amplification (Zuiderveld, Graphics Gems IV).
*/
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
  size_t *histogram)
{
#define NumberCLAHEGrays  (65536)

  size_t
    cumulative_excess = 0,
    i,
    previous_excess,
    step;

  ssize_t
    excess;

  /*
    Tally the total count above the clip limit across all bins.
  */
  for (i=0; i < number_bins; i++)
  {
    ssize_t over=(ssize_t) histogram[i]-(ssize_t) clip_limit;
    if (over > 0)
      cumulative_excess+=(size_t) over;
  }
  /*
    Clip the histogram, giving each bin an even share of the excess.
  */
  step=cumulative_excess/number_bins;
  excess=(ssize_t) (clip_limit-step);
  for (i=0; i < number_bins; i++)
  {
    if ((double) histogram[i] > clip_limit)
      histogram[i]=(size_t) clip_limit;  /* fully clipped */
    else if ((ssize_t) histogram[i] > excess)
    {
      /* close to the limit: fill up to it, consuming part of the excess */
      cumulative_excess-=histogram[i]-excess;
      histogram[i]=(size_t) clip_limit;
    }
    else
    {
      /* well below the limit: receives an even share */
      cumulative_excess-=step;
      histogram[i]+=step;
    }
  }
  /*
    Spread whatever excess remains, one count at a time, until none is left
    or no bin can accept more.
  */
  do
  {
    size_t
      *bin,
      *end;

    previous_excess=cumulative_excess;
    bin=histogram;
    end=histogram+number_bins;
    while ((cumulative_excess != 0) && (bin < end))
    {
      step=number_bins/cumulative_excess;
      if (step < 1)
        step=1;
      for (bin=histogram; (bin < end) && (cumulative_excess != 0); bin+=step)
        if ((double) *bin < clip_limit)
        {
          (*bin)++;
          cumulative_excess--;
        }
      bin++;
    }
  } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}
/*
  GenerateCLAHEHistogram(): classify one tile's pixels into a gray
  histogram; `pixels` points at the tile's top-left pixel inside rows of
  clahe_info->width pixels.
*/
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const size_t number_bins,
  const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
  register const unsigned short
    *p;

  register ssize_t
    i;

  /*
    Classify the pixels into a gray histogram.
  */
  for (i=0; i < (ssize_t) number_bins; i++)
    histogram[i]=0L;
  p=pixels;
  for (i=0; i < (ssize_t) tile_info->height; i++)
  {
    const unsigned short
      *q;

    q=p+tile_info->width;
    while (p < q)
      histogram[lut[*p++]]++;
    /* advance to the start of the tile's next image row */
    q+=clahe_info->width;
    p=q-tile_info->width;
  }
}
/*
  InterpolateCLAHE(): bilinearly blend the grey-level mappings of the four
  surrounding tiles (Q12/Q22/Q11/Q21) for each pixel of the region `tile`,
  weighting each mapping by the pixel's distance to the tile corners.  This
  eliminates visible tile-boundary artifacts.
*/
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
  const size_t *Q22,const size_t *Q11,const size_t *Q21,
  const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
  ssize_t
    y;

  unsigned short
    intensity;

  /*
    Bilinear interpolate four tiles to eliminate boundary artifacts.
  */
  for (y=(ssize_t) tile->height; y > 0; y--)
  {
    register ssize_t
      x;

    for (x=(ssize_t) tile->width; x > 0; x--)
    {
      intensity=lut[*pixels];
      *pixels++=(unsigned short ) (PerceptibleReciprocal((double) tile->width*
        tile->height)*(y*(x*Q12[intensity]+(tile->width-x)*Q22[intensity])+
        (tile->height-y)*(x*Q11[intensity]+(tile->width-x)*Q21[intensity])));
    }
    /* skip to this region's next image row */
    pixels+=(clahe_info->width-tile->width);
  }
}
/*
  GenerateCLAHELut(): build a lookup table mapping every input intensity in
  [range_info->min,range_info->max] onto a histogram bin index in
  [0,number_bins-1].
*/
static void GenerateCLAHELut(const RangeInfo *range_info,
  const size_t number_bins,unsigned short *lut)
{
  ssize_t
    intensity;

  unsigned short
    scale;

  /*
    Scale input image [intensity min,max] to [0,number_bins-1].
  */
  scale=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
  for (intensity=(ssize_t) range_info->min; intensity <= (ssize_t) range_info->max; intensity++)
    lut[intensity]=(unsigned short) ((intensity-range_info->min)/scale);
}
/*
  MapCLAHEHistogram(): convert a clipped histogram into an equalization
  mapping by replacing each bin with its scaled cumulative count, clamped to
  range_info->max.
*/
static void MapCLAHEHistogram(const RangeInfo *range_info,
  const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
  double
    scale,
    sum;

  register ssize_t
    i;

  /*
    Rescale histogram to range [min-intensity .. max-intensity].
  */
  scale=(double) (range_info->max-range_info->min)/number_pixels;
  sum=0.0;
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    sum+=histogram[i];
    histogram[i]=(size_t) (range_info->min+scale*sum);
    if (histogram[i] > range_info->max)
      histogram[i]=range_info->max;
  }
}
/*
  CLAHE(): contrast limited adaptive histogram equalization of the buffer
  `pixels` (clahe_info->width x clahe_info->height intensities in
  [range_info->min,range_info->max]) over a grid of clahe_info->x by
  clahe_info->y tiles, each tile_info->width x tile_info->height pixels.
  A clip_limit of 1.0 is a no-op.  Returns MagickFalse only on memory
  allocation failure.
*/
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const RangeInfo *range_info,
  const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
  MemoryInfo
    *tile_cache;

  register unsigned short
    *p;

  size_t
    limit,
    *tiles;

  ssize_t
    y;

  unsigned short
    *lut;

  /*
    Contrast limited adaptive histogram equalization.
  */
  if (clip_limit == 1.0)
    return(MagickTrue);
  tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y,
    number_bins*sizeof(*tiles));
  if (tile_cache == (MemoryInfo *) NULL)
    return(MagickFalse);
  /*
    Bug fix: allocate NumberCLAHEGrays *entries*, not bytes.  The original
    AcquireMagickMemory(NumberCLAHEGrays) allocated half the space needed
    for an unsigned short table, so GenerateCLAHELut() overflowed the heap.
  */
  lut=(unsigned short *) AcquireMagickMemory(NumberCLAHEGrays*sizeof(*lut));
  if (lut == (unsigned short *) NULL)
    {
      tile_cache=RelinquishVirtualMemory(tile_cache);
      return(MagickFalse);
    }
  tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
  limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
  if (limit < 1UL)
    limit=1UL;
  /*
    Generate greylevel mappings for each tile.
  */
  GenerateCLAHELut(range_info,number_bins,lut);
  p=pixels;
  for (y=0; y < (ssize_t) clahe_info->y; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) clahe_info->x; x++)
    {
      size_t
        *histogram;

      histogram=tiles+(number_bins*(y*clahe_info->x+x));
      GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
      ClipCLAHEHistogram((double) limit,number_bins,histogram);
      MapCLAHEHistogram(range_info,number_bins,tile_info->width*
        tile_info->height,histogram);
      p+=tile_info->width;
    }
    p+=clahe_info->width*(tile_info->height-1);
  }
  /*
    Interpolate greylevel mappings to get CLAHE image.
  */
  p=pixels;
  for (y=0; y <= (ssize_t) clahe_info->y; y++)
  {
    OffsetInfo
      offset;

    RectangleInfo
      tile;

    register ssize_t
      x;

    tile.height=tile_info->height;
    tile.y=y-1;
    offset.y=tile.y+1;
    if (y == 0)
      {
        /*
          Top row.
        */
        tile.height=tile_info->height >> 1;
        tile.y=0;
        offset.y=0;
      }
    else
      if (y == (ssize_t) clahe_info->y)
        {
          /*
            Bottom row.
          */
          tile.height=(tile_info->height+1) >> 1;
          tile.y=clahe_info->y-1;
          offset.y=tile.y;
        }
    for (x=0; x <= (ssize_t) clahe_info->x; x++)
    {
      tile.width=tile_info->width;
      tile.x=x-1;
      offset.x=tile.x+1;
      if (x == 0)
        {
          /*
            Left column.
          */
          tile.width=tile_info->width >> 1;
          tile.x=0;
          offset.x=0;
        }
      else
        if (x == (ssize_t) clahe_info->x)
          {
            /*
              Right column.
            */
            tile.width=(tile_info->width+1) >> 1;
            tile.x=clahe_info->x-1;
            offset.x=tile.x;
          }
      InterpolateCLAHE(clahe_info,
        tiles+(number_bins*(tile.y*clahe_info->x+tile.x)),  /* Q12 */
        tiles+(number_bins*(tile.y*clahe_info->x+offset.x)),  /* Q22 */
        tiles+(number_bins*(offset.y*clahe_info->x+tile.x)),  /* Q11 */
        tiles+(number_bins*(offset.y*clahe_info->x+offset.x)),  /* Q21 */
        &tile,lut,p);
      p+=tile.width;
    }
    p+=clahe_info->width*(tile.height-1);
  }
  lut=(unsigned short *) RelinquishMagickMemory(lut);
  tile_cache=RelinquishVirtualMemory(tile_cache);
  return(MagickTrue);
}
/*
  CLAHEImage() applies contrast-limited adaptive histogram equalization to
  the lightness channel: the image is converted to Lab, the L channel is
  padded out to a whole number of tiles, equalized by CLAHE(), written back,
  and the original colorspace is restored.

  width/height select the tile size (0 => image dimension / 8), number_bins
  the histogram resolution (0 => 128, capped at 256), and clip_limit the
  per-tile contrast limit.  Returns MagickTrue on success; on failure an
  exception is raised and MagickFalse is returned.
*/
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
  const size_t height,const size_t number_bins,const double clip_limit,
  ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"
  CacheView
    *image_view;
  ColorspaceType
    colorspace;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MemoryInfo
    *pixel_cache;
  RangeInfo
    range_info;
  RectangleInfo
    clahe_info,
    tile_info;
  size_t
    n;
  ssize_t
    y;
  unsigned short
    *pixels;
  /*
    Configure CLAHE parameters.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  range_info.min=0;
  range_info.max=NumberCLAHEGrays-1;
  /* a tile dimension of 0 defaults to 1/8th of the image dimension */
  tile_info.width=width;
  if (tile_info.width == 0)
    tile_info.width=image->columns >> 3;
  tile_info.height=height;
  if (tile_info.height == 0)
    tile_info.height=image->rows >> 3;
  /* tile_info.x/y: padding needed to round the image up to whole tiles */
  tile_info.x=0;
  if ((image->columns % tile_info.width) != 0)
    tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
  tile_info.y=0;
  if ((image->rows % tile_info.height) != 0)
    tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
  /* clahe_info.width/height: padded dimensions; x/y: tile counts */
  clahe_info.width=image->columns+tile_info.x;
  clahe_info.height=image->rows+tile_info.y;
  clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
  clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
  pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
    sizeof(*pixels));
  if (pixel_cache == (MemoryInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
  colorspace=image->colorspace;  /* remembered so it can be restored below */
  if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
    {
      pixel_cache=RelinquishVirtualMemory(pixel_cache);
      return(MagickFalse);
    }
  /*
    Initialize CLAHE pixels.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  progress=0;
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) clahe_info.height; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /* offset reads by half the padding so it is split evenly on both sides;
       the virtual cache view supplies the out-of-bounds pixels */
    p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
      (tile_info.y >> 1),clahe_info.width,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) clahe_info.width; x++)
    {
      /* channel 0 is the lightness channel after the Lab transform */
      pixels[n++]=ScaleQuantumToShort(p[0]);
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
    (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
  if (status == MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  /*
    Push CLAHE pixels to CLAHE image.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  /* skip the top padding rows of the equalized buffer */
  n=clahe_info.width*(tile_info.y >> 1);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    n+=tile_info.x >> 1;  /* skip the left padding columns of this row */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q[0]=ScaleShortToQuantum(pixels[n++]);
      q+=GetPixelChannels(image);
    }
    /* advance past the right padding to the start of the next row */
    n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixel_cache=RelinquishVirtualMemory(pixel_cache);
  if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClutImage() replaces each channel value in the image with the value found
  by indexing a precomputed lookup table sampled along the diagonal of the
  CLUT image.  Only channels with the update trait set are replaced.
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"
  CacheView
    *clut_view,
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    *clut_map;
  register ssize_t
    i;
  ssize_t adjust,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* a color CLUT applied to a gray image forces the image to sRGB first */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /* integer interpolation samples up to columns-0; others up to columns-1 */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  /* precompute the lookup table: one interpolated sample per map entry,
     taken along the diagonal of the CLUT image */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    status=InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;
      GetPixelInfoPixel(image,q,&pixel);
      /* each channel indexes the table with its own (clamped) value and
         takes only its own component from the table entry */
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* a CLUT with alpha may introduce transparency into the target image */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ColorDecisionListImage() parses an ASC CDL Color Correction Collection
  (CCC) XML document and applies the slope/offset/power (SOP) transfer
  function plus the saturation adjustment to the image.  Missing SOP values
  default to the identity (slope=1, offset=0, power=1); saturation defaults
  to 0.0 as before.  Returns MagickFalse when the XML cannot be parsed or
  contains no ColorCorrection element.
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;
  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;
    double
      saturation;
  } ColorCorrection;
  CacheView
    *image_view;
  char
    token[MagickPathExtent];
  ColorCorrection
    color_correction;
  const char
    *content,
    *p;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    *cdl_map;
  register ssize_t
    i;
  ssize_t
    y;
  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;
  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /* identity SOP defaults; overridden by any values found in the XML */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;
      /* each SOP element holds up to three whitespace/comma separated
         values in R, G, B order */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;
      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  /* precompute the per-channel SOP transfer function as a lookup table:
     out = (slope*in + offset) ^ power, scaled through the quantum map */
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap.

        Fix: parenthesize (cdl-luma) so this path matches the DirectClass
        loop below; previously operator precedence reduced the expression
        to saturation*cdl, silently dropping the luma term.
      */
      double
        luma;
      luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
        0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
      image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-
        luma);
      image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma);
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* saturation blends the SOP-corrected value with the pixel's luma */
      luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Contrast() nudges a single RGB color's brightness toward a sinusoidal
  S-curve in HSB space: dark colors become darker and light colors lighter
  when sign is +1; sign -1 reduces contrast.  Hue and saturation are
  preserved; the result is written back through the pointer arguments.
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    b,
    h,
    s,
    target;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  h=0.0;
  s=0.0;
  b=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  /* move brightness halfway (scaled by sign) toward the sigmoid target */
  target=0.5*(sin((double) (MagickPI*(b-0.5)))+1.0);
  b+=0.5*sign*(target-b);
  /* clamp brightness to the valid [0,1] range */
  if (b < 0.0)
    b=0.0;
  if (b > 1.0)
    b=1.0;
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
/*
  ContrastImage() applies the Contrast() sigmoidal brightness adjustment to
  every pixel (and, for PseudoClass images, to the colormap as well).
  sharpen=MagickTrue increases contrast; MagickFalse reduces it.
*/
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"
  CacheView
    *image_view;
  int
    sign;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* delegate to the OpenCL implementation when it is available and succeeds */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* sign selects the direction of the brightness shift in Contrast() */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;
        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
%      The black and white points are expressed as pixel counts in the
%      range 0 to number-of-pixels, typically parsed from a levels geometry
%      (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ContrastStretchImage() linearly rescales each channel so that histogram
  mass below black_point maps to 0 and mass above white_point maps to
  QuantumRange.  black_point and white_point are pixel counts (0 to
  number-of-pixels), not intensity values.
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
  CacheView
    *image_view;
  double
    *black,
    *histogram,
    *stretch_map,
    *white;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* release whichever allocations succeeded before throwing */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;
      /* with the default channel mask all channels share one intensity
         histogram; otherwise each channel is binned by its own value */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;
    register ssize_t
      j;
    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /* walk up from the dark end until black_point pixels are accumulated */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /* walk down from the light end toward the white_point threshold */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;
      /* linear ramp: 0 below black[i], QuantumRange above white[i] */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;
      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* degenerate range: leave the channel untouched */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  EnhancePixel(weight): fold the pixel at r into the running weighted sum.
  distance_squared is a mean-weighted squared color distance between the
  pixel at r and the window's center pixel; only pixels closer than the
  fixed threshold (0.069) contribute, each scaled by the kernel weight.
  Side effects: updates the locals mean, distance, distance_squared,
  aggregate and total_weight, then advances r to the next pixel.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image: for each pixel, average the 5x5 neighborhood pixels whose
    color is close enough to the center pixel (noise-rejecting smoothing).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /*
      Read a 5-row source window with 2 columns of virtual padding on each
      side, and queue the destination row.
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Offset of the window's center pixel (row 2, column 2) within p.
    */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      /* pixel holds the center pixel; EnhancePixel compares against it. */
      GetPixelInfoPixel(image,p+center,&pixel);
      r=p;
      /*
        Apply the 5x5 kernel row by row; weights peak (80) at the center.
      */
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /*
            Normalize the weighted sum; +total_weight/2.0 rounds to nearest.
            If no neighbor qualified, pixel keeps the center pixel's value.
          */
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(enhance_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram: one bucket row per map level, one column per channel.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        /*
          With synced channels, histogram the pixel intensity instead of
          the raw channel sample.
        */
        intensity=(double) p[i];
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map (per channel CDF).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the whole black/white arrays; sizeof(*black) (a single double)
    previously left all but the first element uninitialized.
  */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    /*
      black/white are the per-channel CDF extremes; a flat channel
      (black == white) is left unmapped.
    */
    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image: push every updatable, non-flat channel through the map.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gamma: the image gamma.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* gamma of 1.0 is the identity: nothing to do */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    Build the lookup table: map[i]=(i/MaxMap)^(1/gamma), scaled back to
    quantum range.  A gamma of 0.0 leaves the table all zeros.
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,PerceptibleReciprocal(gamma))));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image: every updatable channel goes through the table.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Track the cumulative gamma applied to the image. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Colormapped images must be synced and promoted to DirectClass
        before per-pixel grayscale conversion.
      */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Fast path: let the OpenCL kernel do the conversion when available. */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image: compute an intensity per pixel by the chosen method.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of the min and max channel. */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of squares. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Luma operates on gamma-encoded (sRGB-like) samples. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Luminance operates on linear-light samples. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root mean square. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  /* Luminance methods yield linear-light gray; luma methods encoded gray. */
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.  Recover the cube geometry from the CLUT image size:
    find the smallest `level` with level^3 >= min(columns,rows), then
    level^2 is the cube's side and cube_size its slice stride.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        area,
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map R/G/B into cube coordinates, then trilinearly blend the eight
        surrounding CLUT entries (two lookups per axis pair below).
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      area=point.y;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.y < 0.5) ? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel3);
      /* Repeat for the next z-slice of the cube. */
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel4);
      pixel=zero;
      /* Final blend between the two slices along z. */
      area=point.z;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.z < 0.5)? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        area,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map a sample through the level transfer function: shift by black_point,
  scale so white_point maps to full range, then apply the gamma exponent
  (1/gamma) and rescale to the quantum range.  PerceptibleReciprocal()
  guards against division by a (near-)zero black/white span or gamma.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    span_scale;

  span_scale=PerceptibleReciprocal(white_point-black_point);
  return(QuantumRange*gamma_pow(span_scale*((double) pixel-black_point),
    PerceptibleReciprocal(gamma)));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image: apply the transfer function to every updatable channel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be called with by using a +level command line
% API option, or using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
  /*
    Reverse level transfer: x is normalized to [0,1], gamma-corrected, then
    stretched onto the [black_point,white_point] interval and clamped to the
    quantum range.  Gamma is applied before the mapping, per the contract.
  */
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: only channels carrying the update trait are touched.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image: one OpenMP iteration per row; each row is fetched from the
    authentic pixel cache, transformed in place, and synced back.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in
% the reverse direction. That is any existing "black" and "white" colors in
% the image will become the color values given, with all other values
% compressed appropriately. This effectively maps a greyscale gradient into
% the given color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A gray image cannot carry distinct per-channel levels: promote it to
    sRGB whenever either reference color is not itself gray.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      /*
        Level each active channel independently: temporarily restrict the
        channel mask to one channel, run LevelImage() with that channel's
        black/white levels (gamma 1.0), then restore the original mask.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      /* Black channel only exists for CMYK images. */
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      /* Alpha is leveled only when the image actually has an alpha trait. */
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  else
    {
      /*
        Inverse mapping (levelize): identical per-channel masking, but with
        LevelizeImage() so existing black/white map onto the given colors.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  /* status accumulated bitwise; any zero result means a channel failed. */
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram of pixel intensity, one bin per map level.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;  /* pixel cache failure: continue with a partial histogram */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point
    levels.  NOTE(review): black_point and white_point act as cumulative
    pixel *counts* here, not intensity values.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Level the image between the located boundaries (gamma of 1.0).
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Adjust the hue, chroma, and luma of one RGB triple (in place) by the given
  percentages, working in the HCL colorspace.  A percentage of 100 leaves the
  corresponding component unchanged.
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=percent_chroma*0.01;
  luma*=percent_luma*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
/*
  Adjust the hue, chroma, and luma of one RGB triple (in place) by the given
  percentages, working in the HCLp colorspace.
*/
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=percent_chroma*0.01;
  luma*=percent_luma*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
/*
  Adjust the hue, saturation, and brightness of one RGB triple (in place) by
  the given percentages, working in the HSB colorspace.
*/
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  saturation*=percent_saturation*0.01;
  brightness*=percent_brightness*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
/*
  Adjust the hue, saturation, and intensity of one RGB triple (in place) by
  the given percentages, working in the HSI colorspace.
*/
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    hue,
    intensity,
    saturation;

  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  saturation*=percent_saturation*0.01;
  intensity*=percent_intensity*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
/*
  Adjust the hue, saturation, and lightness of one RGB triple (in place) by
  the given percentages, working in the HSL colorspace.
*/
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue,
    lightness,
    saturation;

  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  saturation*=percent_saturation*0.01;
  lightness*=percent_lightness*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
/*
  Adjust the hue, saturation, and value of one RGB triple (in place) by the
  given percentages, working in the HSV colorspace.
*/
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue,
    saturation,
    value;

  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  saturation*=percent_saturation*0.01;
  value*=percent_value*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
/*
  Adjust the hue, whiteness, and blackness of one RGB triple (in place) by
  the given percentages, working in the HWB colorspace.
*/
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    blackness,
    hue,
    whiteness;

  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  whiteness*=percent_whiteness*0.01;
  blackness*=percent_blackness*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
/*
  Adjust the luma, chroma, and hue of one RGB triple (in place) by the given
  percentages, working in the LCHab colorspace.  Note the parameter order
  (luma, chroma, hue) differs from the HCL-family helpers.
*/
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  luma*=percent_luma*0.01;
  chroma*=percent_chroma*0.01;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}
/*
  Adjust the luma, chroma, and hue of one RGB triple (in place) by the given
  percentages, working in the LCHuv colorspace.
*/
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  luma*=percent_luma*0.01;
  chroma*=percent_chroma*0.01;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}
/*
  ModulateImage() adjusts brightness, saturation, and hue (interpreted per
  the selected modulation colorspace) of every pixel, and of the colormap
  for PseudoClass images.

  Fixes relative to the previous revision: the per-pixel switch now handles
  HSIColorspace (it previously fell through to the HSL default), and
  LCHColorspace is grouped with LCHabColorspace — matching the colormap
  switch, which already treated LCH as LCHab.
*/
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag  "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse "brightness[,saturation[,hue]]"; omitted values default to 100
    (unchanged).
  */
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
        percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      /*
        This switch mirrors the colormap switch above; keep the two in sync.
      */
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  NegateImage() negates the colors of the image.  When 'grayscale' is set,
  only pixels whose red, green, and blue samples are equal are negated.

  Fixes relative to the previous revision: (1) the grayscale pixel test was
  inverted — it skipped gray pixels and negated everything else, the exact
  opposite of both the documented contract and the colormap path (which skips
  non-gray colormap entries); (2) the grayscale branch returned MagickTrue
  unconditionally, discarding any pixel-cache failure recorded in 'status'.
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag  "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap.  In grayscale mode, skip entries that are not gray.
      */
      if( grayscale != MagickFalse )
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if( grayscale != MagickFalse )
    {
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j;

          /*
            Only negate gray pixels; leave colored pixels untouched.
          */
          if (IsPixelGray(image,q) == MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /* Propagate any failure instead of returning MagickTrue blindly. */
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest pixels (the bottom 0.15 percent of the intensity
% histogram) to black and the brightest (the top 0.05 percent) to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Auto-contrast convenience wrapper: stretch the image so the darkest 0.15%
  of pixels map to black and the brightest 0.05% map to white, by delegating
  to ContrastStretchImage() with cumulative pixel-count thresholds.
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    pixels;

  pixels=(double) image->columns*image->rows;
  return(ContrastStretchImage(image,pixels*0.0015,pixels*0.9995,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
  The first version, based on the hyperbolic tangent tanh, when combined with
  the scaling step, is an exact arithmetic clone of the sigmoid function
  based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal(): given y=ScaledSigmoidal(a,b,x) with x
  in [0,1], recover x.  The inner argument is clamped into the open domain
  of atanh (resp. log) because b may be 0 or 1 and HDRI samples may lie out
  of gamut; consequently this is a right inverse only, not two-sided.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      /* tanh-based Sigmoidal: clamp into (-1,1), invert with atanh. */
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      /* logistic-based Sigmoidal: clamp into (0,1), invert with log. */
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"
  /* Forward transfer: scaled sigmoidal of the normalized sample. */
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
  /* Inverse transfer, used when sharpen is MagickFalse. */
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Convenience macros.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done (avoids the a->0 division by zero in the scaled
    sigmoidal; the transfer is the identity in that limit anyway).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image: one OpenMP iteration per row; every
    channel carrying the update trait is passed through the transfer.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
ba_sparse_matrix.h | /*
* Copyright (C) 2015, Simon Fuhrmann, Fabian Langguth
* TU Darmstadt - Graphics, Capture and Massively Parallel Computing
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD 3-Clause license. See the LICENSE.txt file for details.
*/
#ifndef SFM_SPARSE_MATRIX_HEADER
#define SFM_SPARSE_MATRIX_HEADER
#include <thread>
#include <stdexcept>
#include <vector>
#include <algorithm>
#include "sfm/ba_dense_vector.h"
#include "sfm/defines.h"
SFM_NAMESPACE_BEGIN
SFM_BA_NAMESPACE_BEGIN
/**
* Sparse matrix class in Yale format for column-major matrices.
*/
template <typename T>
class SparseMatrix
{
public:
    /** Triplet with row/col index, and the actual value. */
    struct Triplet
    {
        Triplet (void) = default;
        Triplet (std::size_t row, std::size_t col, T const& value);

        std::size_t row;
        std::size_t col;
        T value;
    };

    /** List of triplets. */
    typedef std::vector<Triplet> Triplets;

public:
    /** Creates an empty 0x0 matrix. */
    SparseMatrix (void);
    /** Creates a matrix with the given dimensions and no non-zeros. */
    SparseMatrix (std::size_t rows, std::size_t cols);
    /** Resets the matrix to the given dimensions, discarding all values. */
    void allocate (std::size_t rows, std::size_t cols);
    /** Reserves storage for the given number of non-zero elements. */
    void reserve (std::size_t num_elements);
    /** Initializes the matrix from an (unsorted) list of triplets. */
    void set_from_triplets (Triplets const& triplets);
    /** Multiplies all diagonal elements with the given factor. */
    void mult_diagonal (T const& factor);
    /** Replaces every stored (non-zero) value with its reciprocal. */
    void cwise_invert (void);
    /** Copies the stored values of the given column into 'vector'. */
    void column_nonzeros (std::size_t col, DenseVector<T>* vector) const;
    /** Returns the transposed matrix. */
    SparseMatrix transpose (void) const;
    /** Returns this matrix minus 'rhs'; dimensions must match. */
    SparseMatrix subtract (SparseMatrix const& rhs) const;
    /** Matrix-matrix product; dispatches to sequential or parallel code. */
    SparseMatrix multiply (SparseMatrix const& rhs) const;
    /** Single-threaded matrix-matrix product. */
    SparseMatrix sequential_multiply (SparseMatrix const& rhs) const;
    /** OpenMP-parallel matrix-matrix product. */
    SparseMatrix parallel_multiply (SparseMatrix const& rhs) const;
    /** Matrix-vector product. */
    DenseVector<T> multiply (DenseVector<T> const& rhs) const;
    /** Returns a square matrix containing only this matrix' diagonal. */
    SparseMatrix diagonal_matrix (void) const;
    /** Returns the number of stored (non-zero) values. */
    std::size_t num_non_zero (void) const;
    std::size_t num_rows (void) const;
    std::size_t num_cols (void) const;
    /** Begin/end iteration over the raw value array. */
    T* begin (void);
    T* end (void);
    /** Prints matrix statistics and internal arrays to stdout. */
    void debug (void) const;

private:
    std::size_t rows;                 // Number of rows.
    std::size_t cols;                 // Number of columns.
    std::vector<T> values;            // Non-zero values, column-major order.
    std::vector<std::size_t> outer;   // Per-column start offsets (size cols + 1).
    std::vector<std::size_t> inner;   // Row index for each stored value.
};
SFM_BA_NAMESPACE_END
SFM_NAMESPACE_END
/* ------------------------ Implementation ------------------------ */
#include <iostream>
SFM_NAMESPACE_BEGIN
SFM_BA_NAMESPACE_BEGIN
/* Initializes a triplet from its row index, column index and value. */
template <typename T>
SparseMatrix<T>::Triplet::Triplet (std::size_t r, std::size_t c, T const& v)
    : row(r)
    , col(c)
    , value(v)
{
}
/* --------------------------------------------------------------- */
/* Constructs an empty matrix with zero rows and columns. */
template <typename T>
SparseMatrix<T>::SparseMatrix (void) : rows(0), cols(0)
{
}
/* Constructs a matrix of the given size with no non-zero entries. */
template <typename T>
SparseMatrix<T>::SparseMatrix (std::size_t num_rows, std::size_t num_cols)
{
    this->allocate(num_rows, num_cols);
}
/* Resets the matrix to the given dimensions and drops all values. */
template <typename T>
void
SparseMatrix<T>::allocate (std::size_t num_rows, std::size_t num_cols)
{
    this->rows = num_rows;
    this->cols = num_cols;
    this->values.clear();
    this->inner.clear();
    /* One offset per column plus the terminating end offset, all zero. */
    this->outer.assign(num_cols + 1, 0);
}
/* Pre-allocates storage in both parallel arrays; their sizes always match. */
template <typename T>
void
SparseMatrix<T>::reserve (std::size_t num_elements)
{
    this->values.reserve(num_elements);
    this->inner.reserve(num_elements);
}
/*
 * Builds the matrix from an unsorted triplet list via a two-pass
 * counting sort: triplets are first bucketed into a temporary matrix
 * of transposed shape (grouped by row), which is then transposed back.
 * The final transpose sorts the inner (row) indices of each column.
 * Note: duplicate (row, col) triplets each keep their own slot and are
 * not summed.
 */
template <typename T>
void
SparseMatrix<T>::set_from_triplets (Triplets const& triplets)
{
    /* Create a temporary transposed matrix */
    SparseMatrix<T> transposed(this->cols, this->rows);
    transposed.values.resize(triplets.size());
    transposed.inner.resize(triplets.size());

    /* Initialize outer indices with amount of inner values. */
    for (std::size_t i = 0; i < triplets.size(); ++i)
        transposed.outer[triplets[i].row]++;

    /* Convert amounts to indices with prefix sum. */
    std::size_t sum = 0;
    std::vector<std::size_t> scratch(transposed.outer.size());
    for (std::size_t i = 0; i < transposed.outer.size(); ++i)
    {
        std::size_t const temp = transposed.outer[i];
        transposed.outer[i] = sum;
        scratch[i] = sum;  // Per-bucket running write cursor.
        sum += temp;
    }

    /* Add triplets, inner indices are unsorted. */
    for (std::size_t i = 0; i < triplets.size(); ++i)
    {
        Triplet const& t = triplets[i];
        std::size_t pos = scratch[t.row]++;
        transposed.values[pos] = t.value;
        transposed.inner[pos] = t.col;
    }

    /* Transpose matrix, implicit sorting of inner indices. */
    *this = transposed.transpose();
}
/*
 * Returns the transposed matrix using a counting sort over the row
 * indices. Because the source columns are visited in increasing order,
 * the inner indices of the result come out sorted per column.
 */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::transpose (void) const
{
    SparseMatrix ret(this->cols, this->rows);
    ret.values.resize(this->num_non_zero());
    ret.inner.resize(this->num_non_zero());

    /* Compute inner sizes of transposed matrix. */
    for (std::size_t i = 0; i < this->inner.size(); ++i)
        ret.outer[this->inner[i]] += 1;

    /* Compute outer sizes of transposed matrix with prefix sum. */
    std::size_t sum = 0;
    std::vector<std::size_t> scratch(ret.outer.size());
    for (std::size_t i = 0; i < ret.outer.size(); ++i)
    {
        std::size_t const temp = ret.outer[i];
        ret.outer[i] = sum;
        scratch[i] = sum;  // Write cursor for each output column.
        sum += temp;
    }

    /* Write inner indices and values of transposed matrix. */
    for (std::size_t i = 0; i < this->outer.size() - 1; ++i)
        for (std::size_t j = this->outer[i]; j < this->outer[i + 1]; ++j)
        {
            std::size_t pos = scratch[this->inner[j]]++;
            ret.inner[pos] = i;
            ret.values[pos] = this->values[j];
        }

    return ret;
}
/*
 * Computes this - rhs by merging the sorted inner index lists of both
 * operands column by column (dimensions must match). Entries present
 * in only one operand are copied (negated for rhs); matching entries
 * are subtracted, which may produce explicit zeros in the result.
 */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::subtract (SparseMatrix const& rhs) const
{
    if (this->rows != rhs.rows || this->cols != rhs.cols)
        throw std::invalid_argument("Incompatible matrix dimensions");

    SparseMatrix ret(this->rows, this->cols);
    ret.reserve(this->num_non_zero() + rhs.num_non_zero());

    std::size_t num_outer = this->outer.size() - 1;
    for (std::size_t outer = 0; outer < num_outer; ++outer)
    {
        ret.outer[outer] = ret.values.size();
        /* Two-pointer merge over the current column of both operands. */
        std::size_t i1 = this->outer[outer];
        std::size_t i2 = rhs.outer[outer];
        std::size_t const i1_end = this->outer[outer + 1];
        std::size_t const i2_end = rhs.outer[outer + 1];
        while (i1 < i1_end || i2 < i2_end)
        {
            /* Only rhs entries left: emit them negated. */
            if (i1 >= i1_end)
            {
                ret.values.push_back(-rhs.values[i2]);
                ret.inner.push_back(rhs.inner[i2]);
                i2 += 1;
                continue;
            }
            /* Only lhs entries left: emit them unchanged. */
            if (i2 >= i2_end)
            {
                ret.values.push_back(this->values[i1]);
                ret.inner.push_back(this->inner[i1]);
                i1 += 1;
                continue;
            }
            /* Both pending: advance the smaller row index, or both on a
             * match (the only case with an actual subtraction). */
            std::size_t id1 = this->inner[i1];
            std::size_t id2 = rhs.inner[i2];
            if (id1 < id2)
                ret.values.push_back(this->values[i1]);
            else if (id2 < id1)
                ret.values.push_back(-rhs.values[i2]);
            else
                ret.values.push_back(this->values[i1] - rhs.values[i2]);
            i1 += static_cast<std::size_t>(id1 <= id2);
            i2 += static_cast<std::size_t>(id2 <= id1);
            ret.inner.push_back(std::min(id1, id2));
        }
    }
    ret.outer.back() = ret.values.size();
    return ret;
}
/*
 * Matrix-matrix product. Dispatches to the OpenMP-parallel variant
 * when compiled with OpenMP, otherwise to the sequential one.
 */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::multiply (SparseMatrix const& rhs) const
{
#ifdef _OPENMP
    return this->parallel_multiply(rhs);
#else
    return this->sequential_multiply(rhs);
#endif
}
/*
 * Single-threaded matrix-matrix product (this * rhs). Each result
 * column is accumulated in a dense scratch column and then compacted
 * into the sparse result, which keeps inner indices sorted.
 */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::sequential_multiply (SparseMatrix const& rhs) const
{
    if (this->cols != rhs.rows)
        throw std::invalid_argument("Incompatible matrix dimensions");

    SparseMatrix ret(this->rows, rhs.cols);
    /* Heuristic reservation; the exact result size is unknown here. */
    ret.reserve(this->num_non_zero() + rhs.num_non_zero());

    /* Matrix-matrix multiplication. */
    std::vector<T> ret_col(ret.rows, T(0));
    std::vector<bool> ret_nonzero(ret.rows, false);
    for (std::size_t col = 0; col < ret.cols; ++col)
    {
        ret.outer[col] = ret.values.size();
        std::fill(ret_col.begin(), ret_col.end(), T(0));
        std::fill(ret_nonzero.begin(), ret_nonzero.end(), false);

        /* Accumulate lhs columns scaled by the rhs column's values. */
        std::size_t rhs_col_begin = rhs.outer[col];
        std::size_t rhs_col_end = rhs.outer[col + 1];
        for (std::size_t i = rhs_col_begin; i < rhs_col_end; ++i)
        {
            T const& rhs_col_value = rhs.values[i];
            std::size_t const lhs_col = rhs.inner[i];
            std::size_t const lhs_col_begin = this->outer[lhs_col];
            std::size_t const lhs_col_end = this->outer[lhs_col + 1];
            for (std::size_t j = lhs_col_begin; j < lhs_col_end; ++j)
            {
                std::size_t const id = this->inner[j];
                ret_col[id] += this->values[j] * rhs_col_value;
                ret_nonzero[id] = true;
            }
        }

        /* Compact the dense scratch column into the sparse result. */
        for (std::size_t i = 0; i < ret.rows; ++i)
            if (ret_nonzero[i])
            {
                ret.inner.push_back(i);
                ret.values.push_back(ret_col[i]);
            }
    }
    ret.outer[ret.cols] = ret.values.size();
    return ret;
}
/*
 * OpenMP-parallel matrix-matrix product (this * rhs). Result columns
 * are processed in chunks of 64; each thread accumulates its chunk in
 * thread-local buffers, which are appended to the shared result inside
 * an '#pragma omp ordered' region so the global column order is kept.
 * Per-column non-zero counts are collected in ret.outer[col + 1] and
 * converted to offsets by the final prefix sum.
 * NOTE(review): assumes rhs has at least one column — for ret.cols == 0
 * num_chunks is 0, making 'nnz / num_chunks' and 'num_threads(0)'
 * invalid; confirm callers never multiply with an empty matrix.
 */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::parallel_multiply (SparseMatrix const& rhs) const
{
    if (this->cols != rhs.rows)
        throw std::invalid_argument("Incompatible matrix dimensions");

    std::size_t nnz = this->num_non_zero() + rhs.num_non_zero();
    SparseMatrix ret(this->rows, rhs.cols);
    ret.reserve(nnz);
    std::fill(ret.outer.begin(), ret.outer.end(), 0);

    std::size_t const chunk_size = 64;
    std::size_t const num_chunks = ret.cols / chunk_size
        + (ret.cols % chunk_size != 0);
    std::size_t const max_threads = std::max(1u,
        std::thread::hardware_concurrency());
    std::size_t const num_threads = std::min(num_chunks, max_threads);
#pragma omp parallel num_threads(num_threads)
    {
        /* Matrix-matrix multiplication. */
        std::vector<T> ret_col(ret.rows, T(0));
        std::vector<bool> ret_nonzero(ret.rows, false);
        std::vector<T> thread_values;
        thread_values.reserve(nnz / num_chunks);
        std::vector<std::size_t> thread_inner;
        thread_inner.reserve(nnz / num_chunks);
#pragma omp for ordered schedule(static, 1)
        for (std::size_t chunk = 0; chunk < num_chunks; ++chunk)
        {
            thread_inner.clear();
            thread_values.clear();
            std::size_t const begin = chunk * chunk_size;
            std::size_t const end = std::min(begin + chunk_size, ret.cols);
            for (std::size_t col = begin; col < end; ++col)
            {
                /* Dense accumulation, same scheme as the sequential code. */
                std::fill(ret_col.begin(), ret_col.end(), T(0));
                std::fill(ret_nonzero.begin(), ret_nonzero.end(), false);
                std::size_t const rhs_col_begin = rhs.outer[col];
                std::size_t const rhs_col_end = rhs.outer[col + 1];
                for (std::size_t i = rhs_col_begin; i < rhs_col_end; ++i)
                {
                    T const& rhs_col_value = rhs.values[i];
                    std::size_t const lhs_col = rhs.inner[i];
                    std::size_t const lhs_col_begin = this->outer[lhs_col];
                    std::size_t const lhs_col_end = this->outer[lhs_col + 1];
                    for (std::size_t j = lhs_col_begin; j < lhs_col_end; ++j)
                    {
                        std::size_t const id = this->inner[j];
                        ret_col[id] += this->values[j] * rhs_col_value;
                        ret_nonzero[id] = true;
                    }
                }
                /* Count this column's non-zeros; offsets are built later. */
                for (std::size_t i = 0; i < ret.rows; ++i)
                    if (ret_nonzero[i])
                    {
                        ret.outer[col + 1] += 1;
                        thread_inner.push_back(i);
                        thread_values.push_back(ret_col[i]);
                    }
            }
#pragma omp ordered
            {
                ret.inner.insert(ret.inner.end(),
                    thread_inner.begin(), thread_inner.end());
                ret.values.insert(ret.values.end(),
                    thread_values.begin(), thread_values.end());
            }
        }
    }
    /* Prefix sum converts per-column counts into outer offsets. */
    for (std::size_t col = 0; col < ret.cols; ++col)
        ret.outer[col + 1] += ret.outer[col];
    return ret;
}
/* Computes the matrix-vector product this * rhs. */
template<typename T>
DenseVector<T>
SparseMatrix<T>::multiply (DenseVector<T> const& rhs) const
{
    if (rhs.size() != this->cols)
        throw std::invalid_argument("Incompatible dimensions");

    DenseVector<T> result(this->rows, T(0));
    for (std::size_t col = 0; col < this->cols; ++col)
    {
        /* Scatter this column scaled by the corresponding rhs entry. */
        for (std::size_t j = this->outer[col]; j < this->outer[col + 1]; ++j)
            result[this->inner[j]] += this->values[j] * rhs[col];
    }
    return result;
}
/*
 * Extracts the diagonal into a new (diag_size x diag_size) matrix.
 * Inner indices are sorted per column, so each column scan stops as
 * soon as a row index beyond the diagonal is seen.
 */
template<typename T>
SparseMatrix<T>
SparseMatrix<T>::diagonal_matrix (void) const
{
    std::size_t const diag_size = std::min(this->rows, this->cols);
    SparseMatrix ret(diag_size, diag_size);
    ret.reserve(diag_size);
    for (std::size_t i = 0; i < diag_size; ++i)
    {
        ret.outer[i] = ret.values.size();
        for (std::size_t j = this->outer[i]; j < this->outer[i + 1]; ++j)
            if (this->inner[j] == i)
            {
                ret.inner.push_back(i);
                ret.values.push_back(this->values[j]);
            }
            else if (this->inner[j] > i)
                break;
    }
    ret.outer[diag_size] = ret.values.size();
    return ret;
}
/*
 * Scales all diagonal elements by 'factor'. Relies on sorted inner
 * indices: each column scan breaks once the diagonal row has been
 * reached or passed, so at most the diagonal entry itself is changed.
 */
template<typename T>
void
SparseMatrix<T>::mult_diagonal (T const& factor)
{
    for (std::size_t i = 0; i < this->outer.size() - 1; ++i)
        for (std::size_t j = this->outer[i]; j < this->outer[i + 1]; ++j)
        {
            if (this->inner[j] == i)
                this->values[j] *= factor;
            if (this->inner[j] >= i)
                break;
        }
}
/* Replaces every stored value with its reciprocal, in-place. */
template<typename T>
void
SparseMatrix<T>::cwise_invert (void)
{
    for (T& value : this->values)
        value = T(1) / value;
}
/* Copies all stored values of column 'col' into the given vector. */
template<typename T>
void
SparseMatrix<T>::column_nonzeros (std::size_t col, DenseVector<T>* vector) const
{
    std::size_t const begin = this->outer[col];
    std::size_t const count = this->outer[col + 1] - begin;
    vector->resize(count);
    for (std::size_t i = 0; i < count; ++i)
        vector->at(i) = this->values[begin + i];
}
/* Returns the number of stored (non-zero) values. */
template<typename T>
inline std::size_t
SparseMatrix<T>::num_non_zero (void) const
{
    return this->values.size();
}

/* Returns the number of rows. */
template<typename T>
inline std::size_t
SparseMatrix<T>::num_rows (void) const
{
    return this->rows;
}

/* Returns the number of columns. */
template<typename T>
inline std::size_t
SparseMatrix<T>::num_cols (void) const
{
    return this->cols;
}

/* Returns a pointer to the first stored value. */
template<typename T>
inline T*
SparseMatrix<T>::begin (void)
{
    return this->values.data();
}

/* Returns a pointer one past the last stored value. */
template<typename T>
inline T*
SparseMatrix<T>::end (void)
{
    return this->values.data() + this->values.size();
}
/* Prints matrix statistics and all internal arrays to stdout. */
template<typename T>
void
SparseMatrix<T>::debug (void) const
{
    std::cout << "SparseMatrix ("
        << this->rows << " rows, " << this->cols << " cols, "
        << this->num_non_zero() << " values)" << std::endl;

    std::cout << " Values:";
    for (T const& value : this->values)
        std::cout << " " << value;
    std::cout << std::endl << " Inner:";
    for (std::size_t const& index : this->inner)
        std::cout << " " << index;
    std::cout << std::endl << " Outer:";
    for (std::size_t const& index : this->outer)
        std::cout << " " << index;
    std::cout << std::endl;
}
SFM_BA_NAMESPACE_END
SFM_NAMESPACE_END
#endif // SFM_SPARSE_MATRIX_HEADER
|
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
 public:
  /*!
   * \brief Constructor
   * \param max_leaves The number of max leaves
   */
  explicit Tree(int max_leaves);

  /*!
   * \brief Constructor, from a string
   * \param str Model string
   * \param used_len used count of str
   */
  Tree(const char* str, size_t* used_len);

  ~Tree();

  /*!
   * \brief Performing a split on tree leaves.
   * \param leaf Index of leaf to be split
   * \param feature Index of feature; the converted index after removing useless features
   * \param real_feature Index of feature, the original index on data
   * \param threshold_bin Threshold(bin) of split
   * \param threshold_double Threshold on feature value
   * \param left_value Model Left child output
   * \param right_value Model Right child output
   * \param left_cnt Count of left child
   * \param right_cnt Count of right child
   * \param left_weight Weight of left child
   * \param right_weight Weight of right child
   * \param gain Split gain
   * \param missing_type missing type
   * \param default_left default direction for missing value
   * \return The index of new leaf.
   */
  int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
            double threshold_double, double left_value, double right_value,
            int left_cnt, int right_cnt, double left_weight, double right_weight,
            float gain, MissingType missing_type, bool default_left);

  /*!
   * \brief Performing a split on tree leaves, with categorical feature
   * \param leaf Index of leaf to be split
   * \param feature Index of feature; the converted index after removing useless features
   * \param real_feature Index of feature, the original index on data
   * \param threshold_bin Threshold(bin) of split, use bitset to represent
   * \param num_threshold_bin size of threshold_bin
   * \param threshold Thresholds of real feature value, use bitset to represent
   * \param num_threshold size of threshold
   * \param left_value Model Left child output
   * \param right_value Model Right child output
   * \param left_cnt Count of left child
   * \param right_cnt Count of right child
   * \param left_weight Weight of left child
   * \param right_weight Weight of right child
   * \param gain Split gain
   * \return The index of new leaf.
   */
  int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
                       const uint32_t* threshold, int num_threshold, double left_value, double right_value,
                       int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);

  /*! \brief Get the output of one leaf */
  inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }

  /*! \brief Set the output of one leaf */
  inline void SetLeafOutput(int leaf, double output) {
    // Prevent denormal values because they can cause std::out_of_range exception when converting strings to doubles
    if (IsZero(output)) {
      leaf_value_[leaf] = 0;
    } else {
      leaf_value_[leaf] = output;
    }
  }

  /*!
   * \brief Adding prediction value of this tree model to scores
   * \param data The dataset
   * \param num_data Number of total data
   * \param score Will add prediction to score
   */
  void AddPredictionToScore(const Dataset* data,
                            data_size_t num_data,
                            double* score) const;

  /*!
   * \brief Adding prediction value of this tree model to scores
   * \param data The dataset
   * \param used_data_indices Indices of used data
   * \param num_data Number of total data
   * \param score Will add prediction to score
   */
  void AddPredictionToScore(const Dataset* data,
                            const data_size_t* used_data_indices,
                            data_size_t num_data, double* score) const;

  /*!
   * \brief Prediction on one record
   * \param feature_values Feature value of this record
   * \return Prediction result
   */
  inline double Predict(const double* feature_values) const;

  /*! \brief Like Predict(), but features are given as an index -> value map */
  inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
  /*! \brief Returns the index of the leaf one record falls into */
  inline int PredictLeafIndex(const double* feature_values) const;
  /*! \brief Map-based variant of PredictLeafIndex() */
  inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
  /*! \brief Computes SHAP feature contributions; output needs num_features + 1 slots */
  inline void PredictContrib(const double* feature_values, int num_features, double* output);

  /*! \brief Get Number of leaves*/
  inline int num_leaves() const { return num_leaves_; }

  /*! \brief Get depth of specific leaf*/
  inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }

  /*! \brief Get feature of specific split*/
  inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }

  /*! \brief Get gain of specific split*/
  inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }

  /*! \brief Get the number of data points that fall at or below this node*/
  inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }

  /*!
   * \brief Shrinkage for the tree's output
   * shrinkage rate (a.k.a learning rate) is used to tune the training process
   * \param rate The factor of shrinkage
   */
  inline void Shrinkage(double rate) {
    #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    for (int i = 0; i < num_leaves_; ++i) {
      double new_leaf_value = leaf_value_[i] * rate;
      // Prevent denormal values because they can cause std::out_of_range exception when converting strings to doubles
      if (IsZero(new_leaf_value)) {
        leaf_value_[i] = 0;
      } else {
        leaf_value_[i] = new_leaf_value;
      }
    }
    shrinkage_ *= rate;
  }

  /*! \brief Get the accumulated shrinkage applied to this tree*/
  inline double shrinkage() const {
    return shrinkage_;
  }

  /*! \brief Add a constant to every leaf output; resets shrinkage to 1*/
  inline void AddBias(double val) {
    #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    for (int i = 0; i < num_leaves_; ++i) {
      double new_leaf_value = val + leaf_value_[i];
      // Prevent denormal values because they can cause std::out_of_range exception when converting strings to doubles
      if (IsZero(new_leaf_value)) {
        leaf_value_[i] = 0;
      } else {
        leaf_value_[i] = new_leaf_value;
      }
    }
    // force to 1.0
    shrinkage_ = 1.0f;
  }

  /*! \brief Collapse this tree into a single leaf with constant output*/
  inline void AsConstantTree(double val) {
    num_leaves_ = 1;
    shrinkage_ = 1.0f;
    leaf_value_[0] = val;
  }

  /*! \brief Serialize this object to string*/
  std::string ToString() const;

  /*! \brief Serialize this object to json*/
  std::string ToJSON() const;

  /*! \brief Serialize this object to if-else statement*/
  std::string ToIfElse(int index, bool predict_leaf_index) const;

  /*! \brief True if fval is within kZeroThreshold of zero*/
  inline static bool IsZero(double fval) {
    if (fval > -kZeroThreshold && fval <= kZeroThreshold) {
      return true;
    } else {
      return false;
    }
  }

  /*! \brief Test one flag bit (kCategoricalMask / kDefaultLeftMask) of a node's decision type*/
  inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
    return (decision_type & mask) > 0;
  }

  /*! \brief Set or clear one flag bit of a node's decision type*/
  inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
    if (input) {
      (*decision_type) |= mask;
    } else {
      (*decision_type) &= (127 - mask);
    }
  }

  /*! \brief Missing-value handling is packed into bits 2-3 of the decision type*/
  inline static int8_t GetMissingType(int8_t decision_type) {
    return (decision_type >> 2) & 3;
  }

  inline static void SetMissingType(int8_t* decision_type, int8_t input) {
    (*decision_type) &= 3;
    (*decision_type) |= (input << 2);
  }

  /*! \brief Recompute max_depth_ from the current tree structure*/
  void RecomputeMaxDepth();

  /*! \brief Index the next added leaf would receive*/
  int NextLeafId() const { return num_leaves_; }

 private:
  std::string NumericalDecisionIfElse(int node) const;

  std::string CategoricalDecisionIfElse(int node) const;

  /*! \brief Child selection for a numerical split; missing_type 1 routes
   * zeros, missing_type 2 routes NaNs to the node's default side */
  inline int NumericalDecision(double fval, int node) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    if (std::isnan(fval)) {
      if (missing_type != 2) {
        // NaN only counts as missing for missing_type 2; otherwise treat as zero
        fval = 0.0f;
      }
    }
    if ((missing_type == 1 && IsZero(fval))
        || (missing_type == 2 && std::isnan(fval))) {
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }

  /*! \brief Same decision on binned values; default_bin/max_bin stand in for zero/NaN */
  inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    if ((missing_type == 1 && fval == default_bin)
        || (missing_type == 2 && fval == max_bin)) {
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_in_bin_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }

  /*! \brief Child selection for a categorical split via bitset membership.
   * NOTE(review): fval is cast to int before the NaN check; casting NaN to
   * int is unspecified behavior — confirm upstream guards against it. */
  inline int CategoricalDecision(double fval, int node) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    int int_fval = static_cast<int>(fval);
    if (int_fval < 0) {
      return right_child_[node];;
    } else if (std::isnan(fval)) {
      // NaN is always in the right
      if (missing_type == 2) {
        return right_child_[node];
      }
      int_fval = 0;
    }
    // cat_boundaries_[cat_idx]..[cat_idx + 1] delimits this split's bitset
    int cat_idx = static_cast<int>(threshold_[node]);
    if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
                             cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }

  /*! \brief Binned-value variant of CategoricalDecision */
  inline int CategoricalDecisionInner(uint32_t fval, int node) const {
    int cat_idx = static_cast<int>(threshold_in_bin_[node]);
    if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
                             cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }

  /*! \brief Dispatch on the node's categorical flag */
  inline int Decision(double fval, int node) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecision(fval, node);
    } else {
      return NumericalDecision(fval, node);
    }
  }

  inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecisionInner(fval, node);
    } else {
      return NumericalDecisionInner(fval, node, default_bin, max_bin);
    }
  }

  /*! \brief Shared node/leaf bookkeeping used by the public split functions */
  inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
                    double left_weight, double right_weight, float gain);

  /*!
   * \brief Find leaf index of which record belongs by features
   * \param feature_values Feature value of this record
   * \return Leaf index
   */
  inline int GetLeaf(const double* feature_values) const;

  inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;

  /*! \brief Serialize one node to json*/
  std::string NodeToJSON(int index) const;

  /*! \brief Serialize one node to if-else statement*/
  std::string NodeToIfElse(int index, bool predict_leaf_index) const;

  std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;

  /*! \brief Expected output of the tree; added as the SHAP base value in PredictContrib */
  double ExpectedValue() const;

  /*! \brief This is used to fill in leaf_depth_ after reloading a model*/
  inline void RecomputeLeafDepths(int node = 0, int depth = 0);

  /*!
   * \brief Used by TreeSHAP for data we keep about our decision path
   */
  struct PathElement {
    int feature_index;
    double zero_fraction;
    double one_fraction;
    // note that pweight is included for convenience and is not tied with the other attributes,
    // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
    double pweight;
    PathElement() {}
    PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
  };

  /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
  void TreeSHAP(const double *feature_values, double *phi,
                int node, int unique_depth,
                PathElement *parent_unique_path, double parent_zero_fraction,
                double parent_one_fraction, int parent_feature_index) const;

  /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
  static void ExtendPath(PathElement *unique_path, int unique_depth,
                         double zero_fraction, double one_fraction, int feature_index);

  /*! \brief Undo a previous extension of the decision path for TreeSHAP*/
  static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);

  /*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
  static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);

  /*! \brief Number of max leaves*/
  int max_leaves_;
  /*! \brief Number of current leaves*/
  int num_leaves_;
  // following values used for non-leaf node
  /*! \brief A non-leaf node's left child */
  std::vector<int> left_child_;
  /*! \brief A non-leaf node's right child */
  std::vector<int> right_child_;
  /*! \brief A non-leaf node's split feature */
  std::vector<int> split_feature_inner_;
  /*! \brief A non-leaf node's split feature, the original index */
  std::vector<int> split_feature_;
  /*! \brief A non-leaf node's split threshold in bin */
  std::vector<uint32_t> threshold_in_bin_;
  /*! \brief A non-leaf node's split threshold in feature value */
  std::vector<double> threshold_;
  // nonzero iff the tree contains categorical splits (see GetLeaf);
  // presumably the count of such splits — TODO confirm against tree.cpp
  int num_cat_;
  std::vector<int> cat_boundaries_inner_;
  std::vector<uint32_t> cat_threshold_inner_;
  // per-split bitset storage: cat_boundaries_[i]..[i+1] delimits split i's bitset
  std::vector<int> cat_boundaries_;
  std::vector<uint32_t> cat_threshold_;
  /*! \brief Store the information for categorical feature handle and missing value handle. */
  std::vector<int8_t> decision_type_;
  /*! \brief A non-leaf node's split gain */
  std::vector<float> split_gain_;
  // used for leaf node
  /*! \brief The parent of leaf */
  std::vector<int> leaf_parent_;
  /*! \brief Output of leaves */
  std::vector<double> leaf_value_;
  /*! \brief weight of leaves */
  std::vector<double> leaf_weight_;
  /*! \brief DataCount of leaves */
  std::vector<int> leaf_count_;
  /*! \brief Output of non-leaf nodes */
  std::vector<double> internal_value_;
  /*! \brief weight of non-leaf nodes */
  std::vector<double> internal_weight_;
  /*! \brief DataCount of non-leaf nodes */
  std::vector<int> internal_count_;
  /*! \brief Depth for leaves */
  std::vector<int> leaf_depth_;
  /*! \brief Accumulated shrinkage (learning rate) applied to leaf outputs */
  double shrinkage_;
  // maximum leaf depth; PredictContrib CHECKs it is >= 0 (see RecomputeMaxDepth)
  int max_depth_;
};
/*!
 * \brief Shared bookkeeping for the public split functions: turns leaf
 * 'leaf' into internal node (num_leaves_ - 1) and writes the new right
 * leaf at index num_leaves_. Leaves are encoded as the bitwise NOT of
 * their index. NOTE(review): assumes num_leaves_ has not yet been
 * incremented for the new leaf — presumably the public Split() in
 * tree.cpp does that afterwards; confirm.
 */
inline void Tree::Split(int leaf, int feature, int real_feature,
                        double left_value, double right_value, int left_cnt, int right_cnt,
                        double left_weight, double right_weight, float gain) {
  int new_node_idx = num_leaves_ - 1;
  // update parent info
  int parent = leaf_parent_[leaf];
  if (parent >= 0) {
    // if cur node is left child
    if (left_child_[parent] == ~leaf) {
      left_child_[parent] = new_node_idx;
    } else {
      right_child_[parent] = new_node_idx;
    }
  }
  // add new node
  split_feature_inner_[new_node_idx] = feature;
  split_feature_[new_node_idx] = real_feature;
  split_gain_[new_node_idx] = gain;
  // add two new leaves
  left_child_[new_node_idx] = ~leaf;
  right_child_[new_node_idx] = ~num_leaves_;
  // update new leaves
  leaf_parent_[leaf] = new_node_idx;
  leaf_parent_[num_leaves_] = new_node_idx;
  // save current leaf value to internal node before change
  internal_weight_[new_node_idx] = leaf_weight_[leaf];
  internal_value_[new_node_idx] = leaf_value_[leaf];
  internal_count_[new_node_idx] = left_cnt + right_cnt;
  // NaN child outputs are stored as zero
  leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
  leaf_weight_[leaf] = left_weight;
  leaf_count_[leaf] = left_cnt;
  leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
  leaf_weight_[num_leaves_] = right_weight;
  leaf_count_[num_leaves_] = right_cnt;
  // update leaf depth
  leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
  leaf_depth_[leaf]++;
}
/*! \brief Evaluates the tree on one record and returns the leaf output. */
inline double Tree::Predict(const double* feature_values) const {
  if (num_leaves_ <= 1) {
    // Constant tree: only leaf 0 exists.
    return leaf_value_[0];
  }
  return LeafOutput(GetLeaf(feature_values));
}
/*! \brief Map-based variant of Predict(); features given as index -> value. */
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
  if (num_leaves_ <= 1) {
    // Constant tree: only leaf 0 exists.
    return leaf_value_[0];
  }
  return LeafOutput(GetLeafByMap(feature_values));
}
/*! \brief Returns the index of the leaf one record falls into. */
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return num_leaves_ > 1 ? GetLeaf(feature_values) : 0;
}
/*! \brief Map-based variant of PredictLeafIndex(). */
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return num_leaves_ > 1 ? GetLeafByMap(feature_values) : 0;
}
/*!
 * \brief Accumulates per-feature SHAP contributions for one record into
 * 'output', which must hold num_features + 1 slots; slot num_features
 * receives the tree's expected value (the bias term).
 */
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
  output[num_features] += ExpectedValue();
  // Run the recursion with preallocated space for the unique path data
  if (num_leaves_ > 1) {
    CHECK(max_depth_ >= 0);
    // A root-to-leaf path holds at most max_depth_ + 1 nodes; TreeSHAP
    // needs the triangular number of that for all partial paths.
    const int max_path_len = max_depth_ + 1;
    std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
    TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
  }
}
/*!
 * \brief Recursively refills leaf_depth_ by walking the tree (used after
 * reloading a model). Negative node ids encode leaves via bitwise NOT.
 */
inline void Tree::RecomputeLeafDepths(int node, int depth) {
  if (node == 0) leaf_depth_.resize(num_leaves());  // root call: size the array
  if (node < 0) {
    leaf_depth_[~node] = depth;
  } else {
    RecomputeLeafDepths(left_child_[node], depth + 1);
    RecomputeLeafDepths(right_child_[node], depth + 1);
  }
}
/*!
 * \brief Routes one record from the root to a leaf and returns the leaf
 * index. Negative node ids encode leaves (~node).
 */
inline int Tree::GetLeaf(const double* feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    while (node >= 0) {
      node = Decision(feature_values[split_feature_[node]], node);
    }
  } else {
    // No categorical splits in this tree: skip the per-node type dispatch.
    while (node >= 0) {
      node = NumericalDecision(feature_values[split_feature_[node]], node);
    }
  }
  return ~node;
}
/*!
 * \brief Map-based variant of GetLeaf(). Features absent from the map
 * are substituted with 0.0f.
 */
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    while (node >= 0) {
      node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
    }
  } else {
    // No categorical splits in this tree: skip the per-node type dispatch.
    while (node >= 0) {
      node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
    }
  }
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
GB_unop__identity_int8_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int8_bool
// op(A') function: GB_unop_tran__identity_int8_bool
// C type: int8_t
// A type: bool
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = (int8_t) Ax [k] for all k: the identity op, so the only work
// is the bool -> int8 typecast, done in parallel over the entries.
GrB_Info GB_unop_apply__identity_int8_bool
(
    int8_t *Cx,         // Cx and Ax may be aliased
    const bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // fuse the load, cast, and store into one statement
        Cx [k] = (int8_t) Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while casting bool -> int8 (identity op).
GrB_Info GB_unop_tran__identity_int8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The actual kernel comes from the included template, specialized via
    // the GB_* macros defined at the top of this file.
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
//dist_device_sync
#define _STALE 0
using Staleness = uint64_t;
using Callback = std::function<void()>;
namespace mxnet {
namespace kvstore {
// Control-plane commands sent by the frontend.
// maintain same order in frontend.
enum class CommandType {
  kController, kSetMultiPrecision, kStopServer, kSyncMode,
  kSetGradientCompression, kSetProfilerParams
};
// Kinds of data (push/pull) requests; Cantor-paired with the dtype to form
// the wire-level command value (see GetCommandType / DepairDataHandleType).
enum class RequestType {
  kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull//, SSPDefaultPushPull
};
// Decoded form of a data request command: request kind plus payload dtype.
struct DataHandleType {
  RequestType requestType;
  int dtype;  // mshadow type flag of the pushed/pulled values
};
/*!
* Uses Cantor pairing function to generate a unique number given two numbers.
* This number can also be inverted to find the unique pair whose Cantor value is this number.
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
* \param requestType RequestType
* \param dtype integer
* \return Cantor value of arguments
*/
// Cantor-pairs (requestType, d) into a single command integer:
// k = s(s+1)/2 + d where s = m + d. Inverted by DepairDataHandleType.
static int GetCommandType(RequestType requestType, int d) {
  const int s = static_cast<int>(requestType) + d;
  return s * (s + 1) / 2 + d;
}
/*!
* Unpairs Cantor value and finds the two integers used to pair.
* Then returns DataHandleType object with those numbers.
* \param cmd DataHandleCommand generated by GetCommandType function
* \return DataHandleType
*/
// Inverts the Cantor pairing used by GetCommandType, recovering the
// (requestType, dtype) pair from a command integer.
static DataHandleType DepairDataHandleType(int cmd) {
  const int w = static_cast<int>(std::floor((std::sqrt(8 * cmd + 1) - 1) / 2));
  const int y = cmd - (w * w + w) / 2;
  const int x = w - y;
  CHECK_GE(x, 0);
  CHECK_GE(y, 0);
  DataHandleType type;
  type.requestType = static_cast<RequestType>(x);
  type.dtype = y;
  return type;
}
/**
* \brief executor runs a function using the thread called \ref Start
*/
class Executor {
 public:
  /**
   * \brief start the executor
   *
   * Runs the event loop on the calling thread: waits for a queued Block,
   * executes it with the queue mutex released, and fulfills its promise so
   * the enqueuing thread's Exec() returns. An empty function (queued by
   * Stop()) is the termination sentinel.
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this]{return !queue_.empty();});
      Block blk = std::move(queue_.front());
      queue_.pop();
      // Release the lock while running user code so other threads can keep
      // enqueuing work via Exec().
      lk.unlock();
      if (blk.f) {
        blk.f();
        blk.p->set_value();
      } else {
        // Empty function == stop sentinel: signal the caller and exit.
        blk.p->set_value(); break;
      }
      lk.lock();
    }
  }
  /**
   * \brief function
   */
  typedef std::function<void()> Func;
  /**
   * \brief let the thread called \ref Start to exec a function. threadsafe
   *
   * Blocks the calling thread until the executor thread has finished
   * running \a func (rendezvous via the Block's promise/future).
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    fut.wait();
  }
  /**
   * \brief stop the thread, threadsafe
   *
   * Queues an empty Func, which Start() treats as the stop sentinel.
   */
  void Stop() {
    Exec(Func());
  }

 private:
  // A queued work item: the function to run plus the promise used to signal
  // the enqueuing thread on completion.
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;        // pending work, protected by mu_
  std::mutex mu_;
  std::condition_variable cond_;   // signaled on every enqueue
};
class KVStoreDistServer {
public:
KVStoreDistServer() {
  using namespace std::placeholders;
  // The server moves raw bytes; the dtype is recovered per-request from the
  // Cantor-paired command value.
  ps_server_ = new ps::KVServer<char>(0);
  // Control messages go to CommandHandle, data push/pull to DataHandleEx.
  static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
      std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
  ps_server_->set_request_handle(
      std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
  sync_mode_ = false;
  stale = _STALE;  // initial staleness bound (macro expands to 0)
  gradient_compression_ = std::make_shared<GradientCompression>();
  log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
}
~KVStoreDistServer() {
  // Set profiler state 0 (presumably "not running" -- confirm against
  // profiler::Profiler::ProfilerState) before tearing down ps-lite.
  profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
  delete ps_server_;
}
// Installs the frontend controller callback (run for kController commands).
void set_controller(const KVStore::Controller& controller) {  // Usually None
  CHECK(controller);
  controller_ = controller;
}
// Installs the optimizer callback applied to pushed gradients.
void set_updater(const KVStore::Updater& updater) {  // Usually fixed
  CHECK(updater);
  updater_ = updater;
}
/**
 * \brief runs the executor loop on the calling thread; blocks until the
 * command \a kStopServer arrives (CommandHandle then calls exec_.Stop()).
 */
void Run() {
  exec_.Start();
}
private:
struct UpdateBuf {
std::vector<ps::KVMeta> request;
NDArray merged;
// temp_array is used to cast received values as float32 for computation if required
NDArray temp_array;
};
/*
 * Handles control-plane messages from the frontend (see CommandType);
 * data push/pull goes through DataHandleEx. Every message is acknowledged.
 */
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
  CommandType recved_type = static_cast<CommandType>(recved.head);
  switch (recved_type) {
    case CommandType::kStopServer:
      exec_.Stop();
      break;
    case CommandType::kSyncMode: {
      // Braces are required here: without them the later case labels jump
      // past tmp_s's initialization (ill-formed C++).
      sync_mode_ = true;
      // Parse the staleness bound from the body after a fixed 9-character
      // prefix (assumed format "staleness<digits>" -- TODO confirm the
      // frontend sender). The result goes into the member `stale`; the
      // previous code assigned to _STALE, an object-like macro that
      // expands to the literal 0 and therefore cannot be assigned.
      const std::string& tmp_s = recved.body;
      int parsed = 0;
      for (size_t i = 9; i < tmp_s.length(); i++) {  // reading the staleness
        parsed = parsed * 10 + (tmp_s[i] - '0');
      }
      stale = parsed;
      break;
    }
    case CommandType::kSetGradientCompression:
      gradient_compression_->DecodeParams(recved.body);
      break;
    case CommandType::kSetProfilerParams:
      // last char is the type of profiler command
      ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand>
                                        (recved.body.back() - '0'),
                                    recved.body);
      break;
    case CommandType::kSetMultiPrecision:
      // uses value 1 for message id from frontend
      if (!multi_precision_) {
        multi_precision_ = true;
        CreateMultiPrecisionCopies();
      }
      break;
    case CommandType::kController:
      // this uses value 0 for message id from frontend
      // let the main thread to execute ctrl, which is necessary for python
      exec_.Exec([this, recved]() {
        CHECK(controller_);
        controller_(recved.head, recved.body);
      });
      break;
  }
  // Acknowledge every control message.
  app->Response(recved);
}
/*
 * For keys already initialized, if necessary create stored_realt.
 * This will only be used if by some wrong usage of kvstore,
 * some keys are initialized before optimizer is set.
 */
void CreateMultiPrecisionCopies() {
  for (auto const &stored_entry : store_) {
    const int key = stored_entry.first;
    const NDArray &stored = stored_entry.second;
    // Only non-fp32 keys need a float32 master copy.
    if (stored.dtype() != mshadow::kFloat32) {
      auto &stored_realt = store_realt_[key];
      if (stored.storage_type() == kRowSparseStorage) {
        stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
                               true, mshadow::kFloat32);
      } else {
        stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
      }
      // Re-create any existing merge buffer in float32 as well, so later
      // accumulation happens at full precision.
      auto &update = update_buf_[key];
      if (!update.merged.is_none()) {
        if (update.merged.storage_type() == kRowSparseStorage) {
          update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(),
                                  true, mshadow::kFloat32);
        } else {
          update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false,
                                  mshadow::kFloat32);
        }
      }
      CHECK(update.request.size() == 0)
        << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
        << "Please set optimizer before pushing keys." << key << " " << update.request.size();
      CopyFromTo(stored, stored_realt);
    }
  }
  // Block until all the asynchronous copies above have finished.
  for (auto const &stored_realt_entry : store_realt_) {
    stored_realt_entry.second.WaitToRead();
  }
}
// Dispatches a profiler sub-command; the numeric payload (state/pause/dump
// argument) is the leading digit of the body.
void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) {
  const int arg = static_cast<int>(body.front() - '0');
  switch (type) {
    case KVStoreServerProfilerCommand::kSetConfig:
      // Strip the trailing command-type character before parsing.
      SetProfilerConfig(body.substr(0, body.size() - 1));
      break;
    case KVStoreServerProfilerCommand::kState:
      MXSetProfilerState(arg);
      break;
    case KVStoreServerProfilerCommand::kPause:
      MXProfilePause(arg);
      break;
    case KVStoreServerProfilerCommand::kDump:
      MXDumpProfile(arg);
      break;
  }
}
/*
 * Parses "key1:val1,key2:val2,..." and forwards the pairs to
 * MXSetProfilerConfig. The "filename" value is prefixed with the server
 * rank so concurrent servers do not clobber each other's output files.
 */
void SetProfilerConfig(std::string params_str) {
  std::vector<std::string> elems;
  mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
  // Own the key/value storage with std::string instead of manual
  // new[]/delete[]: the raw buffers leaked if a CHECK aborted mid-loop,
  // used snprintf round-trips just to copy bytes, and &ckeys[0] was
  // undefined on an empty vector.
  std::vector<std::string> keys;
  std::vector<std::string> vals;
  keys.reserve(elems.size());
  vals.reserve(elems.size());
  for (size_t i = 0; i < elems.size(); i++) {
    std::vector<std::string> parts;
    mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
    CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
    CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
    CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0];
    if (parts[0] == "filename") {
      parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
    }
    keys.push_back(std::move(parts[0]));
    vals.push_back(std::move(parts[1]));
  }
  // Build the C-string views only after the owning vectors stop growing,
  // so the c_str() pointers stay valid.
  std::vector<const char*> ckeys;
  std::vector<const char*> cvals;
  ckeys.reserve(keys.size());
  cvals.reserve(vals.size());
  for (size_t i = 0; i < keys.size(); i++) {
    ckeys.push_back(keys[i].c_str());
    cvals.push_back(vals[i].c_str());
  }
  MXSetProfilerConfig(elems.size(), ckeys.data(), cvals.data());
}
// Routes an incoming data request to the matching handler based on the
// request type recovered from the Cantor-paired command value.
void DataHandleEx(const ps::KVMeta& req_meta,
                  const ps::KVPairs<char>& req_data,
                  ps::KVServer<char>* server) {
  DataHandleType type = DepairDataHandleType(req_meta.cmd);
  switch (type.requestType) {
    case RequestType::kRowSparsePushPull:
      DataHandleRowSparse(type, req_meta, req_data, server);
      break;
    case RequestType::kCompressedPushPull:
      DataHandleCompressed(type, req_meta, req_data, server);
      break;
    case RequestType::kDefaultPushPull:
      // NOTE(review): _STALE is a macro expanding to the literal 0, so this
      // selects the plain handler when stale == 0 and the SSP handler
      // otherwise -- confirm this matches the intended configuration flow.
      stale == _STALE ? DataHandleDefault(type, req_meta, req_data, server):SSPDataHandleDefault(type, req_meta, req_data, server);
      break;
    //Xin Yao
    //case RequestType::SSPDefaultPushPull:
    //break;
  }
}
// True when the server keeps a separate float32 master copy for this
// request (multi-precision mode with a non-fp32 payload).
inline bool has_multi_precision_copy(const DataHandleType type) {
  if (!multi_precision_) {
    return false;
  }
  return type.dtype != mshadow::kFloat32;
}
/*
 * Applies the accumulated update for `key` once it is ready: in sync mode
 * when every worker has pushed, in async mode immediately. Runs the
 * optimizer (if set) on the executor thread, otherwise copies the merged
 * value into the store, then responds to all pending push requests.
 */
inline void ApplyUpdates(const DataHandleType type, const int key,
                         UpdateBuf *update_buf, ps::KVServer<char>* server) {
  if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
    // let the main thread to execute updater_, which is necessary for python
    auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
    auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
    if (updater_) {
      exec_.Exec([this, key, &update, &stored](){
        CHECK(updater_);
        updater_(key, update, &stored);
      });
    } else {
      CHECK(sync_mode_) << "Updater needs to be set for async mode";
      // if no updater, just copy
      CopyFromTo(update_buf->merged, &stored);
    }
    if (log_verbose_) {
      LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
    }
    for (const auto& req : update_buf->request) {
      server->Response(req);
    }
    update_buf->request.clear();
    // Keep the low-precision serving copy in sync with the fp32 master.
    if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
    stored.WaitToRead();
  } else {
    // Not all workers have pushed yet: just make sure the accumulation so
    // far has finished before the request buffers are reused.
    update_buf->merged.WaitToRead();
  }
}
// Translates the per-row keys keys[1..num_rows] into row ids relative to
// master_key, written to indices[0..num_rows-1] (keys[0] is the master key).
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                  const int64_t master_key, const int64_t num_rows) {
  indices[0] = 0;
  for (int64_t r = 0; r < num_rows; r++) {
    indices[r] = DecodeKey(keys[r + 1]) - master_key;
  }
}
/*
 * Accumulates a received row_sparse gradient into updateBuf->merged
 * (merged = merged + recved), casting to float32 first in multi-precision
 * mode. The sparse sum is written to a fresh array, then copied back.
 */
void AccumulateRowSparseGrads(const DataHandleType type,
                              const NDArray& recved,
                              UpdateBuf* updateBuf) {
  NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
              has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
  if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
  const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
  // accumulate row_sparse gradients
  using namespace mshadow;
  Engine::Get()->PushAsync(
    [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
      op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
        {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
      on_complete();
    }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
  CopyFromTo(out, &(updateBuf->merged), 0);
  // recved may alias the request buffer, which is freed after the caller
  // returns: block until the accumulation has actually run.
  updateBuf->merged.WaitToRead();
}
/*
 * Answers a pull of a row_sparse key: copies the requested rows of the
 * stored value into the response, one row per key after the master key.
 */
void RowSparsePullResponse(const DataHandleType type,
                           const int master_key,
                           const size_t num_rows,
                           const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
  if (log_verbose_) LOG(INFO) << "pull: " << master_key;
  ps::KVPairs<char> response;
  if (num_rows == 0) {
    // No rows requested: reply with zero-length values for every key.
    std::vector<int> lens(req_data.keys.size(), 0);
    response.keys = req_data.keys;
    response.lens.CopyFrom(lens.begin(), lens.end());
    server->Response(req_meta, response);
    return;
  }
  const NDArray& stored = store_[master_key];
  if (has_multi_precision_copy(type)) stored.WaitToRead();
  CHECK(!stored.is_none()) << "init " << master_key << " first";
  auto shape = stored.shape();
  auto unit_len = shape.ProdShape(1, shape.ndim());  // elements per row
  const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
  const int unit_size = unit_len * num_bytes;        // bytes per row
  const char* data = static_cast<char *> (stored.data().dptr_);
  auto len = num_rows * unit_size;
  // concat values
  response.vals.resize(len);
  // NOTE(review): an unsigned loop index under `omp parallel for` needs
  // OpenMP 3.0+ -- confirm the build's toolchain supports it.
  #pragma omp parallel for
  for (size_t i = 1; i <= num_rows; i++) {
    // keys[0] is the master key itself; row keys start at index 1.
    int key = DecodeKey(req_data.keys[i]);
    int64_t row_id = key - master_key;
    const auto src = data + row_id * unit_size;
    auto begin = (i - 1) * unit_size;
    auto end = i * unit_size;
    response.vals.segment(begin, end).CopyFrom(src, unit_size);
  }
  // setup response
  response.keys = req_data.keys;
  std::vector<int> lens(req_data.keys.size(), unit_len);
  lens[0] = 0;  // the master key carries no payload
  response.lens.CopyFrom(lens.begin(), lens.end());
  server->Response(req_meta, response);
}
/*
 * First push of a row_sparse key: allocates the stored NDArray (a float32
 * master copy in multi-precision mode) and fills it with the pushed rows.
 * The initial push is assumed to contain every row of the array.
 */
void InitRowSparseStored(const DataHandleType type,
                         const int master_key,
                         const size_t num_rows,
                         const ps::KVMeta& req_meta,
                         const ps::KVPairs<char>& req_data,
                         ps::KVServer<char>* server) {
  auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key];
  int dtype = type.dtype;
  int num_bytes = mshadow::mshadow_sizeof(dtype);
  auto unit_len = req_data.lens[1] / num_bytes;  // elements per row
  CHECK_GT(unit_len, 0);
  size_t ds[] = {num_rows, (size_t) unit_len};
  TShape dshape(ds, ds + 2);
  CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
  // Wrap the request buffer as a dense NDArray without copying.
  TBlob recv_blob;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
  })
  NDArray recved = NDArray(recv_blob, 0);
  stored = NDArray(kRowSparseStorage, dshape, Context(), true,
                   has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
  if (has_multi_precision_copy(type)) {
    store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
  }
  Engine::Get()->PushAsync(
    [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) {
      NDArray rsp = stored;
      stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
      mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
      using namespace mxnet::op;
      // The row-index aux array is filled with the full identity [0, nnr)
      // since the initial push carries all rows.
      nnvm::dim_t nnr = rsp.shape()[0];
      MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
        IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
        mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
      });
      TBlob rsp_data = rsp.data();
      // copies or casts as appropriate
      ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
      on_complete();
    }, recved.ctx(), {recved.var()}, {stored.var()},
    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
  if (has_multi_precision_copy(type)) {
    CopyFromTo(stored, store_[master_key]);
    store_[master_key].WaitToRead();
  }
  // recved aliases req_data's buffer, which is freed after we return:
  // block until the copy above has actually run.
  stored.WaitToRead();
  server->Response(req_meta);
}
/*
 * Push/pull for row_sparse keys. keys[0] is the "master" key naming the
 * array; keys[1..] name the pushed/pulled rows. First push initializes the
 * store; later pushes are merged (sync) or staged (async) and applied via
 * ApplyUpdates. Pulls are answered by RowSparsePullResponse.
 */
void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
                         const ps::KVPairs<char>& req_data,
                         ps::KVServer<char>* server) {
  int master_key = DecodeKey(req_data.keys[0]);
  auto num_rows = req_data.keys.size() - 1;
  auto& stored = store_[master_key];
  if (req_meta.push) {
    CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
    CHECK_EQ(req_data.lens[0], 0);
    if (stored.is_none()) {
      if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
      // initialization
      CHECK_GT(num_rows, 0) << "init with empty data is not supported";
      InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
      return;
    } else {
      if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
      auto& updates = update_buf_[master_key];
      if (sync_mode_ && updates.merged.is_none()) {
        updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
                                 has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      }
      if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
        updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false,
                                     mshadow::kFloat32);
      }
      if (num_rows == 0) {
        // Empty gradient from this worker.
        if (sync_mode_) {
          if (updates.request.empty()) {
            // reset to zeros
            int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype;
            updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
                                     true, merged_dtype);
          }  // else nothing to aggregate
          updates.request.push_back(req_meta);
          ApplyUpdates(type, master_key, &updates, server);
        } else {
          server->Response(req_meta);
        }
      } else {
        auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
        CHECK_GT(unit_len, 0);
        // indices
        std::vector<int64_t> indices(num_rows);
        DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
        // data
        TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
        size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
        TShape dshape(ds, ds + 2);
        TBlob recv_blob;
        MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
          recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()),
                            dshape, cpu::kDevMask);
        })
        // row_sparse NDArray
        NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
        if (updates.request.empty()) {
          // First push of this round: overwrite rather than accumulate.
          if (sync_mode_) {
            CopyFromTo(recved, updates.merged);
          } else {
            if (has_multi_precision_copy(type)) {
              CopyFromTo(recved, updates.temp_array);
            } else {
              updates.temp_array = recved;
            }
          }
        } else {
          CHECK(sync_mode_);
          AccumulateRowSparseGrads(type, recved, &updates);
        }
        updates.request.push_back(req_meta);
        ApplyUpdates(type, master_key, &updates, server);
      }
    }
  } else {
    // pull
    RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
  }
}
// Replies to a pull of a dense key with the stored value's raw bytes.
void DefaultStorageResponse(const DataHandleType type,
                            const int key,
                            const ps::KVMeta& req_meta,
                            const ps::KVPairs<char> &req_data,
                            ps::KVServer<char>* server) {
  const NDArray& stored = store_[key];
  CHECK(!stored.is_none()) << "init " << key << " first";
  // as server returns when store_realt is ready in this case
  if (has_multi_precision_copy(type)) stored.WaitToRead();
  auto nbytes = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
  ps::KVPairs<char> response;
  response.keys = req_data.keys;
  response.lens = {nbytes};
  // TODO(mli) try to remove this CopyFrom
  response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), nbytes);
  server->Response(req_meta, response);
}
// SSP Xin Yao
/*
 * Replies to a pull under the SSP (stale synchronous parallel) protocol.
 * When the puller at iteration current_iter is more than `stale` iterations
 * ahead of the slowest tick for this key, the reply is deferred: it is
 * queued as a callback that SSPDataHandleDefault fires once the stragglers
 * catch up. Otherwise it is answered immediately.
 */
void SSPDefaultStorageResponse(const DataHandleType type,
                               const int key,
                               const ps::KVMeta& req_meta,
                               const ps::KVPairs<char> &req_data,
                               ps::KVServer<char>* server) {
  // The reply is identical whether sent now or later, so build it once.
  // (The original duplicated this whole block verbatim in both paths.)
  auto respond = [this, type, key, req_meta, req_data, server]() {
    ps::KVPairs<char> response;
    const NDArray& stored = store_[key];
    CHECK(!stored.is_none()) << "init " << key << " first";
    // as server returns when store_realt is ready in this case
    if (has_multi_precision_copy(type)) stored.WaitToRead();
    auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
    response.keys = req_data.keys;
    response.lens = {len};
    // TODO(mli) try to remove this CopyFrom
    response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
    server->Response(req_meta, response);
  };
  int current_iter = req_meta.staleness;
  /*
   * SSP condition: we cannot serve the pull while the puller is more than
   * `stale` iterations ahead of the slowest worker's tick for this key.
   */
  if (ticks[key] + stale <= current_iter) {
    // Park the reply until the slow workers catch up.
    callbacks_[ticks[key]].push_back(respond);
    return;
  }
  respond();
}
/*
 * Push/pull for gradient-compressed keys (fp32 only). Pushed values arrive
 * quantized: keys[0] is a dummy key carrying the original (uncompressed)
 * size, keys[1] the real key. Values are dequantized before the usual
 * init / sync-merge / async-update handling.
 */
void DataHandleCompressed(const DataHandleType type,
                          const ps::KVMeta& req_meta,
                          const ps::KVPairs<char> &req_data,
                          ps::KVServer<char>* server) {
  CHECK_EQ(type.dtype, mshadow::kFloat32)
    << "Gradient compression is currently supported for fp32 only";
  if (req_meta.push) {
    // there used several WaitToRead, this is because \a recved's memory
    // could be deallocated when this function returns. so we need to make sure
    // the operators with \a NDArray are actually finished
    // first for dummy key which represents original size of array, whose len is 0
    CHECK_EQ(req_data.keys.size(), (size_t)2);
    CHECK_EQ(req_data.lens.size(), (size_t)2);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
    int original_size = DecodeKey(req_data.keys[0]);
    int key = DecodeKey(req_data.keys[1]);
    auto& stored = store_[key];
    // Wrap the compressed payload without copying.
    size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
    NDArray recved = NDArray(recv_blob, 0);
    NDArray decomp_buf = decomp_buf_[key];
    dshape = TShape{(int64_t) original_size};
    if (decomp_buf.is_none()) {
      decomp_buf = NDArray(dshape, Context());
    }
    if (stored.is_none()) {
      // First push: dequantize straight into the freshly created store.
      stored = NDArray(dshape, Context());
      gradient_compression_->Dequantize(recved, &stored, 0);
      server->Response(req_meta);
      stored.WaitToRead();
    } else if (sync_mode_) {
      // synced push
      auto& merged = update_buf_[key];
      if (merged.merged.is_none()) {
        merged.merged = NDArray(dshape, Context());
      }
      if (merged.request.size() == 0) {
        // First push of the round: overwrite rather than accumulate.
        gradient_compression_->Dequantize(recved, &merged.merged, 0);
      } else {
        gradient_compression_->Dequantize(recved, &decomp_buf, 0);
        merged.merged += decomp_buf;
      }
      merged.request.push_back(req_meta);
      ApplyUpdates(type, key, &merged, server);
    } else {
      // async push
      gradient_compression_->Dequantize(recved, &decomp_buf, 0);
      exec_.Exec([this, key, &decomp_buf, &stored]() {
        CHECK(updater_);
        updater_(key, decomp_buf, &stored);
      });
      server->Response(req_meta);
      stored.WaitToRead();
    }
  } else {
    // pull: full (uncompressed) values are served like the default path
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    CHECK_EQ(req_data.lens.size(), (size_t)0);
    int key = DecodeKey(req_data.keys[0]);
    DefaultStorageResponse(type, key, req_meta, req_data, server);
  }
}
/*
 * Push/pull for dense keys (non-SSP path). First push of a key initializes
 * the store; later pushes are merged (sync) or staged (async) and applied
 * via ApplyUpdates. Pulls are answered with the stored bytes.
 */
void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
                       const ps::KVPairs<char> &req_data,
                       ps::KVServer<char>* server) {
  // do some check
  CHECK_EQ(req_data.keys.size(), (size_t)1);
  if (req_meta.push) {
    CHECK_EQ(req_data.lens.size(), (size_t)1);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
  }
  int key = DecodeKey(req_data.keys[0]);
  auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
  // there used several WaitToRead, this is because \a recved's memory
  // could be deallocated when this function returns. so we need to make sure
  // the operators with \a NDArray are actually finished
  if (req_meta.push) {
    // Wrap the request buffer as an NDArray without copying.
    size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob;
    MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
      recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
    })
    NDArray recved = NDArray(recv_blob, 0);
    if (stored.is_none()) {
      // initialization
      stored = NDArray(dshape, Context(), false,
                       has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      CopyFromTo(recved, &stored, 0);
      server->Response(req_meta);
      if (has_multi_precision_copy(type)) {
        // Also create the low-precision serving copy.
        auto& stored_dtype = store_[key];
        stored_dtype = NDArray(dshape, Context(), false, type.dtype);
        CopyFromTo(stored, stored_dtype);
        stored_dtype.WaitToRead();
      }
      stored.WaitToRead();
    } else {
      auto &updates = update_buf_[key];
      if (sync_mode_ && updates.merged.is_none()) {
        updates.merged = NDArray(dshape, Context(), false,
                                 has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      }
      if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
        updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
      }
      if (updates.request.empty()) {
        // First push of this round: overwrite rather than accumulate.
        if (sync_mode_) {
          CopyFromTo(recved, updates.merged);
        } else {
          if (has_multi_precision_copy(type)) {
            CopyFromTo(recved, updates.temp_array);
          } else {
            updates.temp_array = recved;
          }
        }
      } else {
        CHECK(sync_mode_);
        if (has_multi_precision_copy(type)) {
          CopyFromTo(recved, updates.temp_array);
          updates.merged += updates.temp_array;
        } else {
          updates.merged += recved;
        }
      }
      updates.request.push_back(req_meta);
      ApplyUpdates(type, key, &updates, server);
    }
  } else {
    DefaultStorageResponse(type, key, req_meta, req_data, server);
  }
}
//SSP Xin Yao
/*
 * SSP variant of DataHandleDefault. Pushes are handled exactly as in the
 * default path; in addition each push advances the SSP bookkeeping:
 * workercount counts pushes per iteration and, once all workers have
 * pushed the iteration at ticks[key], the tick advances and any pull
 * callbacks parked on it (see SSPDefaultStorageResponse) are fired.
 * NOTE(review): workercount is keyed by iteration only (shared across all
 * keys) while ticks is per key -- confirm this is the intended clock model
 * when more than one key is pushed per iteration.
 */
void SSPDataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
                          const ps::KVPairs<char> &req_data,
                          ps::KVServer<char>* server) {
  // do some check
  CHECK_EQ(req_data.keys.size(), (size_t)1);
  if (req_meta.push) {
    CHECK_EQ(req_data.lens.size(), (size_t)1);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
  }
  int key = DecodeKey(req_data.keys[0]);
  int current_iter = req_meta.staleness;  // puller/pusher's iteration number
  auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
  // there used several WaitToRead, this is because \a recved's memory
  // could be deallocated when this function returns. so we need to make sure
  // the operators with \a NDArray are actually finished
  if (req_meta.push) {
    size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob;
    MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
      recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
    })
    NDArray recved = NDArray(recv_blob, 0);
    if (stored.is_none()) {
      // initialization
      stored = NDArray(dshape, Context(), false,
                       has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      CopyFromTo(recved, &stored, 0);  // finished push
      server->Response(req_meta);      // response push to workers
      if (has_multi_precision_copy(type)) {
        auto& stored_dtype = store_[key];
        stored_dtype = NDArray(dshape, Context(), false, type.dtype);
        CopyFromTo(stored, stored_dtype);
        stored_dtype.WaitToRead();
      }
      stored.WaitToRead();  // After wait to read
    } else {
      auto &updates = update_buf_[key];
      if (sync_mode_ && updates.merged.is_none()) {
        updates.merged = NDArray(dshape, Context(), false,
                                 has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      }
      if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
        updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
      }
      if (updates.request.empty()) {
        // First push of this round: overwrite rather than accumulate.
        if (sync_mode_) {
          CopyFromTo(recved, updates.merged);
        } else {
          if (has_multi_precision_copy(type)) {
            CopyFromTo(recved, updates.temp_array);
          } else {
            updates.temp_array = recved;
          }
        }
      } else {
        CHECK(sync_mode_);
        if (has_multi_precision_copy(type)) {
          CopyFromTo(recved, updates.temp_array);
          updates.merged += updates.temp_array;
        } else {
          updates.merged += recved;
        }
      }
      updates.request.push_back(req_meta);
      ApplyUpdates(type, key, &updates, server);  // response push to workers
    }
    //Xin Yao
    workercount[current_iter] += 1;  // For this iteration, add it for each push
    // Advance the per-key tick while every worker has pushed the iteration
    // it points at, releasing any pulls parked on the released tick.
    while (workercount[ticks[key]] == ps::NumWorkers()) {
      //trigger a cb of pull
      auto& cbs = callbacks_[ticks[key]];
      for (const auto& cb : cbs) {
        cb();
      }
      ticks[key] += 1;
    }
  } else {
    SSPDefaultStorageResponse(type, key, req_meta, req_data, server);
  }
}
// Maps a global ps::Key into this server's local key space by subtracting
// the start of the rank's assigned key range.
int DecodeKey(ps::Key key) {
  const auto& kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
  return key - kr.begin();
}
/**
 * \brief user defined mode for push
 */
bool sync_mode_;
// SSP staleness bound; 0 routes kDefaultPushPull to the plain handler,
// non-zero to the SSP handler (see DataHandleEx).
int stale;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
 * \brief store_ contains the value at kvstore for each key
 */
std::unordered_map<int, NDArray> store_;
// float32 master copies of non-fp32 keys (multi-precision mode only)
std::unordered_map<int, NDArray> store_realt_;
/**
 * SSP controller
 */
// per-key clock: appears to track the oldest iteration not yet completed
// by every worker -- confirm against SSPDataHandleDefault
std::unordered_map<int, Staleness> ticks;
// number of pushes received per iteration (keyed by iteration, not by key)
std::unordered_map<Staleness, int> workercount;
// pull replies deferred until the slow workers reach a given tick
std::unordered_map<Staleness, std::vector<Callback>> callbacks_;
/**
 * \brief merge_buf_ is a buffer used if sync_mode is true. It represents
 * values from different workers being merged. The store will be updated
 * to this value when values from all workers are pushed into this buffer.
 */
std::unordered_map<int, UpdateBuf> update_buf_;
/**
 * \brief decomp_buf_ is a buffer into which compressed values are
 * decompressed before merging to the store. used when compress_!='none'
 */
std::unordered_map<int, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<char>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
/*
 * \brief whether to use multi precision mode.
 * in multi precision mode, all weights are stored as float32.
 * any gradient received will be cast to float32 before accumulation and updating of weights.
 */
bool multi_precision_;
/**
 * \brief gradient compression object.
 * starts with none, used after SetGradientCompression sets the type
 * currently there is no support for unsetting gradient compression
 */
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
GB_binop__isge_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_uint16
// A.*B function (eWiseMult): GB_AemultB__isge_uint16
// A*D function (colscale): GB_AxD__isge_uint16
// D*A function (rowscale): GB_DxB__isge_uint16
// C+=B function (dense accum): GB_Cdense_accumB__isge_uint16
// C+=b function (dense accum): GB_Cdense_accumb__isge_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_uint16
// C=scalar+B GB_bind1st__isge_uint16
// C=scalar+B' GB_bind1st_tran__isge_uint16
// C=A+scalar GB_bind2nd__isge_uint16
// C=A'+scalar GB_bind2nd_tran__isge_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, using the ISGE_UINT16 operator
// (cij = (aij >= bij)).  Auto-generated kernel: the numeric loop lives in
// the included template, parameterized by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__isge_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with the
// ISGE_UINT16 operator.  The kfirst/klast/pstart slice arrays partition
// B's entries into `ntasks` balanced tasks for the parallel template.
GrB_Info GB_Cdense_accumB__isge_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the ISGE_UINT16
// operator.  p_bwork points at the scalar b, already typecast to uint16_t.
// FIX: removed a duplicate `return (GrB_SUCCESS) ;` that followed the inner
// block -- the block already returns unconditionally, so the second return
// was unreachable dead code.  (This file is auto-generated; the same fix
// should be applied in the code generator.)
GrB_Info GB_Cdense_accumb__isge_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with the
// ISGE_UINT16 operator.  The *_is_pattern flags mark inputs whose values
// are ignored (structure only); slices partition A for the parallel loop.
GrB_Info GB_AxD__isge_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; only its values are computed here
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with the
// ISGE_UINT16 operator.  Mirror of GB_AxD above, without task slicing.
GrB_Info GB_DxB__isge_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as B; only its values are computed here
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns) with the
// ISGE_UINT16 operator.  The C_to_* maps and TaskList come from the
// symbolic phase; GB_FREE_ALL (defined above) releases the per-matrix
// slice workspaces allocated inside the template.
GrB_Info GB_AaddB__isge_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces; allocated by the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns) with
// the ISGE_UINT16 operator.  Structure parallels GB_AaddB above.
GrB_Info GB_AemultB__isge_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces; allocated by the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply ISGE_UINT16 with the scalar bound as the first
// argument:  Cx [p] = (x >= Bx [p]) for every entry present in B.
// Bb is B's bitmap (may be NULL for a full matrix); absent entries skipped.
GrB_Info GB_bind1st__isge_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
uint16_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply ISGE_UINT16 with the scalar bound as the second
// argument:  Cx [p] = (Ax [p] >= y) for every entry present in A.
// Ab is A's bitmap (may be NULL for a full matrix); absent entries skipped.
GrB_Info GB_bind2nd__isge_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
uint16_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply ISGE_UINT16 with the scalar bound
// first:  cij = (x >= aij).  Uses the GB_CAST_OP macro redefined just above.
GrB_Info GB_bind1st_tran__isge_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply ISGE_UINT16 with the scalar bound
// second:  cij = (aij >= y).  Uses the GB_CAST_OP macro redefined just above.
GrB_Info GB_bind2nd_tran__isge_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
density.c | #include <stdio.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <mkl.h>
#include <mkl_types.h>
#include "utils.h"
#include "density.h"
#include "linalg.h"
#define PI 3.14159265359
// THE FOLLOWING TWO FUNCTIONS ARE NOT YET IMPLEMENTED
/*
double* ncl_ae_state_density(int BAND_NUM, pswf_t* wf, int* fftg, int* labels, double* coords) {
int gridsize = fftg[0] * fftg[1] * fftg[2];
double* P = mkl_calloc(gridsize, sizeof(double), 64);
int spin_mult = 2 / wf->nspin;
double complex* x = realspace_state(b, k, wf, fftg, labels, coords);
for (int i = 0; i < gridsize; i++) {
P[i] += creal(x[i] * conj(x[i]));
}
mkl_free(x);
return P;
}
*/
/**
 * Accumulate |psi|^2 of one all-electron band/k-point onto the real-space
 * grid P (length fftg[0]*fftg[1]*fftg[2]).  Values are added to P, not
 * assigned, so the caller can sum several states into the same buffer.
 */
void ae_state_density(double* P, int BAND_NUM, int KPOINT_NUM, pswf_t* wf,
    int* fftg, int* labels, double* coords) {

    const int npts = fftg[0] * fftg[1] * fftg[2];
    double complex* psi = mkl_malloc(npts * sizeof(double complex), 64);

    realspace_state(psi, BAND_NUM, KPOINT_NUM, wf, fftg, labels, coords);
    for (int g = 0; g < npts; g++) {
        double complex v = psi[g];
        P[g] += creal(v * conj(v));
    }
    mkl_free(psi);
}
/*
void ae_state_density(double* P, int BAND_NUM, int KPOINT_NUM, pswf_t* wf,
int* fftg, int* labels, double* coords) {
ppot_t* pps = wf->pps;
int num_sites = wf->num_sites;
double complex* x = (double complex*) mkl_malloc(fftg[0]*fftg[1]*fftg[2]*sizeof(double complex), 64);
fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[KPOINT_NUM]->k,
wf->kpts[KPOINT_NUM]->Gs, wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->Cs,
wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->num_waves, fftg);
//printf("FINISH FT\n");
double* lattice = wf->lattice;
double vol = determinant(lattice);
for (int i = 0; i < fftg[0]; i++) {
double frac[3] = {0,0,0};
double kdotr = 0;
for (int j = 0; j < fftg[1]; j++) {
for (int k = 0; k < fftg[2]; k++) {
P[i*fftg[1]*fftg[2] + j*fftg[2] + k] = creal(x[i*fftg[1]*fftg[2] + j*fftg[2] + k]
* conj(x[i*fftg[1]*fftg[2] + j*fftg[2] + k]));
}
}
}
for (int p = 0; p < num_sites; p++) {
projection_t pros = wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->projections[p];
//printf("READ PROJECTIONS\n");
ppot_t pp = pps[labels[p]];
double rmax = pp.wave_grid[pp.wave_gridsize-1];
double res[3] = {0,0,0};
vcross(res, lattice+3, lattice+6);
int grid1 = (int) (mag(res) * rmax / vol * fftg[0]) + 1;
vcross(res, lattice+0, lattice+6);
int grid2 = (int) (mag(res) * rmax / vol * fftg[1]) + 1;
vcross(res, lattice+0, lattice+3);
int grid3 = (int) (mag(res) * rmax / vol * fftg[2]) + 1;
int center1 = (int) round(coords[3*p+0] * fftg[0]);
int center2 = (int) round(coords[3*p+1] * fftg[1]);
int center3 = (int) round(coords[3*p+2] * fftg[2]);
//printf("FINISH SETUP %d\n%d %d %d\n%d %d %d\n",p, center1, center2, center3, grid1, grid2, grid3);
for (int i = -grid1 + center1; i <= grid1 + center1; i++) {
double frac[3] = {0,0,0};
double testcoord[3] = {0,0,0};
int ii=0, jj=0, kk=0;
double phasecoord[3] = {0,0,0};
double phase = 0;
for (int j = -grid2 + center2; j <= grid2 + center2; j++) {
for (int k = -grid3 + center3; k <= grid3 + center3; k++) {
testcoord[0] = (double) i / fftg[0] - coords[3*p+0];
testcoord[1] = (double) j / fftg[1] - coords[3*p+1];
testcoord[2] = (double) k / fftg[2] - coords[3*p+2];
frac_to_cartesian(testcoord, lattice);
if (mag(testcoord) < rmax) {
ii = (i%fftg[0] + fftg[0]) % fftg[0];
jj = (j%fftg[1] + fftg[1]) % fftg[1];
kk = (k%fftg[2] + fftg[2]) % fftg[2];
frac[0] = (double) ii / fftg[0];
frac[1] = (double) jj / fftg[1];
frac[2] = (double) kk / fftg[2];
phasecoord[0] = coords[3*p+0] + ((ii-i) / fftg[0]);
phasecoord[1] = coords[3*p+1] + ((jj-j) / fftg[1]);
phasecoord[2] = coords[3*p+2] + ((kk-k) / fftg[2]);
phase = dot(phasecoord, wf->kpts[KPOINT_NUM]->k);
for (int n = 0; n < pros.total_projs; n++) {
for (int m = 0; m < pros.total_projs; m++) {
P[ii*fftg[1]*fftg[2] + jj*fftg[2] + kk] += creal(
wave_value2(pp.wave_grid,
pp.funcs[pros.ns[n]].aewave,
pp.funcs[pros.ns[n]].aewave_spline,
pp.wave_gridsize,
pros.ls[n], pros.ms[n],
testcoord)
* conj(wave_value2(pp.wave_grid,
pp.funcs[pros.ns[m]].aewave,
pp.funcs[pros.ns[m]].aewave_spline,
pp.wave_gridsize,
pros.ls[m], pros.ms[m],
testcoord))
* pros.overlaps[n] * conj(pros.overlaps[m])
);
P[ii*fftg[1]*fftg[2] + jj*fftg[2] + kk] -= creal(
wave_value2(pp.wave_grid,
pp.funcs[pros.ns[n]].pswave,
pp.funcs[pros.ns[n]].pswave_spline,
pp.wave_gridsize,
pros.ls[n], pros.ms[n],
testcoord)
* conj(wave_value2(pp.wave_grid,
pp.funcs[pros.ns[m]].pswave,
pp.funcs[pros.ns[m]].pswave_spline,
pp.wave_gridsize,
pros.ls[m], pros.ms[m],
testcoord))
* pros.overlaps[n] * conj(pros.overlaps[m])
);
}
// wave_value(pp.funcs[pros.ns[n]],
// pp.wave_gridsize, pp.wave_grid,
// pros.ms[n], coords+3*p, frac, lattice)
// * pros.overlaps[n] * cexp(2*PI*I*phase);
// * Ylm(thetaphi[0], thetaphi[1]);
}
}
}
}
}
}
mkl_free(x);
}
*/
/**
 * Compute the all-electron charge density on the FFT grid.
 * P (length fftg[0]*fftg[1]*fftg[2]) is accumulated with the occupation-
 * and k-weight-scaled |psi|^2 of every occupied band at every k-point/spin.
 */
void ae_chg_density(double* P, pswf_t* wf, int* fftg, int* labels, double* coords) {
    const int npts = fftg[0] * fftg[1] * fftg[2];
    const int spin_mult = 2 / wf->nspin;  // doubly occupied when spin-restricted
    double complex* psi = mkl_malloc(npts * sizeof(double complex), 64);

    for (int kpt = 0; kpt < wf->nwk * wf->nspin; kpt++) {
        for (int band = 0; band < wf->nband; band++) {
            double occ = wf->kpts[kpt]->bands[band]->occ;
            if (occ <= 0) {
                continue;  // empty band contributes nothing
            }
            realspace_state(psi, band, kpt, wf, fftg, labels, coords);
            double w = wf->kpts[kpt]->weight * occ * spin_mult;
            for (int g = 0; g < npts; g++) {
                P[g] += creal(psi[g] * conj(psi[g])) * w;
            }
        }
    }
    mkl_free(psi);
    mkl_free_buffers();
}
// Noncollinear analogue of ae_chg_density: each state is a two-component
// spinor, so x holds the up component in [0, gridsize) and the down
// component in [gridsize, 2*gridsize); both contribute to the density.
void ncl_ae_chg_density(double* P, pswf_t* wf, int* fftg, int* labels, double* coords) {
int gridsize = fftg[0] * fftg[1] * fftg[2];
double complex* x = mkl_malloc(2 * gridsize * sizeof(double complex), 64);
//double* P = mkl_calloc(gridsize, sizeof(double), 64);
// no extra spin degeneracy: each noncollinear state is singly occupied
int spin_mult = 1;
for (int k = 0; k < wf->nwk * wf->nspin; k++) {
//printf("KLOOP %d\n", k);
for (int b = 0; b < wf->nband; b++) {
if (wf->kpts[k]->bands[b]->occ > 0) {
ncl_realspace_state(x, b, k, wf, fftg, labels, coords);
for (int i = 0; i < gridsize; i++) {
// |up|^2 + |down|^2, weighted by k-point weight and occupation
P[i] += (creal(x[i] * conj(x[i]))
+ creal(x[i+gridsize] * conj(x[i+gridsize])))
* wf->kpts[k]->weight
* wf->kpts[k]->bands[b]->occ * spin_mult;
}
}
}
}
mkl_free(x);
mkl_free_buffers();
}
// For each band b of the reference structure wf_R and each k-point, compute
// the real-space overlap <psi_R(b,k) | psi(BAND_NUM,k)> and store it in
// projs[b*nwk*nspin + k].  The dot product is scaled by vol/gridsize, the
// volume element of one FFT grid cell.
void project_realspace_state(double complex* projs, int BAND_NUM, pswf_t* wf, pswf_t* wf_R,
int* fftg, int* labels, double* coords, int* labels_R, double* coords_R) {
int nband = wf->nband;
int nwk = wf->nwk;
int nspin = wf->nspin;
int gridsize = fftg[0]*fftg[1]*fftg[2];
//double* projs = (double*) malloc(2*nband*nwk*nspin*sizeof(double));
double vol = determinant(wf->lattice);
double complex* state = mkl_malloc(gridsize * sizeof(double complex), 64);
double complex* state_R = mkl_malloc(gridsize * sizeof(double complex), 64);
for (int k = 0; k < nwk * nspin; k++) {
double complex overlap = 0;
realspace_state(state, BAND_NUM, k, wf, fftg, labels, coords);
for (int b = 0; b < nband; b++) {
realspace_state(state_R, b, k, wf_R, fftg, labels_R, coords_R);
// conjugated dot product: sum conj(state_R[i]) * state[i]
cblas_zdotc_sub(gridsize, state_R, 1, state, 1, &overlap);
overlap *= vol / gridsize;
projs[b*nwk*nspin + k] = overlap;
}
}
mkl_free(state_R);
mkl_free(state);
}
// Evaluate the all-electron wavefunction of (BAND_NUM, KPOINT_NUM) on the
// real-space FFT grid x (length fftg[0]*fftg[1]*fftg[2]):
//   1. inverse-FFT the plane-wave coefficients (pseudo part),
//   2. multiply by the Bloch phase exp(2*pi*i k.r),
//   3. inside each PAW augmentation sphere, add the projector-weighted
//      (AE - PS) partial-wave difference to restore the AE wavefunction.
// labels[p] selects the pseudopotential for site p; coords are fractional.
void realspace_state(double complex* x, int BAND_NUM, int KPOINT_NUM,
pswf_t* wf, int* fftg, int* labels, double* coords) {
ppot_t* pps = wf->pps;
//double complex* x = mkl_calloc(fftg[0]*fftg[1]*fftg[2], sizeof(double complex), 64);
//printf("START FT\n");
fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[KPOINT_NUM]->k,
wf->kpts[KPOINT_NUM]->Gs, wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->Cs,
wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->num_waves, fftg);
//printf("FINISH FT\n");
double* lattice = wf->lattice;
double vol = determinant(lattice);
// apply the Bloch phase factor exp(2*pi*i k.r) at every grid point
for (int i = 0; i < fftg[0]; i++) {
double frac[3] = {0,0,0};
double kdotr = 0;
for (int j = 0; j < fftg[1]; j++) {
for (int k = 0; k < fftg[2]; k++) {
frac[0] = (double) i / fftg[0];
frac[1] = (double) j / fftg[1];
frac[2] = (double) k / fftg[2];
kdotr = dot(wf->kpts[KPOINT_NUM]->k, frac);
x[i*fftg[1]*fftg[2] + j*fftg[2] + k] *= cexp(2*PI*I*kdotr);
}
}
}
int num_sites = wf->num_sites;
// NOTE(review): sites are processed in parallel but all accumulate into the
// shared grid x with +=; if two augmentation spheres overlap the same grid
// point this is a data race -- confirm spheres are disjoint or serialize.
#pragma omp parallel for
for (int p = 0; p < num_sites; p++) {
projection_t pros = wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->projections[p];
//printf("READ PROJECTIONS\n");
ppot_t pp = pps[labels[p]];
// rmax: radius of this site's augmentation sphere (last radial grid point)
double rmax = pp.wave_grid[pp.wave_gridsize-1];
double res[3] = {0,0,0};
// grid1..3: half-width, in FFT grid points, of a box that encloses the
// sphere along each lattice direction (cross product / vol = 1/spacing)
vcross(res, lattice+3, lattice+6);
int grid1 = (int) (mag(res) * rmax / vol * fftg[0]) + 1;
vcross(res, lattice+0, lattice+6);
int grid2 = (int) (mag(res) * rmax / vol * fftg[1]) + 1;
vcross(res, lattice+0, lattice+3);
int grid3 = (int) (mag(res) * rmax / vol * fftg[2]) + 1;
// grid indices nearest the atom center
int center1 = (int) round(coords[3*p+0] * fftg[0]);
int center2 = (int) round(coords[3*p+1] * fftg[1]);
int center3 = (int) round(coords[3*p+2] * fftg[2]);
//printf("FINISH SETUP %d\n%d %d %d\n%d %d %d\n",p, center1, center2, center3, grid1, grid2, grid3);
for (int i = -grid1 + center1; i <= grid1 + center1; i++) {
double frac[3] = {0,0,0};
double testcoord[3] = {0,0,0};
int ii=0, jj=0, kk=0;
double phasecoord[3] = {0,0,0};
double phase = 0;
for (int j = -grid2 + center2; j <= grid2 + center2; j++) {
for (int k = -grid3 + center3; k <= grid3 + center3; k++) {
// Cartesian displacement from the atom to this grid point
testcoord[0] = (double) i / fftg[0] - coords[3*p+0];
testcoord[1] = (double) j / fftg[1] - coords[3*p+1];
testcoord[2] = (double) k / fftg[2] - coords[3*p+2];
frac_to_cartesian(testcoord, lattice);
if (mag(testcoord) < rmax) {
// wrap the (possibly negative) indices into the periodic grid
ii = (i%fftg[0] + fftg[0]) % fftg[0];
jj = (j%fftg[1] + fftg[1]) % fftg[1];
kk = (k%fftg[2] + fftg[2]) % fftg[2];
frac[0] = (double) ii / fftg[0];
frac[1] = (double) jj / fftg[1];
frac[2] = (double) kk / fftg[2];
// (ii-i) is an exact multiple of fftg[0] (ii = i mod fftg[0]),
// so this integer division is exact: phasecoord is the atom
// position translated to the image nearest the wrapped point
phasecoord[0] = coords[3*p+0] + ((ii-i) / fftg[0]);
phasecoord[1] = coords[3*p+1] + ((jj-j) / fftg[1]);
phasecoord[2] = coords[3*p+2] + ((kk-k) / fftg[2]);
phase = dot(phasecoord, wf->kpts[KPOINT_NUM]->k);
// add sum_n <p_n|psi> * (phi_n^AE - phi_n^PS)(r) * Bloch phase
for (int n = 0; n < pros.total_projs; n++) {
x[ii*fftg[1]*fftg[2] + jj*fftg[2] + kk] +=
wave_value2(pp.wave_grid,
pp.funcs[pros.ns[n]].diffwave,
pp.funcs[pros.ns[n]].diffwave_spline,
pp.wave_gridsize,
pros.ls[n], pros.ms[n],
testcoord)
* pros.overlaps[n] * cexp(2*PI*I*phase);
// wave_value(pp.funcs[pros.ns[n]],
// pp.wave_gridsize, pp.wave_grid,
// pros.ms[n], coords+3*p, frac, lattice)
// * pros.overlaps[n] * cexp(2*PI*I*phase);
// * Ylm(thetaphi[0], thetaphi[1]);
}
}
}
}
}
}
}
/**
 * Multiply every grid value by exp(-2*pi*i k.r), removing the Bloch phase
 * that realspace_state applied.
 */
void remove_phase(double complex* x, int KPOINT_NUM, pswf_t* wf, int* fftg) {
    double* kvec = wf->kpts[KPOINT_NUM]->k;
    for (int i = 0; i < fftg[0]; i++) {
        for (int j = 0; j < fftg[1]; j++) {
            for (int k = 0; k < fftg[2]; k++) {
                double frac[3] = { (double) i / fftg[0],
                                   (double) j / fftg[1],
                                   (double) k / fftg[2] };
                double kdotr = dot(kvec, frac);
                x[i*fftg[1]*fftg[2] + j*fftg[2] + k] *= cexp(-2*PI*I*kdotr);
            }
        }
    }
}
// Noncollinear analogue of realspace_state: the plane-wave coefficient list
// holds both spinor components back-to-back, so the first half is FFT'd
// into xup = x and the second half into xdown = x + gridsize.  The PAW
// on-site correction uses separate up/down projection coefficients.
void ncl_realspace_state(double complex* x, int BAND_NUM, int KPOINT_NUM,
pswf_t* wf, int* fftg, int* labels, double* coords) {
ppot_t* pps = wf->pps;
double complex* xup = x;//mkl_calloc(2*fftg[0]*fftg[1]*fftg[2], sizeof(double complex), 64);
double complex* xdown = x + fftg[0]*fftg[1]*fftg[2];
// half the coefficients belong to each spinor component
int num_waves = wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->num_waves / 2;
fft3d(xup, wf->G_bounds, wf->lattice, wf->kpts[KPOINT_NUM]->k,
wf->kpts[KPOINT_NUM]->Gs, wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->Cs,
num_waves, fftg);
fft3d(xdown, wf->G_bounds, wf->lattice, wf->kpts[KPOINT_NUM]->k,
wf->kpts[KPOINT_NUM]->Gs, wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->Cs + num_waves,
num_waves, fftg);
double* lattice = wf->lattice;
double vol = determinant(lattice);
// apply the Bloch phase factor exp(2*pi*i k.r) to both components
for (int i = 0; i < fftg[0]; i++) {
double frac[3] = {0,0,0};
double kdotr = 0;
for (int j = 0; j < fftg[1]; j++) {
for (int k = 0; k < fftg[2]; k++) {
frac[0] = (double) i / fftg[0];
frac[1] = (double) j / fftg[1];
frac[2] = (double) k / fftg[2];
kdotr = dot(wf->kpts[KPOINT_NUM]->k, frac);
xup[i*fftg[1]*fftg[2] + j*fftg[2] + k] *= cexp(2*PI*I*kdotr);
xdown[i*fftg[1]*fftg[2] + j*fftg[2] + k] *= cexp(2*PI*I*kdotr);
}
}
}
int num_sites = wf->num_sites;
// NOTE(review): parallel sites accumulate with += into shared xup/xdown;
// if augmentation spheres overlap the same grid point this is a data race
// -- confirm spheres are disjoint (same concern as realspace_state).
#pragma omp parallel for
for (int p = 0; p < num_sites; p++) {
projection_t up_pros =
wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->up_projections[p];
projection_t down_pros =
wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->down_projections[p];
ppot_t pp = pps[labels[p]];
// rmax: radius of this site's augmentation sphere
double rmax = pp.wave_grid[pp.wave_gridsize-1];
double res[3] = {0,0,0};
// half-widths of the enclosing box in FFT grid points, per direction
vcross(res, lattice+3, lattice+6);
int grid1 = (int) (mag(res) * rmax / vol * fftg[0]) + 1;
vcross(res, lattice+0, lattice+6);
int grid2 = (int) (mag(res) * rmax / vol * fftg[1]) + 1;
vcross(res, lattice+0, lattice+3);
int grid3 = (int) (mag(res) * rmax / vol * fftg[2]) + 1;
// grid indices nearest the atom center
int center1 = (int) round(coords[3*p+0] * fftg[0]);
int center2 = (int) round(coords[3*p+1] * fftg[1]);
int center3 = (int) round(coords[3*p+2] * fftg[2]);
for (int i = -grid1 + center1; i <= grid1 + center1; i++) {
double frac[3] = {0,0,0};
double testcoord[3] = {0,0,0};
int ii=0, jj=0, kk=0;
double phasecoord[3] = {0,0,0};
double phase = 0;
for (int j = -grid2 + center2; j <= grid2 + center2; j++) {
for (int k = -grid3 + center3; k <= grid3 + center3; k++) {
// Cartesian displacement from the atom to this grid point
testcoord[0] = (double) i / fftg[0] - coords[3*p+0];
testcoord[1] = (double) j / fftg[1] - coords[3*p+1];
testcoord[2] = (double) k / fftg[2] - coords[3*p+2];
frac_to_cartesian(testcoord, lattice);
if (mag(testcoord) < rmax) {
// wrap indices into the periodic grid
ii = (i%fftg[0] + fftg[0]) % fftg[0];
jj = (j%fftg[1] + fftg[1]) % fftg[1];
kk = (k%fftg[2] + fftg[2]) % fftg[2];
frac[0] = (double) ii / fftg[0];
frac[1] = (double) jj / fftg[1];
frac[2] = (double) kk / fftg[2];
// exact integer division: ii-i is a multiple of fftg[0]
phasecoord[0] = coords[3*p+0] + ((ii-i) / fftg[0]);
phasecoord[1] = coords[3*p+1] + ((jj-j) / fftg[1]);
phasecoord[2] = coords[3*p+2] + ((kk-k) / fftg[2]);
phase = dot(phasecoord, wf->kpts[KPOINT_NUM]->k);
// add the on-site partial-wave contribution per spinor channel
for (int n = 0; n < up_pros.total_projs; n++) {
xup[ii*fftg[1]*fftg[2] + jj*fftg[2] + kk] +=
wave_value(pp.funcs[up_pros.ns[n]],
pp.wave_gridsize, pp.wave_grid,
up_pros.ms[n], coords+3*p, frac, lattice)
* up_pros.overlaps[n] * cexp(2*PI*I*phase);
xdown[ii*fftg[1]*fftg[2] + jj*fftg[2] + kk] +=
wave_value(pp.funcs[down_pros.ns[n]],
pp.wave_gridsize, pp.wave_grid,
down_pros.ms[n], coords+3*p, frac, lattice)
* down_pros.overlaps[n] * cexp(2*PI*I*phase);
}
}
}
}
}
}
}
/**
 * Return the AE wavefunction on the FFT grid as a real/imaginary pair:
 * entries [0, gridsize) hold Re(psi), [gridsize, 2*gridsize) hold Im(psi).
 * Caller releases the returned buffer with free().
 */
double* realspace_state_ri(int BAND_NUM, int KPOINT_NUM,
    pswf_t* wf, int* fftg, int* labels, double* coords) {

    const int npts = fftg[0]*fftg[1]*fftg[2];
    double complex* psi = mkl_malloc(npts * sizeof(double complex), 64);
    realspace_state(psi, BAND_NUM, KPOINT_NUM, wf, fftg, labels, coords);

    double* out = (double*) malloc(2 * npts * sizeof(double));
    for (int g = 0; g < npts; g++) {
        out[g] = creal(psi[g]);
        out[g + npts] = cimag(psi[g]);
    }
    mkl_free(psi);
    return out;
}
/**
 * Return the two-spinor AE wavefunction on the FFT grid as real/imaginary
 * planes.  With gridsize = 2*fftg[0]*fftg[1]*fftg[2] (both spinor
 * components), the layout is [Re(up), Re(down), Im(up), Im(down)], each
 * plane fftg[0]*fftg[1]*fftg[2] doubles.  Caller frees with free().
 */
double* realspace_state_ncl_ri(int BAND_NUM, int KPOINT_NUM,
    pswf_t* wf, int* fftg, int* labels, double* coords) {
    int gridsize = 2*fftg[0]*fftg[1]*fftg[2];
    // FIX: was mkl_malloc(2 * gridsize * ...), twice the needed size --
    // gridsize already includes the factor of 2 for the spinor components,
    // and ncl_realspace_state fills exactly gridsize complex entries.
    double complex* x = mkl_malloc(gridsize * sizeof(double complex), 64);
    ncl_realspace_state(x, BAND_NUM, KPOINT_NUM, wf, fftg, labels, coords);
    double* rpip = (double*) malloc(2*gridsize * sizeof(double));
    for (int i = 0; i < gridsize; i++) {
        rpip[i] = creal(x[i]);
        rpip[i+gridsize] = cimag(x[i]);
    }
    mkl_free(x);
    return rpip;
}
/**
 * Write grid data to a text file in CHGCAR-like order (x index fastest,
 * then y, then z), five values per line, each value multiplied by `scale`.
 */
void write_volumetric(char* filename, double* x, int* fftg, double scale) {
    FILE* fp = fopen(filename, "w");
    int count = 0;
    for (int k = 0; k < fftg[2]; k++) {
        for (int j = 0; j < fftg[1]; j++) {
            for (int i = 0; i < fftg[0]; i++) {
                fprintf(fp, "%E ", x[i*fftg[1]*fftg[2] + j*fftg[2] + k] * scale);
                count++;
                if (count % 5 == 0) {
                    fprintf(fp, "\n");
                }
            }
        }
    }
    fclose(fp);
}
// Write the four planes of a noncollinear state to four files:
// filename1 = Re(up), filename2 = Re(down), filename3 = Im(up),
// filename4 = Im(down).  The buffer from realspace_state_ncl_ri holds
// 4*gridsize doubles in exactly that order.
void write_realspace_state_ncl_ri(char* filename1, char* filename2,
char* filename3, char* filename4,
int BAND_NUM, int KPOINT_NUM,
pswf_t* wf, int* fftg, int* labels, double* coords) {
int gridsize = fftg[0]*fftg[1]*fftg[2];
double* x = realspace_state_ncl_ri(BAND_NUM, KPOINT_NUM, wf, fftg, labels, coords);
write_volumetric(filename1, x+0*gridsize, fftg, 1);
write_volumetric(filename2, x+1*gridsize, fftg, 1);
write_volumetric(filename3, x+2*gridsize, fftg, 1);
write_volumetric(filename4, x+3*gridsize, fftg, 1);
// buffer was allocated with malloc, so plain free is correct here
free(x);
}
// Write Re(psi) to filename1 and Im(psi) to filename2 and return the
// underlying buffer (2*gridsize doubles, caller frees with free()).
double* write_realspace_state_ri_return(char* filename1, char* filename2,
int BAND_NUM, int KPOINT_NUM,
pswf_t* wf, int* fftg, int* labels, double* coords) {
double* x = realspace_state_ri(BAND_NUM, KPOINT_NUM, wf, fftg, labels, coords);
write_volumetric(filename1, x, fftg, 1);
write_volumetric(filename2, x+fftg[0]*fftg[1]*fftg[2], fftg, 1);
return x;
}
// Compute the AE charge density, write it to `filename` scaled by the cell
// volume (CHGCAR convention), and return the unscaled density buffer.
// The buffer is mkl_calloc'd, so the caller must free it with mkl_free.
double* write_density_return(char* filename, pswf_t* wf,
int* fftg, int* labels, double* coords) {
int gridsize = fftg[0] * fftg[1] * fftg[2];
double* x = mkl_calloc(gridsize, sizeof(double), 64);
ae_chg_density(x, wf, fftg, labels, coords);
double scale = determinant(wf->lattice);
write_volumetric(filename, x, fftg, scale);
return x;
}
// Convenience wrapper: write Re/Im of one state to two files and discard
// the buffer (malloc'd by realspace_state_ri, so free() matches).
void write_realspace_state_ri_noreturn(char* filename1, char* filename2, int BAND_NUM, int KPOINT_NUM,
pswf_t* wf, int* fftg, int* labels, double* coords) {
double* x = write_realspace_state_ri_return(filename1, filename2,
BAND_NUM, KPOINT_NUM, wf, fftg, labels, coords);
free(x);
}
// Convenience wrapper: compute and write the AE charge density, discarding
// the buffer (mkl_calloc'd by write_density_return, so mkl_free matches).
void write_density_noreturn(char* filename, pswf_t* wf,
int* fftg, int* labels, double* coords) {
// unbuffered stdout so progress output appears immediately
setbuf(stdout, NULL);
double* x = write_density_return(filename, wf, fftg, labels, coords);
mkl_free(x);
}
|
mx_util.c | #include <inttypes.h>
#include <omp.h>
#include <string.h>
#include "matrix.h"
#include "mx_util.h"
/*
 * Copy a 3-D mxArray into a new array padded with a one-cell border on
 * every face; interior cells are copied, border cells are left at the
 * value mxCreateNumericArray initializes them to (presumably zero --
 * confirm against MathWorks docs).  Supports single, double, and logical
 * inputs; any other class yields an array with no data copied.
 * Caller owns the returned mxArray.
 */
mxArray *
mx_pad_boundary(const mxArray *mxx)
{
const size_t *sz = (const size_t *)mxGetDimensions(mxx);
const size_t szp[3] = {sz[0]+2, sz[1]+2, sz[2]+2};
mxArray *mxxp = mxCreateNumericArray(3, szp, mxGetClassID(mxx), mxREAL);
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t nxp = szp[0];
const size_t nyp = szp[1];
const size_t nxnyp = nxp*nyp;
if (mxIsSingle(mxx)) {
float *x = (float *)mxGetData(mxx);
float *xp = (float *)mxGetData(mxxp);
/* parallelize only when the volume is large enough to pay off */
#pragma omp parallel for schedule(static) collapse(3) \
if(nxny*nz > 32*32*32)
for (size_t k = 0; k < nz; ++k) {
for (size_t j = 0; j < ny; ++j) {
for (size_t i = 0; i < nx; ++i) {
xp[(i+1) + nxp*(j+1) + nxnyp*(k+1)] = x[i + nx*j + nxny*k];
}
}
}
} else if (mxIsDouble(mxx)) {
double *x = (double *)mxGetData(mxx);
double *xp = (double *)mxGetData(mxxp);
#pragma omp parallel for schedule(static) collapse(3) \
if(nxny*nz > 32*32*32)
for (size_t k = 0; k < nz; ++k) {
for (size_t j = 0; j < ny; ++j) {
for (size_t i = 0; i < nx; ++i) {
xp[(i+1) + nxp*(j+1) + nxnyp*(k+1)] = x[i + nx*j + nxny*k];
}
}
}
} else if (mxIsLogical(mxx)) {
/* mxLogical is byte-sized, so uint8_t access is layout-compatible */
uint8_t *x = (uint8_t *)mxGetData(mxx);
uint8_t *xp = (uint8_t *)mxGetData(mxxp);
#pragma omp parallel for schedule(static) collapse(3) \
if(nxny*nz > 32*32*32)
for (size_t k = 0; k < nz; ++k) {
for (size_t j = 0; j < ny; ++j) {
for (size_t i = 0; i < nx; ++i) {
xp[(i+1) + nxp*(j+1) + nxnyp*(k+1)] = x[i + nx*j + nxny*k];
}
}
}
}
return mxxp;
}
/*
 * Inverse of mx_pad_boundary: strip the one-cell border from a padded 3-D
 * mxArray, copying only the interior into a new array two cells smaller in
 * every dimension.  Supports single, double, and logical inputs; caller
 * owns the returned mxArray.
 */
mxArray *
mx_unpad_boundary(const mxArray *mxxp)
{
const size_t *szp = (const size_t *)mxGetDimensions(mxxp);
const size_t sz[3] = {szp[0]-2, szp[1]-2, szp[2]-2};
mxArray *mxx = mxCreateNumericArray(3, sz, mxGetClassID(mxxp), mxREAL);
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t nxp = szp[0];
const size_t nyp = szp[1];
const size_t nxnyp = nxp*nyp;
if (mxIsSingle(mxxp)) {
float *x = (float *)mxGetData(mxx);
float *xp = (float *)mxGetData(mxxp);
/* parallelize only when the volume is large enough to pay off */
#pragma omp parallel for schedule(static) collapse(3) \
if(nxny*nz > 32*32*32)
for (size_t k = 0; k < nz; ++k) {
for (size_t j = 0; j < ny; ++j) {
for (size_t i = 0; i < nx; ++i) {
x[i + nx*j + nxny*k] = xp[(i+1) + nxp*(j+1) + nxnyp*(k+1)];
}
}
}
} else if (mxIsDouble(mxxp)) {
double *x = (double *)mxGetData(mxx);
double *xp = (double *)mxGetData(mxxp);
#pragma omp parallel for schedule(static) collapse(3) \
if(nxny*nz > 32*32*32)
for (size_t k = 0; k < nz; ++k) {
for (size_t j = 0; j < ny; ++j) {
for (size_t i = 0; i < nx; ++i) {
x[i + nx*j + nxny*k] = xp[(i+1) + nxp*(j+1) + nxnyp*(k+1)];
}
}
}
} else if (mxIsLogical(mxxp)) {
/* mxLogical is byte-sized, so uint8_t access is layout-compatible */
uint8_t *x = (uint8_t *)mxGetData(mxx);
uint8_t *xp = (uint8_t *)mxGetData(mxxp);
#pragma omp parallel for schedule(static) collapse(3) \
if(nxny*nz > 32*32*32)
for (size_t k = 0; k < nz; ++k) {
for (size_t j = 0; j < ny; ++j) {
for (size_t i = 0; i < nx; ++i) {
x[i + nx*j + nxny*k] = xp[(i+1) + nxp*(j+1) + nxnyp*(k+1)];
}
}
}
}
return mxx;
}
/* Zero every element of an mxArray in place (element count x element size
 * bytes, regardless of class). */
void
mx_zero(mxArray *mxx)
{
    const size_t nbytes = mxGetNumberOfElements(mxx) * mxGetElementSize(mxx);
    memset(mxGetData(mxx), 0, nbytes);
}
|
atomic_ops.h | /**
* @file atomic_ops.h
* @author Yibo Lin (DREAMPlace)
* @date Apr 2020
*/
#include <type_traits>
#include "utility/src/utils.h"
DREAMPLACE_BEGIN_NAMESPACE
/// @brief A class generalized scaled atomic addition for floating point number
/// and integers. For integer, we use it as a fixed point number with the LSB
/// part for fractions.
///
/// Primary template: used for non-integral (floating point) accumulator
/// types; performs a plain `#pragma omp atomic` addition with no scaling.
template <typename T, bool = std::is_integral<T>::value>
struct AtomicAdd {
typedef T type;
/// @brief constructor; the argument is ignored here and exists only for
/// interface symmetry with the fixed-point (integral) specialization, so
/// both variants can be constructed with a scale factor uniformly.
AtomicAdd(type = 1) {}
/// @brief atomically perform `*dst += v` (v is converted per the usual
/// arithmetic conversions of the compound assignment)
template <typename V>
inline void operator()(type* dst, V v) const {
#pragma omp atomic
*dst += v;
}
};
/// @brief For atomic addition of fixed point number using integers.
/// The incoming value is multiplied by scale_factor and then converted to
/// the integer accumulator type before the atomic add; note the conversion
/// truncates toward zero (no rounding).
template <typename T>
struct AtomicAdd<T, true> {
typedef T type;
type scale_factor; ///< a scale factor to scale fraction into integer
/// @brief constructor
/// @param sf scale factor
AtomicAdd(type sf = 1) : scale_factor(sf) {}
/// @brief atomically add round-toward-zero(v * scale_factor) into *dst
template <typename V>
inline void operator()(type* dst, V v) const {
type sv = v * scale_factor;
#pragma omp atomic
*dst += sv;
}
};
DREAMPLACE_END_NAMESPACE
|
GB_unaryop__abs_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_int8
// op(A') function: GB_tran__abs_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = (uint8_t) aij elementwise over the anz entries of Ax, writing
// into Cx (per the GB_CAST_OP / GB_OP macros above: ABS on uint8 after the
// int8 -> uint8 cast is the identity).  Auto-generated file: do not edit by
// hand (see header note).
GrB_Info GB_unop__abs_uint8_int8
(
uint8_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose kernel body is shared across all
// operator/type combinations and is pulled in by textual inclusion of
// GB_unaryop_transpose.c, specialized by the macros defined above.
// Auto-generated file: do not edit by hand (see header note).
GrB_Info GB_tran__abs_uint8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c |
#include <stdio.h>
#include <omp.h>
#include <math.h>
int main(void) {
double A = 2.0;
float B = 2.0;
// NOTE(review): powi() is not declared in <math.h> and is not standard C;
// it is a compiler/offload-runtime builtin (cf. LLVM's @llvm.powi and the
// commented-out powif below).  This program appears to deliberately
// exercise its availability inside an OpenMP target region -- confirm the
// intended toolchain before "fixing" this to pow().
#pragma omp target map(A,B)
{
A = powi(A, 4);
//B = powif(B, 4);
}
printf("%lf\n",A);
//printf("%f\n",B);
return 0;
}
|
for-18.c | /* { dg-do compile } */
/* { dg-options "-O -fopenmp -fdump-tree-ompexp" } */
/* Lowering test for 'schedule (dynamic, chunk)': covers a runtime chunk
   (i * 4) and a constant chunk (4), each over a unit-stride loop (j++) and
   a runtime-stride loop (j += m - 1).  Bodies are numbered 1..4 so each
   lowered loop is distinguishable in the ompexp dump scanned below.
   Do not restructure the loops: the dg-final scans depend on this shape. */
void
foo (int *a, int i)
{
int j, k = 1, l = 30, m = 4;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)
for (j = 0; j <= l; j++)
a[j] = 1;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 2;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)
for (j = 0; j <= l; j++)
a[j] = 3;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 4;
}
/* Same shape as foo() above but for 'schedule (guided, chunk)', feeding the
   GOMP_parallel_loop_guided_start scan.  Do not restructure the loops. */
void
bar (int *a, int i)
{
int j, k = 1, l = 30, m = 4;
#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)
for (j = 0; j <= l; j++)
a[j] = 1;
#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 2;
#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)
for (j = 0; j <= l; j++)
a[j] = 3;
#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 4;
}
/* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_dynamic_start" 4 "ompexp" { xfail *-*-* } } } */
/* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_guided_start" 4 "ompexp" { xfail *-*-* } } } */
|
Preconditioners.h | /*
Copyright (c) 2005-2016, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Aboria.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PRECONDITIONERS_H_
#define PRECONDITIONERS_H_
#include <algorithm>
#include <fstream>
#include <unordered_map>
#ifdef HAVE_CAIRO
#include <cairo-svg.h>
#endif
#ifdef HAVE_EIGEN
#include <unsupported/Eigen/SparseExtra>
namespace Aboria {
namespace detail {
// Compile-time recursion over the diagonal blocks of a MatrixReplacement:
// f is invoked once per diagonal index I in [0, NI), either with the
// matching segments of x and b plus the block, or with the block alone.
// Recursion terminator (with vectors): I has reached NI; nothing to do.
template <typename Function, typename Dest, unsigned int NI, unsigned int NJ,
typename Blocks, typename Rhs>
void apply_function_to_diagonal_blocks(
Function &&f, Dest &y, const MatrixReplacement<NI, NJ, Blocks> &mat,
const Rhs &rhs, std::integral_constant<unsigned int, NI>) {}
// Recursion terminator (blocks only).
template <typename Function, unsigned int NI, unsigned int NJ, typename Blocks>
void apply_function_to_diagonal_blocks(
Function &&f, const MatrixReplacement<NI, NJ, Blocks> &mat,
std::integral_constant<unsigned int, NI>) {}
// Recursive step (with vectors): apply f to the I-th diagonal block and the
// row/col segments of x and b that it maps, then recurse on I + 1.
template <typename Function, typename Dest, unsigned int NI, unsigned int NJ,
typename Blocks, typename Rhs, unsigned int I>
void apply_function_to_diagonal_blocks(
Function &&f, Dest &x, const MatrixReplacement<NI, NJ, Blocks> &mat,
const Rhs &b, std::integral_constant<unsigned int, I>) {
f(x.segment(mat.template start_row<I>(), mat.template size_row<I>()),
b.segment(mat.template start_col<I>(), mat.template size_col<I>()),
std::get<I * NJ + I>(mat.m_blocks));
apply_function_to_diagonal_blocks(
std::forward<Function>(f), x, mat, b,
std::integral_constant<unsigned int, I + 1>());
}
// Recursive step (blocks only).
template <typename Function, unsigned int NI, unsigned int NJ, typename Blocks,
unsigned int I>
void apply_function_to_diagonal_blocks(
Function &&f, const MatrixReplacement<NI, NJ, Blocks> &mat,
std::integral_constant<unsigned int, I>) {
f(std::get<I * NJ + I>(mat.m_blocks));
apply_function_to_diagonal_blocks(
std::forward<Function>(f), mat,
std::integral_constant<unsigned int, I + 1>());
}
// Public entry point (with vectors): start the recursion at I = 0.
template <typename Function, typename Dest, unsigned int NI, unsigned int NJ,
typename Blocks, typename Rhs>
void apply_function_to_diagonal_blocks(
Function &&function, Dest &x, const MatrixReplacement<NI, NJ, Blocks> &mat,
const Rhs &b) {
apply_function_to_diagonal_blocks(std::forward<Function>(function), x, mat, b,
std::integral_constant<unsigned int, 0>());
}
// Public entry point (blocks only): start the recursion at I = 0.
template <typename Function, unsigned int NI, unsigned int NJ, typename Blocks>
void apply_function_to_diagonal_blocks(
Function &&function, const MatrixReplacement<NI, NJ, Blocks> &mat) {
apply_function_to_diagonal_blocks(std::forward<Function>(function), mat,
std::integral_constant<unsigned int, 0>());
}
} // namespace detail
/// @brief Preconditioner that approximates the inverse of a kernel operator
/// by Chebyshev interpolation.
///
/// The kernel is evaluated on a tensor-product grid of m_order^d Chebyshev
/// nodes and that small nodal matrix is factorized once with `Solver`.
/// Application is then anterpolation of the rhs onto the nodes, a nodal
/// solve, and interpolation back to the observation points (_solve_impl).
template <typename Solver> class ChebyshevPreconditioner {
  typedef double Scalar;
  typedef size_t Index;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> matrix_type;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> vector_type;
  typedef Solver solver_type;
  typedef std::vector<size_t> storage_vector_type;
  typedef std::vector<storage_vector_type> connectivity_type;

protected:
  bool m_isInitialized;

private:
  int m_order; ///< number of Chebyshev nodes per dimension
  matrix_type m_col_Rn_matrix, m_row_Rn_matrix; ///< (anter)interpolation ops
  solver_type m_factorized_matrix; ///< factorization of the nodal kernel
  Index m_rows;
  Index m_cols;
  mutable vector_type m_W;     ///< scratch: anterpolated weights
  mutable vector_type m_fcheb; ///< scratch: nodal solution

public:
  typedef typename vector_type::StorageIndex StorageIndex;
  enum {
    ColsAtCompileTime = Eigen::Dynamic,
    MaxColsAtCompileTime = Eigen::Dynamic
  };

  ChebyshevPreconditioner() : m_isInitialized(false), m_order(10) {}

  /// BUG FIX: this constructor previously left m_isInitialized and m_order
  /// uninitialized before calling compute(), which reads m_order.
  template <typename MatType>
  explicit ChebyshevPreconditioner(const MatType &mat)
      : m_isInitialized(false), m_order(10) {
    compute(mat);
  }

  Index rows() const { return m_rows; }
  Index cols() const { return m_cols; }

  /// @brief set the number of Chebyshev nodes per dimension (default 10)
  void set_order(int arg) { m_order = arg; }

  /// @brief build the row/column Chebyshev interpolation matrices for one
  /// diagonal block and factorize the kernel evaluated at the nodes
  template <typename Kernel>
  void analyze_impl_block(const Index start_row, const Kernel &kernel) {
    typedef typename Kernel::row_elements_type row_elements_type;
    typedef typename Kernel::col_elements_type col_elements_type;
    typedef typename row_elements_type::query_type query_type;
    typedef typename query_type::traits_type traits_type;
    typedef typename traits_type::double_d double_d;
    typedef typename traits_type::int_d int_d;
    typedef typename traits_type::position position;
    const unsigned int dimension = query_type::dimension;
    const row_elements_type &rows = kernel.get_row_elements();
    // BUG FIX: 'cols' and 'col_bounds' were copy-pasted from the row
    // particle set (kernel.get_row_elements() / rows.get_query()); use the
    // column particle set so the preconditioner is correct when row and
    // column particles differ.
    const col_elements_type &cols = kernel.get_col_elements();
    auto row_bounds = rows.get_query().get_bounds();
    auto col_bounds = cols.get_query().get_bounds();
    detail::ChebyshevRn<dimension> row_Rn(m_order, row_bounds);
    detail::ChebyshevRn<dimension> col_Rn(m_order, col_bounds);
    const size_t ncheb = std::pow(m_order, dimension);
    LOG(2,
        "ChebyshevPreconditioner:analyzing_impl_block with ncheb = " << ncheb);
    const int_d start = int_d::Constant(0);
    const int_d end = int_d::Constant(m_order);
    // fill row_Rn matrix (interpolation from nodes to row particles)
    m_row_Rn_matrix.resize(rows.size(), ncheb);
    for (size_t i = 0; i < rows.size(); ++i) {
      row_Rn.set_position(get<position>(rows)[i]);
      lattice_iterator<dimension> mj(start, end);
      for (size_t j = 0; j < ncheb; ++j, ++mj) {
        m_row_Rn_matrix(i, j) = row_Rn(*mj);
      }
    }
    // fill col_Rn matrix (anterpolation from column particles to nodes)
    m_col_Rn_matrix.resize(cols.size(), ncheb);
    for (size_t i = 0; i < cols.size(); ++i) {
      // BUG FIX: was get<position>(rows)[i] (copy-paste)
      col_Rn.set_position(get<position>(cols)[i]);
      lattice_iterator<dimension> mj(start, end);
      for (size_t j = 0; j < ncheb; ++j, ++mj) {
        m_col_Rn_matrix(i, j) = col_Rn(*mj);
      }
    }
    // fill the ncheb x ncheb node-to-node kernel matrix
    matrix_type kernel_matrix(ncheb, ncheb);
    lattice_iterator<dimension> mi(start, end);
    for (size_t i = 0; i < ncheb; ++i, ++mi) {
      const double_d pi = col_Rn.get_position(*mi);
      lattice_iterator<dimension> mj(start, end);
      for (size_t j = 0; j < ncheb; ++j, ++mj) {
        const double_d pj = row_Rn.get_position(*mj);
        kernel_matrix(i, j) = kernel.get_position_function()(pi, pj);
      }
    }
    m_factorized_matrix.compute(kernel_matrix);
    // sanity-check the factorization with a random right hand side
    Eigen::VectorXd b = Eigen::VectorXd::Random(kernel_matrix.rows());
    Eigen::VectorXd x = m_factorized_matrix.solve(b);
    double relative_error = (kernel_matrix * x - b).norm() / b.norm();
    if (relative_error > 1e-3 || std::isnan(relative_error)) {
      std::cout << "relative error = " << relative_error << std::endl;
    }
  }

  /// @brief zero kernels contribute nothing; nothing to analyze
  template <typename RowParticles, typename ColParticles>
  void
  analyze_impl_block(const Index start_row,
                     const KernelZero<RowParticles, ColParticles> &kernel) {}

  /// @brief analyze every diagonal block of the block operator
  template <unsigned int NI, unsigned int NJ, typename Blocks, std::size_t... I>
  void analyze_impl(const MatrixReplacement<NI, NJ, Blocks> &mat,
                    detail::index_sequence<I...>) {
    int dummy[] = {0, (analyze_impl_block(mat.template start_row<I>(),
                                          std::get<I * NJ + I>(mat.m_blocks)),
                       0)...};
    static_cast<void>(dummy);
  }

  template <unsigned int NI, unsigned int NJ, typename Blocks>
  ChebyshevPreconditioner &
  analyzePattern(const MatrixReplacement<NI, NJ, Blocks> &mat) {
    LOG(2, "ChebyshevPreconditioner: analyze pattern");
    m_rows = mat.rows();
    m_cols = mat.cols();
    analyze_impl(mat, detail::make_index_sequence<NI>());
    return *this;
  }

  /// @brief sparse input cannot be analyzed directly; a prior analyzePattern
  /// with an Aboria MatrixReplacement must have built the Rn matrices
  template <int _Options, typename _StorageIndex>
  ChebyshevPreconditioner &analyzePattern(
      const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex> &mat) {
    CHECK(m_row_Rn_matrix.rows() > 0,
          "ChebyshevPreconditioner::analyzePattern(): cannot analyze sparse "
          "matrix, "
          "call analyzePattern using a Aboria MatrixReplacement class first");
    return *this;
  }

  template <int _Options, typename _StorageIndex, int RefOptions,
            typename RefStrideType>
  ChebyshevPreconditioner &
  analyzePattern(const Eigen::Ref<
                 const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex>,
                 RefOptions, RefStrideType> &mat) {
    CHECK(m_row_Rn_matrix.rows() > 0,
          "ChebyshevPreconditioner::analyzePattern(): cannot analyze sparse "
          "matrix, "
          "call analyzePattern using a Aboria MatrixReplacement class first");
    return *this;
  }

  template <typename Derived>
  ChebyshevPreconditioner &
  analyzePattern(const Eigen::DenseBase<Derived> &mat) {
    CHECK(m_row_Rn_matrix.rows() > 0,
          "ChebyshevPreconditioner::analyzePattern(): cannot analyze dense "
          "matrix, "
          "call analyzePattern need to pass a Aboria MatrixReplacement class "
          "first");
    return *this;
  }

  /// @brief the real work happens in analyzePattern; just validate sizes
  template <typename MatType>
  ChebyshevPreconditioner &factorize(const MatType &mat) {
    LOG(2, "ChebyshevPreconditioner: factorizing domain");
    eigen_assert(
        static_cast<typename MatType::Index>(m_rows) == mat.rows() &&
        "ChebyshevPreconditioner::solve(): invalid number of rows of mat");
    eigen_assert(
        static_cast<typename MatType::Index>(m_cols) == mat.cols() &&
        "ChebyshevPreconditioner::solve(): invalid number of rows of mat");
    m_isInitialized = true;
    return *this;
  }

  template <typename MatType>
  ChebyshevPreconditioner &compute(const MatType &mat) {
    analyzePattern(mat);
    return factorize(mat);
  }

  /** \internal */
  template <typename Rhs, typename Dest>
  void _solve_impl(const Rhs &b, Dest &x) const {
    // first compute the weights at the Chebyshev nodes y_m by anterpolation
    m_W = m_col_Rn_matrix.transpose() * b;
    // next compute f(x) at the Chebyshev nodes x_l
    m_fcheb = m_factorized_matrix.solve(m_W);
    // last compute f(x) at the observation points x_i by interpolation
    x = m_row_Rn_matrix * m_fcheb;
  }

  template <typename Rhs>
  inline const Eigen::Solve<ChebyshevPreconditioner, Rhs>
  solve(const Eigen::MatrixBase<Rhs> &b) const {
    eigen_assert(
        static_cast<typename Rhs::Index>(m_rows) == b.rows() &&
        "ChebyshevPreconditioner::solve(): invalid number of rows of the "
        "right hand side matrix b");
    eigen_assert(m_isInitialized &&
                 "ChebyshevPreconditioner is not initialized.");
    return Eigen::Solve<ChebyshevPreconditioner, Rhs>(*this, b.derived());
  }

  Eigen::ComputationInfo info() { return Eigen::Success; }
}; // class ChebyshevPreconditioner
#ifdef HAVE_H2LIB
/// @brief Block preconditioner for operators containing H2 kernel blocks:
/// each diagonal H2 block is solved approximately with `Solver` at
/// tolerance m_tol; non-H2 diagonal blocks are passed through unchanged.
template <typename Solver> class ReducedOrderPreconditioner {
  typedef Solver solver_type;
  typedef size_t Index;
  typedef double Scalar;
  typedef H2LibMatrix h2_matrix_type;
  Index m_rows;
  Index m_cols;
  double m_tol; ///< factorization tolerance for the H2 solver
  std::vector<size_t> m_col_sizes;
  std::vector<size_t> m_row_sizes;
  std::vector<std::shared_ptr<solver_type>> m_solvers; ///< nullptr = non-H2
  std::vector<const h2_matrix_type *> m_h2mats;

public:
  typedef size_t StorageIndex;
  enum {
    ColsAtCompileTime = Eigen::Dynamic,
    MaxColsAtCompileTime = Eigen::Dynamic
  };

  // BUG FIX: both constructors previously left m_isInitialized
  // indeterminate, so the eigen_assert in solve() read an uninitialized
  // bool (undefined behavior) when factorize() had not yet run.
  ReducedOrderPreconditioner() : m_tol(1e-5), m_isInitialized(false) {}

  template <typename MatType>
  explicit ReducedOrderPreconditioner(const MatType &mat)
      : m_tol(1e-5), m_isInitialized(false) {
    compute(mat);
  }

  Index rows() const { return m_rows; }
  Index cols() const { return m_cols; }

  /// @brief set the H2 factorization tolerance (default 1e-5)
  void set_tolerance(const double tol) { m_tol = tol; }

  template <unsigned int NI, unsigned int NJ, typename Blocks>
  ReducedOrderPreconditioner &
  analyzePattern(const MatrixReplacement<NI, NJ, Blocks> &mat) {
    // fixed log label: previously said "ExtMatrixPreconditioner"
    LOG(2, "ReducedOrderPreconditioner: analyze pattern");
    m_rows = mat.rows();
    m_cols = mat.cols();
    return *this;
  }

  /// Functor applied to each diagonal block by factorize(): records block
  /// sizes and factorizes H2 blocks with the requested tolerance.
  struct factorize_block {
    std::vector<size_t> &m_col_sizes;
    std::vector<size_t> &m_row_sizes;
    std::vector<std::shared_ptr<solver_type>> &m_solvers;
    std::vector<const h2_matrix_type *> &m_h2mats;
    double m_tol;
    int i; ///< index of the diagonal block currently being visited
    factorize_block(std::vector<size_t> &col_sizes,
                    std::vector<size_t> &row_sizes,
                    std::vector<std::shared_ptr<solver_type>> &solvers,
                    std::vector<const h2_matrix_type *> &h2mats, double tol)
        : m_col_sizes(col_sizes), m_row_sizes(row_sizes), m_solvers(solvers),
          m_h2mats(h2mats), m_tol(tol), i(0) {}

    /// generic (non-H2) block: no solver; _solve_impl applies the identity
    template <typename Block> void operator()(const Block &block) {
      LOG(2, "ReducedOrderPreconditioner: block " << i << ": non h2 block");
      m_solvers[i] = nullptr;
      m_col_sizes[i] = block.cols();
      m_row_sizes[i] = block.rows();
      ++i;
    }

    /// H2 kernel block: factorize and log the achieved accuracy
    template <typename RowParticles, typename ColParticles, typename PositionF,
              typename F>
    void operator()(
        const KernelH2<RowParticles, ColParticles, PositionF, F> &kernel) {
      m_h2mats[i] = &kernel.get_h2_matrix();
      m_col_sizes[i] = kernel.cols();
      m_row_sizes[i] = kernel.rows();
      LOG(2, "ReducedOrderPreconditioner: block "
                 << i << ": factorise h2 matrix with tolerance " << m_tol);
      m_solvers[i] = std::make_shared<solver_type>(
          m_h2mats[i]->get_ph2matrix(), m_h2mats[i]->get_pblock(), m_tol);
      // estimate accuracy: multiply the all-ones vector through the H2
      // matrix, solve back, and compare with the original
      std::vector<double> b(m_col_sizes[i]);
      std::vector<double> b2(m_row_sizes[i]);
      for (size_t ii = 0; ii < m_col_sizes[i]; ++ii) {
        b[ii] = 1.0;
      }
      m_h2mats[i]->matrix_vector_multiply(b2, 1, false, b);
      m_solvers[i]->solve(b2, b2);
      double sum = 0;
      double sum2 = 0;
      for (size_t ii = 0; ii < m_row_sizes[i]; ++ii) {
        sum += std::pow(b2[ii] - b[ii], 2);
        sum2 += std::pow(b[ii], 2);
      }
      LOG(2, "ReducedOrderPreconditioner: block "
                 << i << ": factorisation accuracy: " << std::sqrt(sum / sum2));
      ++i;
    }
  };

  template <unsigned int NI, unsigned int NJ, typename Blocks>
  ReducedOrderPreconditioner &
  factorize(const MatrixReplacement<NI, NJ, Blocks> &mat) {
    LOG(2, "ReducedOrderPreconditioner: factorizing domain");
    m_rows = mat.rows();
    m_cols = mat.cols();
    m_solvers.resize(NI);
    m_h2mats.resize(NI);
    m_col_sizes.resize(NI);
    m_row_sizes.resize(NI);
    detail::apply_function_to_diagonal_blocks(
        factorize_block(m_col_sizes, m_row_sizes, m_solvers, m_h2mats, m_tol),
        mat);
    m_isInitialized = true;
    return *this;
  }

  template <typename MatType>
  ReducedOrderPreconditioner &compute(const MatType &mat) {
    analyzePattern(mat);
    return factorize(mat);
  }

  /// @brief apply the preconditioner block-by-block: H2 blocks are solved,
  /// all other blocks copy the rhs segment through unchanged
  template <typename Rhs, typename Dest>
  void _solve_impl(const Rhs &b, Dest &x) const {
    size_t row = 0;
    size_t col = 0;
    Eigen::Matrix<double, Eigen::Dynamic, 1> x_buffer;
    for (size_t i = 0; i < m_solvers.size(); ++i) {
      auto b_segment = b.segment(col, m_col_sizes[i]);
      auto x_segment = x.segment(row, m_row_sizes[i]);
      if (m_solvers[i] != nullptr) { // solver only exists for h2 blocks
        LOG(2, "ReducedOrderPreconditioner: block "
                   << i << " solve for " << m_row_sizes[i] << "x"
                   << m_col_sizes[i] << " matrix");
        x_buffer.resize(m_col_sizes[i]);
        m_solvers[i]->solve(b_segment, x_buffer);
        x_segment = x_buffer;
      } else {
        // identity for non-h2 blocks
        x_segment = b_segment;
      }
      row += m_row_sizes[i];
      col += m_col_sizes[i];
    }
    LOG(2, "ReducedOrderPreconditioner: done solve_impl");
  }

  template <typename Rhs>
  inline const Eigen::Solve<ReducedOrderPreconditioner, Rhs>
  solve(const Eigen::MatrixBase<Rhs> &b) const {
    eigen_assert(m_rows == b.rows() &&
                 "ReducedOrderPreconditioner::solve(): invalid number of rows "
                 "of the right hand side matrix b");
    // fixed assert message: was "ReducedOrderPreconditioneris not initialized."
    eigen_assert(m_isInitialized &&
                 "ReducedOrderPreconditioner is not initialized.");
    return Eigen::Solve<ReducedOrderPreconditioner, Rhs>(*this, b.derived());
  }

  Eigen::ComputationInfo info() { return Eigen::Success; }

protected:
  bool m_isInitialized;
};
#endif // HAVE_H2LIB
#if 0
template <unsigned int ReducedOrder,
typename InnerPreconditioner=Eigen::IncompleteLUT<double>,
typename IterativeSolver=Eigen::DGMRES<Eigen::SparseMatrix<double>,
InnerPreconditioner>>
class ExtMatrixPreconditioner {
typedef double Scalar;
typedef size_t Index;
typedef Eigen::SparseMatrix<Scalar> sparse_matrix_type;
typedef Eigen::Matrix<Scalar,Eigen::Dynamic,1> vector_type;
typedef Eigen::Matrix<int,Eigen::Dynamic,1> index_vector_type;
typedef InnerPreconditioner solver_type;
Index m_rows;
Index m_cols;
size_t m_inner_iterations;
std::vector<index_vector_type> m_col_maps;
std::vector<size_t> m_col_sizes;
std::vector<size_t> m_row_sizes;
std::vector<index_vector_type> m_row_maps;
std::vector<std::shared_ptr<solver_type>> m_solvers;
std::vector<sparse_matrix_type> m_ext_matrices;
std::vector<sparse_matrix_type> m_str_ext_matrices;
public:
typedef typename vector_type::StorageIndex StorageIndex;
enum {
ColsAtCompileTime = Eigen::Dynamic,
MaxColsAtCompileTime = Eigen::Dynamic
};
ExtMatrixPreconditioner():m_inner_iterations(10)
{}
template<typename MatType>
explicit ExtMatrixPreconditioner(const MatType& mat):m_inner_iterations(10) {
compute(mat);
}
Index rows() const { return m_rows; }
Index cols() const { return m_cols; }
template<unsigned int NI, unsigned int NJ, typename Blocks>
ExtMatrixPreconditioner& analyzePattern(const MatrixReplacement<NI,NJ,Blocks>& mat)
{
LOG(2,"ExtMatrixPreconditioner: analyze pattern");
m_rows = mat.rows();
m_cols = mat.cols();
return *this;
}
struct factorize_block {
std::vector<size_t> &m_col_sizes;
std::vector<size_t> &m_row_sizes;
std::vector<index_vector_type> &m_col_maps;
std::vector<index_vector_type> &m_row_maps;
std::vector<std::shared_ptr<solver_type>> &m_solvers;
std::vector<sparse_matrix_type> &m_ext_matrices;
std::vector<sparse_matrix_type> &m_str_ext_matrices;
size_t m_inner_iterations;
int i;
factorize_block(std::vector<size_t>& col_sizes,
std::vector<size_t>& row_sizes,
std::vector<index_vector_type>& col_maps,
std::vector<index_vector_type>& row_maps,
std::vector<std::shared_ptr<solver_type>>& solvers,
std::vector<sparse_matrix_type>& ext_matrices,
std::vector<sparse_matrix_type>& str_ext_matrices,
size_t inner_iterations):
m_col_sizes(col_sizes),m_row_sizes(row_sizes),m_col_maps(col_maps),m_row_maps(row_maps),m_solvers(solvers),m_ext_matrices(ext_matrices),m_str_ext_matrices(str_ext_matrices),m_inner_iterations(inner_iterations),i(0) {}
template <typename Block>
void operator()(const Block& block) {
LOG(2,"ExtMatrixPreconditioner: block "<<i<<": non h2 block");
m_solvers[i] = nullptr;
m_col_sizes[i] = block.cols();
m_row_sizes[i] = block.rows();
++i;
}
template <typename RowParticles, typename ColParticles, typename PositionF>
void operator()(const KernelH2<RowParticles,ColParticles,PositionF>& kernel) {
static const unsigned int dimension = RowParticles::dimension;
auto h2 = make_h2_matrix(kernel.get_row_particles(),
kernel.get_col_particles(),
make_black_box_expansion<dimension,ReducedOrder>(
kernel.get_position_function()));
LOG(2,"ExtMatrixPreconditioner: block "<<i<<": generate extended matrix");
m_ext_matrices[i] = h2.gen_extended_matrix();
m_str_ext_matrices[i] = h2.gen_stripped_extended_matrix();
/*
Eigen::saveMarket(m_ext_matrices[i],"ext_matrix.mat");
Eigen::saveMarket(m_str_ext_matrices[i],"str_ext_matrix.mat");
std::ofstream myfile;
myfile.open ("ext_matrix.csv");
myfile << Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic>(m_ext_matrices[i]);
myfile.close();
myfile.open ("str_ext_matrix.csv");
myfile << Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic>(m_str_ext_matrices[i]);
myfile.close();
*/
m_col_sizes[i] = kernel.cols();
m_row_sizes[i] = kernel.rows();
m_col_maps[i] = h2.gen_column_map();
m_row_maps[i] = h2.gen_row_map();
LOG(2,"ExtMatrixPreconditioner: block "<<i<<": set precon");
//LOG(2,"ExtMatrixPreconditioner: block "<<i<<": create solver");
//m_solvers[i] = std::make_shared<solver_type>(m_ext_matrices[i]);
m_solvers[i] = std::make_shared<solver_type>();
m_solvers[i]->setDroptol(0.1);
m_solvers[i]->compute(m_str_ext_matrices[i]);
if (m_solvers[i]->info() != Eigen::Success) {
ERROR("ExtMatrixPreconditioner inner preconditioner could not factorize");
}
//m_solvers[i]->setMaxIterations(m_inner_iterations);
//LOG(2,"ExtMatrixPreconditioner: block "<<i<<": set precon");
//m_solvers[i]->preconditioner().setDroptol(0.1);
//m_solvers[i]->preconditioner().compute(m_str_ext_matrices[i]);
LOG(2,"ExtMatrixPreconditioner: block "<<i<<": factorization complete");
++i;
}
};
template <unsigned int NI, unsigned int NJ, typename Blocks>
ExtMatrixPreconditioner& factorize(const MatrixReplacement<NI,NJ,Blocks>& mat)
{
LOG(2,"ExtMatrixPreconditioner: factorizing domain");
m_rows = mat.rows();
m_cols = mat.cols();
m_solvers.resize(NI);
m_ext_matrices.resize(NI);
m_str_ext_matrices.resize(NI);
m_col_sizes.resize(NI);
m_row_sizes.resize(NI);
m_col_maps.resize(NI);
m_row_maps.resize(NI);
detail::apply_function_to_diagonal_blocks(
factorize_block(m_col_sizes,m_row_sizes,m_col_maps,m_row_maps,
m_solvers,m_ext_matrices,m_str_ext_matrices,m_inner_iterations),
mat);
m_isInitialized = true;
return *this;
}
template<typename MatType>
ExtMatrixPreconditioner& compute(const MatType& mat)
{
analyzePattern(mat);
return factorize(mat);
}
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const {
vector_type m_ext_b;
vector_type m_ext_x;
size_t row = 0;
size_t col = 0;
for (size_t i = 0; i < m_solvers.size(); ++i) {
if (m_solvers[i] != nullptr) { // solver only exists for h2 blocks
LOG(2,"ExtMatrixPreconditioner: block "<<i<<" solve");
// construct b
m_ext_b.resize(m_ext_matrices[i].rows());
for (size_t j = 0; j < m_row_maps[i].size(); ++j) {
m_ext_b[m_row_maps[i][j]] = b[row + j];
}
for (int j = m_row_maps[i].size(); j < m_ext_matrices[i].rows(); ++j) {
m_ext_b[j] = 0;
}
m_ext_x = m_ext_b;
//for (int j = 0; j < m_ext_matrices[i].cols(); ++j) {
// m_ext_x[j] = 0;
//}
//for (int j = 0; j < m_ext_matrices[i].rows(); ++j) {
// std::cout << "ext_b["<<j<<"] = "<< m_ext_b[j] << std::endl;
//}
//TODO: solve with guess?
//m_ext_x = m_solvers[i]->solveWithGuess(m_ext_b,m_ext_x);
//m_ext_x = m_solvers[i]->solve(m_ext_b);
Eigen::Index iters = m_inner_iterations;
double tol_error = 1e-10;
std::cout << "m_ext_b norm = "<<m_ext_b.norm() << std::endl;
std::cout << "m_ext_x norm = "<<m_ext_x.norm() << std::endl;
std::cout << "residual norm = "<<(m_ext_matrices[i]*m_ext_x-m_ext_b).norm() << std::endl;
Eigen::internal::gmres(m_ext_matrices[i],m_ext_b,m_ext_x,*m_solvers[i],
iters,2*m_inner_iterations,tol_error);
//LOG(2,"ExtMatrixPreconditioner: solve complete: #iterations: " << m_solvers[i]->iterations() << ", estimated error: " << m_solvers[i]->error() << " true error = "<<(m_ext_matrices[i]*m_ext_x-m_ext_b).norm());
LOG(2,"ExtMatrixPreconditioner: solve complete: #iterations: " << iters << " true error = "<<(m_ext_matrices[i]*m_ext_x-m_ext_b).norm());
// filter to x
for (size_t j = 0; j < m_col_maps[i].size(); ++j) {
x[col + j] = m_ext_x[m_col_maps[i][j]];
}
// increment row/col by number of particles (NOT size of ext vectors)
row += m_row_maps[i].size();
col += m_col_maps[i].size();
} else {
LOG(2,"ExtMatrixPreconditioner: block "<<i<<" non h2 block");
for (int j = 0; j < m_col_sizes[i]; ++j) {
x[col + j] = b[row + j];
}
// increment row/col by the size of the block
row += m_row_sizes[i];
col += m_col_sizes[i];
}
}
LOG(2,"ExtMatrixPreconditioner: done solve_impl");
}
template<typename Rhs>
inline const Eigen::Solve<ExtMatrixPreconditioner, Rhs>
solve(const Eigen::MatrixBase<Rhs>& b) const {
eigen_assert(m_rows==b.rows()
&& "ExtMatrixPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
eigen_assert(m_isInitialized
&& "ExtMatrixPreconditioner is not initialized.");
return Eigen::Solve<ExtMatrixPreconditioner, Rhs>(*this, b.derived());
}
Eigen::ComputationInfo info() { return Eigen::Success; }
protected:
bool m_isInitialized;
};
#endif
class CardinalFunctionsPreconditioner {
typedef double Scalar;
typedef size_t Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> matrix_type;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> vector_type;
typedef std::vector<size_t> storage_vector_type;
typedef std::vector<storage_vector_type> connectivity_type;
protected:
bool m_isInitialized;
private:
size_t m_random;
double m_sigma;
double m_M;
connectivity_type m_domain_buffer;
std::vector<vector_type> m_weights;
Index m_rows;
Index m_cols;
public:
typedef typename vector_type::StorageIndex StorageIndex;
enum {
ColsAtCompileTime = Eigen::Dynamic,
MaxColsAtCompileTime = Eigen::Dynamic
};
CardinalFunctionsPreconditioner()
: m_isInitialized(false), m_random(0), m_sigma(-1), m_M(1.0) {}
template <typename MatType>
explicit CardinalFunctionsPreconditioner(const MatType &mat) {
compute(mat);
}
Index rows() const { return m_rows; }
Index cols() const { return m_cols; }
void set_number_of_random_particles(size_t n) { m_random = n; }
void set_sigma(double value) { m_sigma = value; }
void set_rejection_sampling_scale(double value) { m_M = value; }
template <typename Kernel>
void analyze_impl_block(const Index start_row, const Kernel &kernel) {
typedef typename Kernel::row_elements_type row_elements_type;
typedef typename Kernel::col_elements_type col_elements_type;
typedef typename row_elements_type::query_type query_type;
typedef typename query_type::traits_type traits_type;
typedef typename query_type::child_iterator child_iterator;
typedef typename traits_type::double_d double_d;
typedef typename traits_type::int_d int_d;
typedef typename traits_type::position position;
static_assert(
std::is_same<row_elements_type, col_elements_type>::value,
"Cardinal Functions preconditioner restricted to identical row and col "
"particle sets");
const row_elements_type &a = kernel.get_row_elements();
CHECK(
&a == &(kernel.get_col_elements()),
"Cardinal Functions preconditioner restricted to identical row and col "
"particle "
"sets");
const query_type &query = a.get_query();
m_domain_buffer.resize(a.size());
for (size_t i = 0; i < a.size(); ++i) {
storage_vector_type &buffer = m_domain_buffer[i];
// add buffer particles through random sampling
int nspecial = std::pow(3, query_type::dimension);
// const int nspecial = 0;
buffer.resize(m_random + nspecial);
std::vector<child_iterator> buckets(m_random + nspecial);
std::uniform_real_distribution<double> uniform(0, 1);
std::normal_distribution<double> normal(0, m_sigma);
std::default_random_engine generator;
// add special points
// updates nspecial with actual number of special points
lattice_iterator<query_type::dimension> special_it(
int_d::Constant(0), int_d::Constant(3), int_d::Constant(0));
for (nspecial = 0; special_it != false; ++special_it, ++nspecial) {
const double_d &bmin = query.get_bounds().bmin;
const double_d &bmax = query.get_bounds().bmax;
const double_d pos =
(*special_it) *
(0.5 * (bmax - bmin) - std::numeric_limits<double>::epsilon()) +
bmin;
// std::cout <<"adding special point at pos = "<<pos<<std::endl;
buckets[nspecial] = query.get_bucket(pos);
}
const double_d middle = get<position>(a)[i];
if (m_sigma > 0) {
const double scale2 = 1.0 / std::pow(m_sigma, 2);
auto gaussianf = [&](const double_d &x) {
return std::exp(-(x - middle).squaredNorm() * scale2);
};
std::generate(buckets.begin() + nspecial, buckets.end(), [&]() {
double_d sp;
bool accepted;
do {
for (size_t i = 0; i < query_type::dimension; i++) {
sp[i] = normal(generator) + middle[i];
}
const bool in_domain =
(sp < a.get_max()).all() && (sp >= a.get_min()).all();
accepted =
in_domain && uniform(generator) <
kernel.get_position_function()(middle, sp) /
(gaussianf(sp) * m_M);
} while (!accepted);
return query.get_bucket(sp);
});
} else {
const double volume = (a.get_max() - a.get_min()).prod();
std::generate(buckets.begin() + nspecial, buckets.end(), [&]() {
double_d sp;
bool accepted;
do {
for (size_t i = 0; i < query_type::dimension; i++) {
sp[i] =
0.5 * (a.get_max()[i] - a.get_min()[i]) * uniform(generator) +
a.get_min()[i];
}
const bool in_domain =
(sp < a.get_max()).all() && (sp >= a.get_min()).all();
accepted =
in_domain &&
uniform(generator) <
kernel.get_position_function()(middle, sp) * volume / m_M;
} while (!accepted);
return query.get_bucket(sp);
});
}
std::unordered_map<size_t, std::pair<child_iterator, size_t>> counts;
for (int i = 0; i < buckets.size(); ++i) {
auto bucket_index = query.get_bucket_index(*(buckets[i]));
auto it = counts.find(bucket_index);
if (it != counts.end()) {
it->second.second++;
} else {
counts[bucket_index] = std::make_pair(buckets[i], 1);
}
}
// for (auto i : counts) {
// std::cout << "bucket index " << i.first << " with bounds "
//<< query.get_bounds(i.second.first) << " has " << i.second.second
//<< " counts" << std::endl;
//}
int out_index = 0;
std::for_each(counts.begin(), counts.end(), [&](auto i) {
auto ci = i.second.first;
size_t count = i.second.second;
auto pit = query.get_bucket_particles(*ci);
auto num_particles = pit.distance_to_end();
std::vector<int> bucket_indices(num_particles);
std::iota(bucket_indices.begin(), bucket_indices.end(), 0);
std::random_shuffle(bucket_indices.begin(), bucket_indices.end());
const int trunc_count = std::min(count, bucket_indices.size());
std::transform(
bucket_indices.begin(), bucket_indices.begin() + trunc_count,
buffer.begin() + out_index, [&](const int i) {
return (&get<position>(*(pit + i)) - &get<position>(a)[0]) +
start_row;
});
// std::cout << "looking for " << count
//<< " samples in buffer. Found at indicies ";
// for (size_t i = out_index; i < out_index + trunc_count; ++i) {
// std::cout << buffer[i] << " ";
//}
// std::cout << std::endl;
out_index += trunc_count;
});
buffer.resize(out_index);
// ensure that cardinal index isn't in buffer
std::remove_if(buffer.begin(), buffer.end(),
[&i](const int j) { return j == i; });
#ifdef HAVE_CAIRO
const int image_size = 512;
cairo_surface_t *surface = cairo_svg_surface_create(
("sampler" + std::to_string(i) + ".svg").c_str(), image_size,
image_size);
cairo_svg_surface_restrict_to_version(surface, CAIRO_SVG_VERSION_1_2);
cairo_t *cr = cairo_create(surface);
const double lw = 0.01;
cairo_scale(cr, image_size, image_size);
cairo_set_line_width(cr, lw);
const double PI = boost::math::constants::pi<double>();
cairo_set_source_rgba(cr, 0.5, 0, 0, 1.0);
auto &pos = get<position>(a)[i];
cairo_arc(cr, pos[0], pos[1], lw, 0, 2 * PI);
cairo_fill(cr);
cairo_set_source_rgba(cr, 0, 0, 0.5, 1.0);
for (auto i : buffer) {
auto &pos = get<position>(a)[i];
cairo_arc(cr, pos[0], pos[1], lw, 0, 2 * PI);
cairo_fill(cr);
}
cairo_destroy(cr);
cairo_surface_destroy(surface);
#endif
ASSERT(buffer.size() > 0, "no particles in buffer");
}
}
// Specialisation for zero kernel blocks: a zero block contributes no
// coupling, so no domains are created for it.
template <typename RowParticles, typename ColParticles>
void
analyze_impl_block(const Index start_row,
const KernelZero<RowParticles, ColParticles> &kernel) {}
// Call analyze_impl_block once for each diagonal block (I,I) of the block
// operator. The int-array + comma-operator pack expansion is the standard
// pre-C++17 idiom for expanding a parameter pack with side effects.
template <unsigned int NI, unsigned int NJ, typename Blocks, std::size_t... I>
void analyze_impl(const MatrixReplacement<NI, NJ, Blocks> &mat,
detail::index_sequence<I...>) {
int dummy[] = {0, (analyze_impl_block(mat.template start_row<I>(),
std::get<I * NJ + I>(mat.m_blocks)),
0)...};
static_cast<void>(dummy);
}
// Analyze the block operator: build the per-cardinal-point buffers via
// analyze_impl, then log buffer-size statistics.
// Fix: the minimum statistic was initialized with a hard-coded 1000, which
// under-reported the true minimum whenever every buffer held more than 1000
// particles; use a proper sentinel and handle the empty case explicitly.
template <unsigned int NI, unsigned int NJ, typename Blocks>
CardinalFunctionsPreconditioner &
analyzePattern(const MatrixReplacement<NI, NJ, Blocks> &mat) {
  LOG(2, "CardinalFunctionsPreconditioner: analyze pattern");
  m_rows = mat.rows();
  m_cols = mat.cols();
  analyze_impl(mat, detail::make_index_sequence<NI>());
  int minsize_buffer = std::numeric_limits<int>::max();
  int maxsize_buffer = 0;
  for (size_t domain_index = 0; domain_index < m_domain_buffer.size();
       ++domain_index) {
    const int size_buffer = m_domain_buffer[domain_index].size();
    minsize_buffer = std::min(minsize_buffer, size_buffer);
    maxsize_buffer = std::max(maxsize_buffer, size_buffer);
  }
  if (m_domain_buffer.empty()) {
    // no domains: report 0--0 instead of the sentinel
    minsize_buffer = 0;
  }
  LOG(2, "CardinalFunctionsPreconditioner: finished analysis, found "
             << m_domain_buffer.size() << " domains, with " << minsize_buffer
             << "--" << maxsize_buffer << " buffer particles")
  return *this;
}
// Guard overload: the domain decomposition cannot be derived from a plain
// Eigen sparse matrix; this merely checks that analyzePattern was already
// called with an Aboria MatrixReplacement (which fills m_domain_buffer).
template <int _Options, typename _StorageIndex>
CardinalFunctionsPreconditioner &analyzePattern(
const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex> &mat) {
CHECK(m_domain_buffer.size() > 0,
"CardinalFunctionsPreconditioner::analyzePattern(): cannot analyze "
"sparse "
"matrix, "
"call analyzePattern using a Aboria MatrixReplacement class first");
return *this;
}
// Guard overload for Eigen::Ref-wrapped sparse matrices; same restriction as
// the plain sparse-matrix overload above.
template <int _Options, typename _StorageIndex, int RefOptions,
typename RefStrideType>
CardinalFunctionsPreconditioner &
analyzePattern(const Eigen::Ref<
const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex>,
RefOptions, RefStrideType> &mat) {
CHECK(m_domain_buffer.size() > 0,
"CardinalFunctionsPreconditioner::analyzePattern(): cannot analyze "
"sparse "
"matrix, "
"call analyzePattern using a Aboria MatrixReplacement class first");
return *this;
}
// Guard overload for dense Eigen matrices; same restriction as the sparse
// overloads above.
template <typename Derived>
CardinalFunctionsPreconditioner &
analyzePattern(const Eigen::DenseBase<Derived> &mat) {
CHECK(m_domain_buffer.size() > 0,
"CardinalFunctionsPreconditioner::analyzePattern(): cannot analyze "
"dense "
"matrix, "
"call analyzePattern need to pass a Aboria MatrixReplacement class "
"first");
return *this;
}
// Factorize: for every cardinal point i, assemble the (1 + buffer.size()) x N
// matrix A whose rows are the matrix rows of the cardinal particle (first)
// and its buffer particles, then solve the least-squares system A^T w = e_i
// for the cardinal-function weights w. weights[0] therefore belongs to the
// cardinal particle and weights[1..] to the buffer particles, in order.
template <typename MatType>
CardinalFunctionsPreconditioner &factorize(const MatType &mat) {
LOG(2, "CardinalFunctionsPreconditioner: factorizing domain");
eigen_assert(static_cast<typename MatType::Index>(m_rows) == mat.rows() &&
"CardinalFunctionsPreconditioner::solve(): invalid number of "
"rows of mat");
// NOTE(review): this assert checks cols but its message says "rows" --
// likely a copy-paste; confirm before relying on the message
eigen_assert(static_cast<typename MatType::Index>(m_cols) == mat.cols() &&
"CardinalFunctionsPreconditioner::solve(): invalid number of "
"rows of mat");
matrix_type domain_matrix;
const size_t N = mat.rows();
m_weights.resize(m_domain_buffer.size());
for (size_t domain_index = 0; domain_index < m_domain_buffer.size();
++domain_index) {
const storage_vector_type &buffer = m_domain_buffer[domain_index];
vector_type &weights = m_weights[domain_index];
// one row for the cardinal particle plus one per buffer particle
const size_t size = 1 + buffer.size();
// std::cout << "domain "<<domain_index<<"indicies =
// "<<indicies.size()<<" buffer = "<<buffer.size()<<" random =
// "<<random.size()<<std::endl;
domain_matrix.resize(size, N);
weights.resize(size);
size_t i = 0;
// first row: the cardinal particle itself (single-element init-list loop)
for (const size_t &big_index_i : {domain_index}) {
for (size_t j = 0; j < N; ++j) {
domain_matrix(i, j) = mat.coeff(big_index_i, j);
}
++i;
}
// remaining rows: the buffer particles, in buffer order
for (const size_t &big_index_i : buffer) {
for (size_t j = 0; j < N; ++j) {
domain_matrix(i, j) = mat.coeff(big_index_i, j);
}
++i;
}
// right-hand side: unit vector at the cardinal index
vector_type b = vector_type::Zero(N);
b[domain_index] = 1;
weights = domain_matrix.transpose().colPivHouseholderQr().solve(b);
// weights = domain_matrix.transpose()
// .bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV)
// .solve(b);
// report domains where the least-squares fit is poor
double relative_error =
(domain_matrix.transpose() * weights - b).norm() / b.norm();
if (relative_error > 1e-3 || std::isnan(relative_error)) {
std::cout << "domain index = " << domain_index
<< ": relative error = " << relative_error << std::endl;
}
}
m_isInitialized = true;
return *this;
}
// Convenience: analyzePattern() followed by factorize().
template <typename MatType>
CardinalFunctionsPreconditioner &compute(const MatType &mat) {
analyzePattern(mat);
return factorize(mat);
}
/** \internal
 * Apply the preconditioner: x_i = w_0 * b_i + sum_j w_{j+1} * b[buffer_j].
 * factorize() stores the weights with row 0 for the cardinal particle i and
 * rows 1..buffer.size() for the buffer particles, so the buffer loop must
 * index weights[j + 1]. The previous code used weights[j], which applied
 * weights[0] both to the cardinal entry and to the first buffer particle and
 * never used the last weight.
 */
template <typename Rhs, typename Dest>
void _solve_impl(const Rhs &b, Dest &x) const {
  for (size_t i = 0; i < m_domain_buffer.size(); ++i) {
    const storage_vector_type &buffer = m_domain_buffer[i];
    const vector_type &weights = m_weights[i];
    // x = W * b
    x[i] = weights[0] * b[i];
    for (size_t j = 0; j < buffer.size(); ++j) {
      x[i] += weights[j + 1] * b[buffer[j]];
    }
  }
}
// Eigen solver-interface entry point: returns a lazy expression that applies
// the preconditioner to b via _solve_impl when evaluated.
template <typename Rhs>
inline const Eigen::Solve<CardinalFunctionsPreconditioner, Rhs>
solve(const Eigen::MatrixBase<Rhs> &b) const {
eigen_assert(static_cast<typename Rhs::Index>(m_rows) == b.rows() &&
"CardinalFunctionsPreconditioner::solve(): invalid number of "
"rows of the "
"right hand side matrix b");
eigen_assert(m_isInitialized &&
"CardinalFunctionsPreconditioner is not initialized.");
return Eigen::Solve<CardinalFunctionsPreconditioner, Rhs>(*this,
b.derived());
}
// Diagnostics hook required by Eigen's solver interface; this preconditioner
// never records a failure state, so it unconditionally reports success.
Eigen::ComputationInfo info() {
  return Eigen::Success;
}
}; // namespace Aboria
/// Additive-Schwarz preconditioner. The particle set is partitioned into the
/// leaf buckets of its spatial query structure; for each bucket the
/// sub-matrix over its particles plus a buffer of neighbouring particles is
/// factorized with `Solver`, and _solve_impl applies each local inverse
/// independently, writing back only the domain's own entries. Optionally a
/// coarse grid of sampled particles is appended as one extra (last) domain.
template <typename Solver> class SchwartzPreconditioner {
  typedef double Scalar;
  typedef size_t Index;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> matrix_type;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> vector_type;
  typedef Solver solver_type;
  typedef std::vector<size_t> storage_vector_type;
  typedef std::vector<storage_vector_type> connectivity_type;

protected:
  bool m_isInitialized;

private:
  // extra width added around each bucket when collecting buffer particles
  double m_neighbourhood_buffer;
  // number of coarse-grid cells per dimension (0 disables the coarse grid);
  // kept as double so the lattice-spacing division below stays floating-point
  double m_coarse_grid_n;
  // per-domain particle indices (last entry is the coarse grid, if enabled)
  connectivity_type m_domain_indicies;
  // per-domain buffer-particle indices
  connectivity_type m_domain_buffer;
  std::vector<solver_type> m_domain_factorized_matrix;
  Index m_rows;
  Index m_cols;

public:
  typedef typename vector_type::StorageIndex StorageIndex;
  enum {
    ColsAtCompileTime = Eigen::Dynamic,
    MaxColsAtCompileTime = Eigen::Dynamic
  };

  SchwartzPreconditioner()
      : m_isInitialized(false),
        m_neighbourhood_buffer(1e5 * std::numeric_limits<double>::epsilon()),
        m_coarse_grid_n(0) {}

  // Construct and immediately factorize `mat`. Delegates to the default
  // constructor first so that m_neighbourhood_buffer and m_coarse_grid_n are
  // initialized before compute() reads them (previously this path left them
  // uninitialized).
  template <typename MatType>
  explicit SchwartzPreconditioner(const MatType &mat)
      : SchwartzPreconditioner() {
    compute(mat);
  }

  Index rows() const { return m_rows; }
  Index cols() const { return m_cols; }

  // extra width added around each bucket when collecting buffer particles
  void set_neighbourhood_buffer_size(double arg) {
    m_neighbourhood_buffer = arg;
  }
  // number of coarse-grid cells per dimension (0 disables the coarse grid)
  void set_coarse_grid_n(int arg) { m_coarse_grid_n = arg; }

  // Build the decomposition for one diagonal kernel block: one domain per
  // non-empty leaf bucket (its particles in m_domain_indicies, particles of
  // nearby buckets in m_domain_buffer), plus the optional coarse-grid domain.
  template <typename Kernel>
  void analyze_impl_block(const Index start_row, const Kernel &kernel) {
    typedef typename Kernel::row_elements_type row_elements_type;
    typedef typename Kernel::col_elements_type col_elements_type;
    typedef typename row_elements_type::query_type query_type;
    typedef typename query_type::traits_type traits_type;
    typedef typename query_type::child_iterator child_iterator;
    typedef typename traits_type::double_d double_d;
    typedef typename traits_type::int_d int_d;
    typedef typename traits_type::position position;
    static_assert(std::is_same<row_elements_type, col_elements_type>::value,
                  "Schwartz preconditioner restricted to identical row and col "
                  "particle sets");
    const row_elements_type &a = kernel.get_row_elements();
    CHECK(
        &a == &(kernel.get_col_elements()),
        "Schwartz preconditioner restricted to identical row and col particle "
        "sets");
    const query_type &query = a.get_query();
    for (auto i = query.get_subtree(); i != false; ++i) {
      if (query.is_leaf_node(*i)) {
        auto ci = i.get_child_iterator();
        auto bounds = query.get_bounds(ci);
        const double_d middle = 0.5 * (bounds.bmax + bounds.bmin);
        const double_d side = 0.5 * (bounds.bmax - bounds.bmin);
        // skip over empty buckets
        if (query.get_bucket_particles(*ci) == false)
          continue;
        const size_t domain_index = m_domain_indicies.size();
        m_domain_indicies.push_back(connectivity_type::value_type());
        m_domain_buffer.push_back(connectivity_type::value_type());
        storage_vector_type &buffer = m_domain_buffer[domain_index];
        storage_vector_type &indicies = m_domain_indicies[domain_index];
        // sweep all buckets within the (buffered) bounds of this bucket:
        // particles inside the bucket go to indicies, the rest to buffer
        for (auto bucket = query.template get_buckets_near_point<-1>(
                 middle, 1.0,
                 create_scale_transform(1.0 / (side + m_neighbourhood_buffer)));
             bucket != false; ++bucket) {
          for (auto particle = query.get_bucket_particles(*bucket);
               particle != false; ++particle) {
            const double_d &p = get<position>(*particle);
            const size_t index =
                &p - get<position>(query.get_particles_begin());
            if ((p < bounds.bmin).any() || (p >= bounds.bmax).any()) {
              buffer.push_back(start_row + index);
            } else {
              indicies.push_back(start_row + index);
            }
          }
        }
        ASSERT(buffer.size() > 0, "no particles in buffer");
        ASSERT(indicies.size() > 0, "no particles in domain");
      }
    }
    // add coarse grid as one extra (final) domain
    if (m_coarse_grid_n > 0) {
      m_domain_indicies.push_back(connectivity_type::value_type());
      m_domain_buffer.push_back(connectivity_type::value_type());
      storage_vector_type &indicies = m_domain_indicies.back();
      auto bounds = query.get_bounds();
      indicies.resize(std::pow(m_coarse_grid_n + 1, query_type::dimension));
      // lattice spacing, nudged down so the last lattice point stays inside
      // the domain bounds
      const double_d dx = (bounds.bmax - bounds.bmin) / m_coarse_grid_n -
                          std::numeric_limits<double>::epsilon();
      lattice_iterator<query_type::dimension> it(
          int_d::Constant(0), int_d::Constant(m_coarse_grid_n + 1),
          int_d::Constant(0));
      // count how many lattice points fall in each distinct bucket
      std::unordered_map<size_t, std::pair<child_iterator, size_t>> counts;
      for (; it != false; ++it) {
        const double_d point = (*it) * dx + bounds.bmin;
        const child_iterator ci = query.get_bucket(point);
        auto bucket_index = query.get_bucket_index(*ci);
        auto cit = counts.find(bucket_index);
        if (cit != counts.end()) {
          cit->second.second++;
        } else {
          counts[bucket_index] = std::make_pair(ci, 1);
        }
      }
      // for each bucket pick `count` random particles without replacement,
      // truncated to the bucket's population
      int out_index = 0;
      std::default_random_engine shuffle_generator;
      std::for_each(counts.begin(), counts.end(), [&](auto i) {
        const child_iterator &ci = i.second.first;
        size_t count = i.second.second;
        auto pit = query.get_bucket_particles(*ci);
        auto num_particles = pit.distance_to_end();
        std::vector<int> bucket_indices(num_particles);
        std::iota(bucket_indices.begin(), bucket_indices.end(), 0);
        // std::random_shuffle was removed in C++17; std::shuffle is the
        // supported replacement
        std::shuffle(bucket_indices.begin(), bucket_indices.end(),
                     shuffle_generator);
        const int trunc_count = std::min(count, bucket_indices.size());
        std::transform(
            bucket_indices.begin(), bucket_indices.begin() + trunc_count,
            indicies.begin() + out_index, [&](const int j) {
              return (&get<position>(*(pit + j)) - &get<position>(a)[0]) +
                     start_row;
            });
        out_index += trunc_count;
      });
      indicies.resize(out_index);
      ASSERT(indicies.size() > 0, "no particles in domain");
#ifdef HAVE_CAIRO_TURN_OFF
      // debug visualisation of the coarse grid (disabled)
      const int image_size = 512;
      cairo_surface_t *surface =
          cairo_svg_surface_create("coarse_grid.svg", image_size, image_size);
      cairo_svg_surface_restrict_to_version(surface, CAIRO_SVG_VERSION_1_2);
      cairo_t *cr = cairo_create(surface);
      const double lw = 0.007;
      cairo_scale(cr, image_size, image_size);
      cairo_set_line_width(cr, lw);
      cairo_set_source_rgba(cr, 0, 0, 0, 0.5);
      cairo_move_to(cr, bounds.bmin[0], bounds.bmin[1]);
      cairo_line_to(cr, bounds.bmax[0], bounds.bmin[1]);
      cairo_line_to(cr, bounds.bmax[0], bounds.bmax[1]);
      cairo_line_to(cr, bounds.bmin[0], bounds.bmax[1]);
      cairo_close_path(cr);
      cairo_stroke(cr);
      const double PI = boost::math::constants::pi<double>();
      cairo_set_source_rgba(cr, 0.5, 0, 0, 0.5);
      for (auto i : indicies) {
        auto &pos = get<position>(a)[i];
        cairo_arc(cr, pos[0], pos[1], lw, 0, 2 * PI);
        cairo_fill(cr);
      }
      cairo_destroy(cr);
      cairo_surface_destroy(surface);
#endif
    }
  }

  // Specialisation for zero kernel blocks: no domains are created.
  template <typename RowParticles, typename ColParticles>
  void
  analyze_impl_block(const Index start_row,
                     const KernelZero<RowParticles, ColParticles> &kernel) {}

  // Call analyze_impl_block for each diagonal block (I,I) of the operator.
  template <unsigned int NI, unsigned int NJ, typename Blocks,
            std::size_t... I>
  void analyze_impl(const MatrixReplacement<NI, NJ, Blocks> &mat,
                    detail::index_sequence<I...>) {
    int dummy[] = {0, (analyze_impl_block(mat.template start_row<I>(),
                                          std::get<I * NJ + I>(mat.m_blocks)),
                       0)...};
    static_cast<void>(dummy);
  }

  // Analyze the block operator and log per-domain statistics.
  // Fixes: the stats loop previously ran to size() - 1 unconditionally, which
  // underflows when no domains exist and wrongly excludes the last leaf
  // domain (and reports it as "the coarse grid") when m_coarse_grid_n == 0;
  // the minimum statistics used a hard-coded 1000 sentinel.
  template <unsigned int NI, unsigned int NJ, typename Blocks>
  SchwartzPreconditioner &
  analyzePattern(const MatrixReplacement<NI, NJ, Blocks> &mat) {
    LOG(2, "SchwartzPreconditioner: analyze pattern");
    m_rows = mat.rows();
    m_cols = mat.cols();
    analyze_impl(mat, detail::make_index_sequence<NI>());
    int count = 0;
    int minsize_buffer = std::numeric_limits<int>::max();
    int maxsize_buffer = 0;
    int minsize_indicies = std::numeric_limits<int>::max();
    int maxsize_indicies = 0;
    const size_t ndomains = m_domain_indicies.size();
    // the last domain is the coarse grid only when one was requested
    const bool have_coarse = m_coarse_grid_n > 0 && ndomains > 0;
    const size_t nleaf = have_coarse ? ndomains - 1 : ndomains;
    for (size_t domain_index = 0; domain_index < nleaf; ++domain_index) {
      const int size_indicies = m_domain_indicies[domain_index].size();
      const int size_buffer = m_domain_buffer[domain_index].size();
      count += size_indicies;
      minsize_buffer = std::min(minsize_buffer, size_buffer);
      maxsize_buffer = std::max(maxsize_buffer, size_buffer);
      minsize_indicies = std::min(minsize_indicies, size_indicies);
      maxsize_indicies = std::max(maxsize_indicies, size_indicies);
    }
    if (nleaf == 0) {
      minsize_buffer = 0;
      minsize_indicies = 0;
    }
    const size_t ncoarse = have_coarse ? m_domain_indicies.back().size() : 0;
    LOG(2, "SchwartzPreconditioner: finished analysis, found "
               << ndomains << " domains, with "
               << minsize_indicies << "--" << maxsize_indicies << " particles ("
               << count << " total), and " << minsize_buffer << "--"
               << maxsize_buffer << " buffer particles. The coarse grid has "
               << ncoarse << " particles");
    return *this;
  }

  // Guard overload: the decomposition cannot be derived from a plain Eigen
  // sparse matrix; analyzePattern must have been called with an Aboria
  // MatrixReplacement first.
  template <int _Options, typename _StorageIndex>
  SchwartzPreconditioner &analyzePattern(
      const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex> &mat) {
    CHECK(m_domain_indicies.size() > 0,
          "SchwartzPreconditioner::analyzePattern(): cannot analyze sparse "
          "matrix, "
          "call analyzePattern using a Aboria MatrixReplacement class first");
    return *this;
  }

  // Guard overload for Eigen::Ref-wrapped sparse matrices.
  template <int _Options, typename _StorageIndex, int RefOptions,
            typename RefStrideType>
  SchwartzPreconditioner &
  analyzePattern(const Eigen::Ref<
                 const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex>,
                 RefOptions, RefStrideType> &mat) {
    CHECK(m_domain_indicies.size() > 0,
          "SchwartzPreconditioner::analyzePattern(): cannot analyze sparse "
          "matrix, "
          "call analyzePattern using a Aboria MatrixReplacement class first");
    return *this;
  }

  // Guard overload for dense Eigen matrices.
  template <typename Derived>
  SchwartzPreconditioner &analyzePattern(const Eigen::DenseBase<Derived> &mat) {
    CHECK(m_domain_indicies.size() > 0,
          "SchwartzPreconditioner::analyzePattern(): cannot analyze dense "
          "matrix, "
          "call analyzePattern need to pass a Aboria MatrixReplacement class "
          "first");
    return *this;
  }

  // Factorize each domain's (indicies + buffer) sub-matrix with `Solver` and
  // report any domain whose factorization solves a random RHS poorly.
  template <typename MatType>
  SchwartzPreconditioner &factorize(const MatType &mat) {
    LOG(2, "SchwartzPreconditioner: factorizing domain");
    eigen_assert(
        static_cast<typename MatType::Index>(m_rows) == mat.rows() &&
        "SchwartzPreconditioner::solve(): invalid number of rows of mat");
    eigen_assert(
        static_cast<typename MatType::Index>(m_cols) == mat.cols() &&
        "SchwartzPreconditioner::solve(): invalid number of cols of mat");
    m_domain_factorized_matrix.resize(m_domain_indicies.size());
    // NOTE(review): OpenMP 2.x requires a signed loop index; this size_t loop
    // assumes OpenMP >= 3.0 -- confirm against the build configuration
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
    for (size_t domain_index = 0;
         domain_index < m_domain_factorized_matrix.size(); ++domain_index) {
      const storage_vector_type &buffer = m_domain_buffer[domain_index];
      const storage_vector_type &indicies = m_domain_indicies[domain_index];
      solver_type &solver = m_domain_factorized_matrix[domain_index];
      const size_t size = indicies.size() + buffer.size();
      // gather the dense sub-matrix over [indicies, buffer] x
      // [indicies, buffer]
      matrix_type domain_matrix;
      domain_matrix.resize(size, size);
      size_t i = 0;
      for (const size_t &big_index_i : indicies) {
        size_t j = 0;
        for (const size_t &big_index_j : indicies) {
          domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
        }
        for (const size_t &big_index_j : buffer) {
          domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
        }
        ++i;
      }
      for (const size_t &big_index_i : buffer) {
        size_t j = 0;
        for (const size_t &big_index_j : indicies) {
          domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
        }
        for (const size_t &big_index_j : buffer) {
          domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
        }
        ++i;
      }
      solver.compute(domain_matrix);
      // sanity-check the factorization against a random right-hand side
      Eigen::VectorXd b = Eigen::VectorXd::Random(domain_matrix.rows());
      Eigen::VectorXd x = solver.solve(b);
      double relative_error = (domain_matrix * x - b).norm() / b.norm();
      if (relative_error > 1e-3 || std::isnan(relative_error)) {
        std::cout << "relative error = " << relative_error << std::endl;
      }
    }
    m_isInitialized = true;
    return *this;
  }

  // Convenience: analyzePattern() followed by factorize().
  template <typename MatType>
  SchwartzPreconditioner &compute(const MatType &mat) {
    analyzePattern(mat);
    return factorize(mat);
  }

  /** \internal
   * Apply one additive-Schwarz sweep: start from x = b, then for each domain
   * solve the local factorized system over [indicies, buffer] and write back
   * only the domain's own (indicies) entries; buffer entries contribute to
   * the right-hand side only.
   */
  template <typename Rhs, typename Dest>
  void _solve_impl(const Rhs &b, Dest &x) const {
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < x.size(); ++i) {
      x[i] = b[i];
    }
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
    for (size_t i = 0; i < m_domain_indicies.size(); ++i) {
      // skip empty domains; the previous test checked the outer container
      // (m_domain_indicies.size() == 0), which is never true inside this loop
      if (m_domain_indicies[i].size() == 0)
        continue;
      const storage_vector_type &buffer = m_domain_buffer[i];
      const storage_vector_type &indicies = m_domain_indicies[i];
      const size_t nb = indicies.size() + buffer.size();
      vector_type domain_x;
      vector_type domain_b;
      domain_x.resize(nb);
      domain_b.resize(nb);
      // copy b values from the big vector, domain entries first
      size_t sub_index = 0;
      for (size_t j = 0; j < indicies.size(); ++j) {
        domain_b[sub_index++] = b[indicies[j]];
      }
      for (size_t j = 0; j < buffer.size(); ++j) {
        domain_b[sub_index++] = b[buffer[j]];
      }
      // solve domain
      domain_x = m_domain_factorized_matrix[i].solve(domain_b);
      // scatter the domain's own solution entries back to the big vector
      sub_index = 0;
      for (size_t j = 0; j < indicies.size(); ++j) {
        x[indicies[j]] = domain_x[sub_index++];
      }
    }
  }

  // Eigen solver-interface entry point: returns a lazy expression that
  // applies the preconditioner to b via _solve_impl when evaluated.
  template <typename Rhs>
  inline const Eigen::Solve<SchwartzPreconditioner, Rhs>
  solve(const Eigen::MatrixBase<Rhs> &b) const {
    eigen_assert(
        static_cast<typename Rhs::Index>(m_rows) == b.rows() &&
        "SchwartzPreconditioner::solve(): invalid number of rows of the "
        "right hand side matrix b");
    eigen_assert(m_isInitialized &&
                 "SchwartzPreconditioner is not initialized.");
    return Eigen::Solve<SchwartzPreconditioner, Rhs>(*this, b.derived());
  }

  // Never records a failure state, so always reports success.
  Eigen::ComputationInfo info() { return Eigen::Success; }
}; // class SchwartzPreconditioner
template <typename Solver> class SchwartzSamplingPreconditioner {
typedef double Scalar;
typedef size_t Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> matrix_type;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> vector_type;
typedef Solver solver_type;
typedef std::vector<size_t> storage_vector_type;
typedef std::vector<storage_vector_type> connectivity_type;
protected:
bool m_isInitialized;
private:
size_t m_random;
double m_sigma;
double m_M;
connectivity_type m_domain_indicies;
connectivity_type m_domain_buffer;
std::vector<solver_type> m_domain_factorized_matrix;
Index m_rows;
Index m_cols;
public:
typedef typename vector_type::StorageIndex StorageIndex;
enum {
ColsAtCompileTime = Eigen::Dynamic,
MaxColsAtCompileTime = Eigen::Dynamic
};
// Construct an uninitialized preconditioner; call compute() (or
// analyzePattern() + factorize()) before use. m_sigma < 0 selects uniform
// sampling; m_M is the rejection-sampling scale constant.
SchwartzSamplingPreconditioner()
    : m_isInitialized(false), m_random(0), m_sigma(-1), m_M(1.0) {}

// Construct and immediately factorize `mat`. Delegates to the default
// constructor first so that m_random/m_sigma/m_M are initialized before
// compute() reads them (previously this path left them uninitialized).
template <typename MatType>
explicit SchwartzSamplingPreconditioner(const MatType &mat)
    : SchwartzSamplingPreconditioner() {
  compute(mat);
}

// dimensions of the operator recorded by analyzePattern()
Index rows() const { return m_rows; }
Index cols() const { return m_cols; }

// number of randomly sampled buffer particles per domain
void set_number_of_random_particles(size_t n) { m_random = n; }
// width of the Gaussian sampling distribution (<= 0 selects uniform sampling)
void set_sigma(double value) { m_sigma = value; }
// scale constant M used by the rejection sampler
void set_rejection_sampling_scale(double value) { m_M = value; }
// Build the decomposition for one diagonal kernel block: one domain per
// non-empty leaf bucket. The bucket's own particles go into
// m_domain_indicies. m_domain_buffer is filled with particles sampled from
// *other* buckets: a 3^d lattice of "special" points spanning the whole
// domain (points landing inside the bucket are skipped), plus m_random points
// drawn by rejection sampling from either a Gaussian of width m_sigma centred
// on the bucket (m_sigma > 0) or the uniform distribution over the domain,
// weighted by the kernel's position function.
template <typename Kernel>
void analyze_impl_block(const Index start_row, const Kernel &kernel) {
  typedef typename Kernel::row_elements_type row_elements_type;
  typedef typename Kernel::col_elements_type col_elements_type;
  typedef typename row_elements_type::query_type query_type;
  typedef typename query_type::traits_type traits_type;
  typedef typename query_type::child_iterator child_iterator;
  typedef typename traits_type::double_d double_d;
  typedef typename traits_type::int_d int_d;
  typedef typename traits_type::position position;
  static_assert(std::is_same<row_elements_type, col_elements_type>::value,
                "Schwartz preconditioner restricted to identical row and col "
                "particle sets");
  const row_elements_type &a = kernel.get_row_elements();
  CHECK(&a == &(kernel.get_col_elements()),
        "Schwartz preconditioner restricted to identical row and col "
        "particle "
        "sets");
  const query_type &query = a.get_query();
  for (auto i = query.get_subtree(); i != false; ++i) {
    if (query.is_leaf_node(*i)) {
      auto ci = i.get_child_iterator();
      auto bounds = query.get_bounds(ci);
      // skip over empty buckets
      if (query.get_bucket_particles(*ci) == false)
        continue;
      const size_t domain_index = m_domain_indicies.size();
      m_domain_indicies.push_back(connectivity_type::value_type());
      m_domain_buffer.push_back(connectivity_type::value_type());
      storage_vector_type &buffer = m_domain_buffer[domain_index];
      storage_vector_type &indicies = m_domain_indicies[domain_index];
      // add particles in this bucket to indicies (*i and *ci refer to the
      // same leaf bucket here)
      for (auto particle = query.get_bucket_particles(*i); particle != false;
           ++particle) {
        const size_t index = &(get<position>(*particle)) -
                             get<position>(query.get_particles_begin());
        indicies.push_back(start_row + index);
      }
      // add buffer particles through random sampling
      int nspecial = std::pow(3, query_type::dimension);
      buffer.resize(m_random + nspecial);
      std::vector<child_iterator> buckets(m_random + nspecial);
      std::uniform_real_distribution<double> uniform(0, 1);
      std::normal_distribution<double> normal(0, m_sigma);
      std::default_random_engine generator;
      // add special points on a 3^d lattice over the whole domain, skipping
      // the ones that land inside this bucket; nspecial is updated with the
      // actual number of special points added
      lattice_iterator<query_type::dimension> special_it(
          int_d::Constant(0), int_d::Constant(3), int_d::Constant(0));
      for (nspecial = 0; special_it != false; ++special_it, ++nspecial) {
        const double_d &bmin = query.get_bounds().bmin;
        const double_d &bmax = query.get_bounds().bmax;
        const double_d pos =
            (*special_it) * (0.5 * (bmax - bmin) -
                             std::numeric_limits<double>::epsilon()) +
            bmin;
        const bool not_in_bucket =
            (pos >= bounds.bmax).any() || (pos < bounds.bmin).any();
        if (not_in_bucket) {
          buckets[nspecial] = query.get_bucket(pos);
        } else {
          --nspecial;
        }
      }
      // nspecial may now be smaller than 3^d: shrink *both* containers so
      // they stay the same length. Previously only `buffer` was shrunk while
      // `buckets` kept its original length, so the sampling below produced
      // more samples than `buffer` could hold and std::transform wrote past
      // its end.
      buffer.resize(m_random + nspecial);
      buckets.resize(m_random + nspecial);
      const double_d middle = 0.5 * (bounds.bmin + bounds.bmax);
      if (m_sigma > 0) {
        const double_d w = bounds.bmax - bounds.bmin;
        if (m_sigma < 0.1 * w.maxCoeff()) {
          // narrow Gaussian: positions drawn from it would almost always be
          // rejected as "in bucket", so sample neighbouring buckets
          // uniformly instead
          std::vector<child_iterator> pot_buckets;
          for (auto it = query.template get_buckets_near_point<-1>(
                   middle, 1.0, create_scale_transform(1.0 / (0.6 * w)));
               it != false; ++it) {
            // fill pot buckets (not self)
            const auto ci = it.get_child_iterator();
            auto pot_bounds = query.get_bounds(ci);
            const bool middle_not_in_pot =
                (middle >= pot_bounds.bmax).any() ||
                (middle < pot_bounds.bmin).any();
            if (middle_not_in_pot) {
              pot_buckets.push_back(ci);
            }
          }
          // uniformly sample pot buckets
          // NOTE(review): assumes at least one neighbouring bucket exists;
          // an empty pot_buckets would index out of bounds -- confirm
          std::generate(buckets.begin() + nspecial, buckets.end(), [&]() {
            const int sampled_index =
                std::floor(uniform(generator) * pot_buckets.size());
            return pot_buckets[sampled_index];
          });
        } else {
          // rejection-sample positions from a Gaussian centred on the
          // bucket, accepting with ratio kernel / (M * gaussian)
          const double scale2 = 1.0 / std::pow(m_sigma, 2);
          auto gaussianf = [&](const double_d &x) {
            return std::exp(-(x - middle).squaredNorm() * scale2);
          };
          std::generate(buckets.begin() + nspecial, buckets.end(), [&]() {
            double_d sp;
            bool accepted;
            do {
              for (size_t i = 0; i < query_type::dimension; i++) {
                sp[i] = normal(generator) + middle[i];
              }
              const bool not_in_bucket =
                  (sp >= bounds.bmax).any() || (sp < bounds.bmin).any();
              const bool in_domain =
                  (sp < a.get_max()).all() && (sp >= a.get_min()).all();
              accepted = not_in_bucket && in_domain &&
                         uniform(generator) <
                             kernel.get_position_function()(middle, sp) /
                                 (gaussianf(sp) * m_M);
            } while (!accepted);
            return query.get_bucket(sp);
          });
        }
      } else {
        // no sigma given: rejection-sample uniformly over the domain,
        // accepting with ratio kernel * volume / M
        const double volume = (a.get_max() - a.get_min()).prod();
        std::generate(buckets.begin() + nspecial, buckets.end(), [&]() {
          double_d sp;
          bool accepted;
          do {
            for (size_t i = 0; i < query_type::dimension; i++) {
              sp[i] = 0.5 * (a.get_max()[i] - a.get_min()[i]) *
                          uniform(generator) +
                      a.get_min()[i];
            }
            const bool not_in_bucket =
                (sp >= bounds.bmax).any() || (sp < bounds.bmin).any();
            const bool in_domain =
                (sp < a.get_max()).all() && (sp >= a.get_min()).all();
            accepted =
                not_in_bucket && in_domain &&
                uniform(generator) <
                    kernel.get_position_function()(middle, sp) * volume / m_M;
          } while (!accepted);
          return query.get_bucket(sp);
        });
      }
      // count how many samples landed in each distinct bucket
      std::unordered_map<size_t, std::pair<child_iterator, size_t>> counts;
      for (size_t i = 0; i < buckets.size(); ++i) {
        auto bucket_index = query.get_bucket_index(*(buckets[i]));
        auto it = counts.find(bucket_index);
        if (it != counts.end()) {
          it->second.second++;
        } else {
          counts[bucket_index] = std::make_pair(buckets[i], 1);
        }
      }
      // for each sampled bucket pick `count` random particles without
      // replacement (truncated to the bucket's population) and store their
      // global indices in `buffer`
      int out_index = 0;
      std::for_each(counts.begin(), counts.end(), [&](auto i) {
        auto ci = i.second.first;
        size_t count = i.second.second;
        auto pit = query.get_bucket_particles(*ci);
        auto num_particles = pit.distance_to_end();
        std::vector<int> bucket_indices(num_particles);
        std::iota(bucket_indices.begin(), bucket_indices.end(), 0);
        // std::random_shuffle was removed in C++17; std::shuffle is the
        // supported replacement
        std::shuffle(bucket_indices.begin(), bucket_indices.end(), generator);
        const int trunc_count = std::min(count, bucket_indices.size());
        std::transform(
            bucket_indices.begin(), bucket_indices.begin() + trunc_count,
            buffer.begin() + out_index, [&](const int i) {
              return (&get<position>(*(pit + i)) - &get<position>(a)[0]) +
                     start_row;
            });
        out_index += trunc_count;
      });
      buffer.resize(out_index);
#ifdef HAVE_CAIRO_TURN_OFF
      // debug visualisation of the domain and its sampled buffer (disabled)
      const int image_size = 512;
      cairo_surface_t *surface = cairo_svg_surface_create(
          ("sampler" + std::to_string(domain_index) + ".svg").c_str(),
          image_size, image_size);
      cairo_svg_surface_restrict_to_version(surface, CAIRO_SVG_VERSION_1_2);
      cairo_t *cr = cairo_create(surface);
      const double lw = 0.007;
      cairo_scale(cr, image_size, image_size);
      cairo_set_line_width(cr, lw);
      cairo_set_source_rgba(cr, 0, 0, 0, 0.5);
      cairo_move_to(cr, bounds.bmin[0], bounds.bmin[1]);
      cairo_line_to(cr, bounds.bmax[0], bounds.bmin[1]);
      cairo_line_to(cr, bounds.bmax[0], bounds.bmax[1]);
      cairo_line_to(cr, bounds.bmin[0], bounds.bmax[1]);
      cairo_close_path(cr);
      cairo_stroke(cr);
      const double PI = boost::math::constants::pi<double>();
      cairo_set_source_rgba(cr, 0.5, 0, 0, 0.5);
      for (auto i : indicies) {
        auto &pos = get<position>(a)[i];
        cairo_arc(cr, pos[0], pos[1], lw, 0, 2 * PI);
        cairo_fill(cr);
      }
      cairo_set_source_rgba(cr, 0, 0, 0.5, 0.5);
      for (auto i : buffer) {
        auto &pos = get<position>(a)[i];
        cairo_arc(cr, pos[0], pos[1], lw, 0, 2 * PI);
        cairo_fill(cr);
      }
      cairo_destroy(cr);
      cairo_surface_destroy(surface);
#endif
      ASSERT(buffer.size() > 0, "no particles in buffer");
      ASSERT(indicies.size() > 0, "no particles in domain");
    }
  }
}
// Specialisation for zero kernel blocks: a zero block contributes no
// coupling, so no domains are created for it.
template <typename RowParticles, typename ColParticles>
void
analyze_impl_block(const Index start_row,
const KernelZero<RowParticles, ColParticles> &kernel) {}
// Call analyze_impl_block once for each diagonal block (I,I) of the block
// operator. The int-array + comma-operator pack expansion is the standard
// pre-C++17 idiom for expanding a parameter pack with side effects.
template <unsigned int NI, unsigned int NJ, typename Blocks, std::size_t... I>
void analyze_impl(const MatrixReplacement<NI, NJ, Blocks> &mat,
detail::index_sequence<I...>) {
int dummy[] = {0, (analyze_impl_block(mat.template start_row<I>(),
std::get<I * NJ + I>(mat.m_blocks)),
0)...};
static_cast<void>(dummy);
}
// Analyze the block operator: build the per-domain index/buffer lists via
// analyze_impl, then log per-domain statistics.
// Fix: the minimum statistics were initialized with a hard-coded 1000, which
// under-reported the true minima whenever every domain held more than 1000
// particles; use a proper sentinel and handle the empty case explicitly.
template <unsigned int NI, unsigned int NJ, typename Blocks>
SchwartzSamplingPreconditioner &
analyzePattern(const MatrixReplacement<NI, NJ, Blocks> &mat) {
  LOG(2, "SchwartzSamplingPreconditioner: analyze pattern");
  m_rows = mat.rows();
  m_cols = mat.cols();
  analyze_impl(mat, detail::make_index_sequence<NI>());
  int count = 0;
  int minsize_buffer = std::numeric_limits<int>::max();
  int maxsize_buffer = 0;
  int minsize_indicies = std::numeric_limits<int>::max();
  int maxsize_indicies = 0;
  for (size_t domain_index = 0; domain_index < m_domain_indicies.size();
       ++domain_index) {
    const int size_indicies = m_domain_indicies[domain_index].size();
    const int size_buffer = m_domain_buffer[domain_index].size();
    count += size_indicies;
    minsize_buffer = std::min(minsize_buffer, size_buffer);
    maxsize_buffer = std::max(maxsize_buffer, size_buffer);
    minsize_indicies = std::min(minsize_indicies, size_indicies);
    maxsize_indicies = std::max(maxsize_indicies, size_indicies);
  }
  if (m_domain_indicies.empty()) {
    // no domains: report 0--0 instead of the sentinel
    minsize_buffer = 0;
    minsize_indicies = 0;
  }
  LOG(2, "SchwartzSamplingPreconditioner: finished analysis, found "
             << m_domain_indicies.size() << " domains, with "
             << minsize_indicies << "--" << maxsize_indicies << " particles ("
             << count << " total), and " << minsize_buffer << "--"
             << maxsize_buffer << " buffer particles")
  return *this;
}
// Sparse-matrix overload: the domain decomposition can only be derived from
// an Aboria MatrixReplacement, so this overload merely verifies that a prior
// analyzePattern(MatrixReplacement) call has populated the domains.
template <int _Options, typename _StorageIndex>
SchwartzSamplingPreconditioner &analyzePattern(
const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex> &mat) {
CHECK(m_domain_indicies.size() > 0,
"SchwartzSamplingPreconditioner::analyzePattern(): cannot analyze "
"sparse "
"matrix, "
"call analyzePattern using a Aboria MatrixReplacement class first");
return *this;
}
// Eigen::Ref-to-sparse overload: same as the sparse-matrix overload above —
// only checks that a MatrixReplacement analysis was already performed.
template <int _Options, typename _StorageIndex, int RefOptions,
typename RefStrideType>
SchwartzSamplingPreconditioner &
analyzePattern(const Eigen::Ref<
const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex>,
RefOptions, RefStrideType> &mat) {
CHECK(m_domain_indicies.size() > 0,
"SchwartzSamplingPreconditioner::analyzePattern(): cannot analyze "
"sparse "
"matrix, "
"call analyzePattern using a Aboria MatrixReplacement class first");
return *this;
}
// Dense-matrix overload: likewise only verifies that the decomposition was
// already derived from a MatrixReplacement.
template <typename Derived>
SchwartzSamplingPreconditioner &
analyzePattern(const Eigen::DenseBase<Derived> &mat) {
CHECK(m_domain_indicies.size() > 0,
"SchwartzSamplingPreconditioner::analyzePattern(): cannot analyze "
"dense "
"matrix, "
"call analyzePattern need to pass a Aboria MatrixReplacement class "
"first");
return *this;
}
// Factorize each sub-domain: assemble the dense (indicies + buffer) x
// (indicies + buffer) sub-matrix of mat and hand it to the per-domain
// solver.  The error messages previously claimed the failure was in
// solve(), and the column check repeated "rows of mat"; both are fixed.
template <typename MatType>
SchwartzSamplingPreconditioner &factorize(const MatType &mat) {
  LOG(2, "SchwartzSamplingPreconditioner: factorizing domain");
  eigen_assert(static_cast<typename MatType::Index>(m_rows) == mat.rows() &&
               "SchwartzSamplingPreconditioner::factorize(): invalid number "
               "of rows of mat");
  eigen_assert(static_cast<typename MatType::Index>(m_cols) == mat.cols() &&
               "SchwartzSamplingPreconditioner::factorize(): invalid number "
               "of cols of mat");
  m_domain_factorized_matrix.resize(m_domain_indicies.size());
  matrix_type domain_matrix;
  for (size_t domain_index = 0;
       domain_index < m_domain_factorized_matrix.size(); ++domain_index) {
    const storage_vector_type &buffer = m_domain_buffer[domain_index];
    const storage_vector_type &indicies = m_domain_indicies[domain_index];
    solver_type &solver = m_domain_factorized_matrix[domain_index];
    const size_t size = indicies.size() + buffer.size();
    domain_matrix.resize(size, size);
    // Fill the dense sub-matrix: interior (indicies) rows first, buffer
    // rows appended after, with columns in the same order.
    size_t i = 0;
    for (const size_t &big_index_i : indicies) {
      size_t j = 0;
      for (const size_t &big_index_j : indicies) {
        domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
      }
      for (const size_t &big_index_j : buffer) {
        domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
      }
      ++i;
    }
    for (const size_t &big_index_i : buffer) {
      size_t j = 0;
      for (const size_t &big_index_j : indicies) {
        domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
      }
      for (const size_t &big_index_j : buffer) {
        domain_matrix(i, j++) = mat.coeff(big_index_i, big_index_j);
      }
      ++i;
    }
    solver.compute(domain_matrix);
    // Sanity check the factorization against a random rhs and warn when it
    // looks inaccurate or singular.
    Eigen::VectorXd b = Eigen::VectorXd::Random(domain_matrix.rows());
    Eigen::VectorXd x = solver.solve(b);
    double relative_error = (domain_matrix * x - b).norm() / b.norm();
    if (relative_error > 1e-3 || std::isnan(relative_error)) {
      std::cout << "domain index = " << domain_index
                << ": relative error = " << relative_error << std::endl;
    }
  }
  m_isInitialized = true;
  return *this;
}
// Convenience wrapper: analyzePattern followed by factorize.
template <typename MatType>
SchwartzSamplingPreconditioner &compute(const MatType &mat) {
analyzePattern(mat);
return factorize(mat);
}
/** \internal
 * Apply the preconditioner: for each domain, gather the rhs values for the
 * domain's interior and buffer indices, solve against the factorized
 * sub-matrix, and scatter only the interior part of the solution back into
 * x (buffer values are discarded — additive Schwartz with overlap).
 */
template <typename Rhs, typename Dest>
void _solve_impl(const Rhs &b, Dest &x) const {
  vector_type domain_x;
  vector_type domain_b;
  x = b;
  for (size_t i = 0; i < m_domain_indicies.size(); ++i) {
    const storage_vector_type &buffer = m_domain_buffer[i];
    const storage_vector_type &indicies = m_domain_indicies[i];
    // Skip empty domains.  The previous guard tested
    // m_domain_indicies.size(), which is always non-zero inside this loop,
    // instead of this particular domain's size.
    if (indicies.size() == 0)
      continue;
    const size_t nb = indicies.size() + buffer.size();
    domain_x.resize(nb);
    domain_b.resize(nb);
    // gather rhs values from the big vector (interior first, then buffer)
    size_t sub_index = 0;
    for (size_t j = 0; j < indicies.size(); ++j) {
      domain_b[sub_index++] = b[indicies[j]];
    }
    for (size_t j = 0; j < buffer.size(); ++j) {
      domain_b[sub_index++] = b[buffer[j]];
    }
    // solve domain
    domain_x = m_domain_factorized_matrix[i].solve(domain_b);
    // scatter the interior solution back into the big vector
    sub_index = 0;
    for (size_t j = 0; j < indicies.size(); ++j) {
      x[indicies[j]] = domain_x[sub_index++];
    }
  }
}
// Eigen entry point: returns a lazy Eigen::Solve expression that applies
// this preconditioner to b (evaluation calls _solve_impl).  Requires a
// prior successful factorize()/compute().
template <typename Rhs>
inline const Eigen::Solve<SchwartzSamplingPreconditioner, Rhs>
solve(const Eigen::MatrixBase<Rhs> &b) const {
eigen_assert(static_cast<typename Rhs::Index>(m_rows) == b.rows() &&
"SchwartzSamplingPreconditioner::solve(): invalid number of "
"rows of the "
"right hand side matrix b");
eigen_assert(m_isInitialized &&
"SchwartzSamplingPreconditioner is not initialized.");
return Eigen::Solve<SchwartzSamplingPreconditioner, Rhs>(*this,
b.derived());
}
// Application of this preconditioner never reports failure.
Eigen::ComputationInfo info() { return Eigen::Success; }
}; // namespace Aboria
/// Nystrom-approximation preconditioner for Aboria MatrixReplacement
/// operators.  Each diagonal block contributes one domain; within a domain
/// a subset of (up to m_random) sample indices is chosen and the kernel is
/// approximated from Kuu and Kux evaluated at those samples.
template <typename Solver> class NystromPreconditioner {
  typedef double Scalar;
  typedef size_t Index;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> matrix_type;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> vector_type;
  typedef Solver solver_type;
  typedef std::vector<size_t> storage_vector_type;
  typedef std::vector<storage_vector_type> connectivity_type;

protected:
  bool m_isInitialized;

private:
  size_t m_random;  // number of sample indices drawn per domain
  double m_lambda;  // regularisation parameter used in _solve_impl
  std::vector<storage_vector_type> m_domain_indicies;
  std::vector<solver_type> m_domain_factorized_matrix;
  std::vector<matrix_type> m_domain_Kux;
  std::vector<vint2> m_domain_range;  // [start, end) row range per domain
  Index m_rows;
  Index m_cols;

public:
  typedef typename vector_type::StorageIndex StorageIndex;
  enum {
    ColsAtCompileTime = Eigen::Dynamic,
    MaxColsAtCompileTime = Eigen::Dynamic
  };

  NystromPreconditioner()
      : m_isInitialized(false), m_random(0), m_lambda(1e-8) {}

  // Delegate to the default constructor so that m_isInitialized, m_random
  // and m_lambda are initialised before compute() reads them (the previous
  // version read uninitialized members — undefined behaviour).
  template <typename MatType>
  explicit NystromPreconditioner(const MatType &mat)
      : NystromPreconditioner() {
    compute(mat);
  }

  Index rows() const { return m_rows; }
  Index cols() const { return m_cols; }

  void set_number_of_random_particles(size_t n) { m_random = n; }
  void set_lambda(double val) { m_lambda = val; }

  // Create one domain for this kernel block and choose its sample indices.
  template <typename Kernel>
  void analyze_impl_block(const Index start_row, const Kernel &kernel) {
    typedef typename Kernel::row_elements_type row_elements_type;
    typedef typename Kernel::col_elements_type col_elements_type;
    static_assert(std::is_same<row_elements_type, col_elements_type>::value,
                  "Nystrom preconditioner restricted to identical row and col "
                  "particle sets");
    const row_elements_type &a = kernel.get_row_elements();
    CHECK(&a == &(kernel.get_col_elements()),
          "Nystrom preconditioner restricted to identical row and col "
          "particle "
          "sets");
    const size_t domain_index = m_domain_indicies.size();
    m_domain_indicies.push_back(connectivity_type::value_type());
    m_domain_range.push_back(vint2(start_row, start_row + a.size()));
    m_domain_Kux.push_back(matrix_type());
    storage_vector_type &indicies = m_domain_indicies[domain_index];
    if (m_random >= a.size()) {
      // add all indicies.  Offset by start_row so that, like the random
      // branch below, the stored indices are global matrix indices (the
      // previous iota started at 0, which was only correct for the first
      // block).
      indicies.resize(a.size());
      std::iota(indicies.begin(), indicies.end(), start_row);
    } else {
      // add some random indicies (global, i.e. offset by start_row),
      // rejecting duplicates
      std::uniform_int_distribution<int> uniform_index(0, a.size() - 1);
      std::default_random_engine generator;
      for (size_t d = 0; d < m_random; ++d) {
        bool in_indicies;
        size_t proposed_index;
        do {
          proposed_index = uniform_index(generator) + start_row;
          // check not in indicies
          in_indicies =
              indicies.end() !=
              std::find(indicies.begin(), indicies.end(), proposed_index);
        } while (in_indicies);
        indicies.push_back(proposed_index);
      }
    }
    ASSERT(indicies.size() > 0, "no particles in domain");
  }

  // Overload for zero kernels: contributes no domain.
  template <typename RowParticles, typename ColParticles>
  void
  analyze_impl_block(const Index start_row,
                     const KernelZero<RowParticles, ColParticles> &kernel) {}

  // Iterate the diagonal blocks of the MatrixReplacement (pre-C++17 pack
  // expansion idiom).
  template <unsigned int NI, unsigned int NJ, typename Blocks, std::size_t... I>
  void analyze_impl(const MatrixReplacement<NI, NJ, Blocks> &mat,
                    detail::index_sequence<I...>) {
    int dummy[] = {0, (analyze_impl_block(mat.template start_row<I>(),
                                          std::get<I * NJ + I>(mat.m_blocks)),
                       0)...};
    static_cast<void>(dummy);
  }

  // Build the domain decomposition from an Aboria MatrixReplacement and log
  // summary statistics.
  template <unsigned int NI, unsigned int NJ, typename Blocks>
  NystromPreconditioner &
  analyzePattern(const MatrixReplacement<NI, NJ, Blocks> &mat) {
    LOG(2, "NystromPreconditioner: analyze pattern");
    m_rows = mat.rows();
    m_cols = mat.cols();
    analyze_impl(mat, detail::make_index_sequence<NI>());
    // Minimum initialised to -1 ("unset") rather than an arbitrary cap of
    // 1000, which mis-reported the minimum for large domains.
    int count = 0;
    int minsize_indicies = -1;
    int maxsize_indicies = 0;
    for (size_t domain_index = 0; domain_index < m_domain_indicies.size();
         ++domain_index) {
      const int size_indicies = m_domain_indicies[domain_index].size();
      count += size_indicies;
      if (minsize_indicies < 0 || size_indicies < minsize_indicies)
        minsize_indicies = size_indicies;
      if (size_indicies > maxsize_indicies)
        maxsize_indicies = size_indicies;
    }
    LOG(2, "NystromPreconditioner: finished analysis, found "
               << m_domain_indicies.size() << " domains, with "
               << minsize_indicies << "--" << maxsize_indicies << " particles ("
               << count << " total)")
    return *this;
  }

  // Sparse/dense overloads: the decomposition can only come from a
  // MatrixReplacement, so these just verify it was already derived.
  template <int _Options, typename _StorageIndex>
  NystromPreconditioner &analyzePattern(
      const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex> &mat) {
    CHECK(m_domain_indicies.size() > 0,
          "NystromPreconditioner::analyzePattern(): cannot analyze sparse "
          "matrix, "
          "call analyzePattern using a Aboria MatrixReplacement class first");
    return *this;
  }

  template <int _Options, typename _StorageIndex, int RefOptions,
            typename RefStrideType>
  NystromPreconditioner &
  analyzePattern(const Eigen::Ref<
                 const Eigen::SparseMatrix<Scalar, _Options, _StorageIndex>,
                 RefOptions, RefStrideType> &mat) {
    CHECK(m_domain_indicies.size() > 0,
          "NystromPreconditioner::analyzePattern(): cannot analyze sparse "
          "matrix, "
          "call analyzePattern using a Aboria MatrixReplacement class first");
    return *this;
  }

  template <typename Derived>
  NystromPreconditioner &analyzePattern(const Eigen::DenseBase<Derived> &mat) {
    CHECK(m_domain_indicies.size() > 0,
          "NystromPreconditioner::analyzePattern(): cannot analyze dense "
          "matrix, "
          "call analyzePattern need to pass a Aboria MatrixReplacement class "
          "first");
    return *this;
  }

  // Assemble Kuu (samples x samples) and Kux (samples x domain) for each
  // domain and factorize Kuu + Kux*Kux^T.  Assert messages previously named
  // "SchwartzPreconditioner::solve()"; they now identify this function, and
  // the column check says "cols".
  template <typename MatType>
  NystromPreconditioner &factorize(const MatType &mat) {
    LOG(2, "NystromPreconditioner: factorizing domain");
    eigen_assert(
        static_cast<typename MatType::Index>(m_rows) == mat.rows() &&
        "NystromPreconditioner::factorize(): invalid number of rows of mat");
    eigen_assert(
        static_cast<typename MatType::Index>(m_cols) == mat.cols() &&
        "NystromPreconditioner::factorize(): invalid number of cols of mat");
    m_domain_factorized_matrix.resize(m_domain_indicies.size());
    matrix_type Kuu;
    for (size_t domain_index = 0;
         domain_index < m_domain_factorized_matrix.size(); ++domain_index) {
      const storage_vector_type &indicies = m_domain_indicies[domain_index];
      solver_type &solver = m_domain_factorized_matrix[domain_index];
      vint2 &range = m_domain_range[domain_index];
      matrix_type &Kux = m_domain_Kux[domain_index];
      const size_t size = indicies.size();
      Kuu.resize(size, size);
      Kux.resize(size, range[1] - range[0]);
      size_t i = 0;
      for (const size_t &big_index_i : indicies) {
        size_t j = 0;
        for (const size_t &big_index_j : indicies) {
          Kuu(i, j++) = mat.coeff(big_index_i, big_index_j);
        }
        j = 0;
        for (int big_index_j = range[0]; big_index_j < range[1];
             ++big_index_j) {
          Kux(i, j++) = mat.coeff(big_index_i, big_index_j);
        }
        ++i;
      }
      Kuu += Kux * (Kux.transpose());
      solver.compute(Kuu);
      // Sanity check the factorization against a random rhs.
      Eigen::VectorXd b = Eigen::VectorXd::Random(Kuu.rows());
      Eigen::VectorXd x = solver.solve(b);
      double relative_error = (Kuu * x - b).norm() / b.norm();
      if (relative_error > 1e-3 || std::isnan(relative_error)) {
        std::cout << "relative error = " << relative_error << std::endl;
      }
    }
    m_isInitialized = true;
    return *this;
  }

  // Convenience wrapper: analyzePattern followed by factorize.
  template <typename MatType>
  NystromPreconditioner &compute(const MatType &mat) {
    analyzePattern(mat);
    return factorize(mat);
  }

  /** \internal
   * Apply the approximate inverse per domain to the matching segment of b.
   */
  template <typename Rhs, typename Dest>
  void _solve_impl(const Rhs &b, Dest &x) const {
    x = b;
    for (size_t i = 0; i < m_domain_indicies.size(); ++i) {
      // Take references: the previous code copied the Kux matrix and the
      // whole factorized solver on every iteration.
      const vint2 &range = m_domain_range[i];
      const matrix_type &Kux = m_domain_Kux[i];
      const solver_type &solver = m_domain_factorized_matrix[i];
      // segment() takes (start, length).  The previous code passed the end
      // index range[1] as the length, which is wrong for every domain
      // except the first.
      const int len = range[1] - range[0];
      x.segment(range[0], len) =
          (1.0 / m_lambda) *
          (matrix_type::Identity(len, len) -
           (Kux.transpose()) *
               solver.solve(Kux * b.segment(range[0], len)));
    }
  }

  // Eigen entry point: lazy Solve expression that applies _solve_impl.
  template <typename Rhs>
  inline const Eigen::Solve<NystromPreconditioner, Rhs>
  solve(const Eigen::MatrixBase<Rhs> &b) const {
    eigen_assert(
        static_cast<typename Rhs::Index>(m_rows) == b.rows() &&
        "NystromPreconditioner::solve(): invalid number of rows of the "
        "right hand side matrix b");
    eigen_assert(m_isInitialized &&
                 "NystromPreconditioner is not initialized.");
    return Eigen::Solve<NystromPreconditioner, Rhs>(*this, b.derived());
  }

  // Application of this preconditioner never reports failure.
  Eigen::ComputationInfo info() { return Eigen::Success; }
};
} // namespace Aboria
#endif // HAVE_EIGEN
#endif
|
GB_unaryop__identity_int16_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_uint32
// op(A') function: GB_tran__identity_int16_uint32
// C type: int16_t
// A type: uint32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator element-wise over a dense value array:
// Cx [p] = (int16_t) Ax [p] for p = 0..anz-1, via the GB_CAST_OP macro
// defined above.  (Auto-generated file; this function is a macro
// specialisation of the generic apply kernel.)
GrB_Info GB_unop__identity_int16_uint32
(
int16_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time via GB_control.h
return (GrB_NO_VALUE) ;
#else
// flat parallel loop; static schedule since every iteration costs the same
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint32_t -> int16_t, and apply
// the identity operator.  The actual work is performed by the generic
// transpose template in GB_unaryop_transpose.c, specialised via the GB_*
// macros defined above.
GrB_Info GB_tran__identity_int16_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 of the two-phase transpose: fill C using the row counts
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c_fft.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(LICENSE file) along with this program; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
FILE: c_fft.c
VERSION: 1.0
DATE: May 2004
AUTHOR: F. de Sande
COMMENTS TO: sande@csi.ull.es
DESCRIPTION: This program computes the Fast Fourier Transform
on an input signal
COMMENTS: The algorithm uses a divide and conquer strategy
and the transform is computed as a combination of the
transforms of the even and odd terms of the original signal.
The code requires nested Parallelism.
Function write_array() is provided only for debuging purposes.
(use a small size signal if you want to write it).
REFERENCES: James W. Cooley and John W. Tukey,
An Algorithm for the Machine Calculation of Complex Fourier Series,
Mathematics of Computation, 1965, vol. 19, no. 90, pg 297-301
http://en.wikipedia.org/wiki/Cooley-Tukey_FFT_algorithm
BASIC PRAGMAS: parallel for
USAGE: ./c_fft.par 8192
INPUT: The size of the input signal
OUTPUT: The code tests the correctness of the result for the input
FILE FORMATS: -
RESTRICTIONS: The size of the input signal MUST be a power of 2
REVISION HISTORY:
**************************************************************************/
//#include "OmpSCR.h"
#include <errno.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define KILO (1024)
#define DEFAULT_SIZE_IN_KB (64)
#define NUM_ARGS 1
#define NUM_TIMERS 1
#define ARG 1
typedef double doubleType;
typedef struct {
doubleType re;
doubleType im;
} Complex;
/* -----------------------------------------------------------------------
PROTOTYPES
* ----------------------------------------------------------------------- */
void initialize(unsigned Size, Complex *a);
void write_array(unsigned Size, Complex *a);
int test_array(unsigned Size, Complex *a);
void FFT(Complex *A, Complex *a, Complex *W, unsigned N, unsigned stride, Complex *D);
void Roots(unsigned Size, Complex *W);
unsigned get_params(int argc, char *argv[]);
/* -----------------------------------------------------------------------
IMPLEMENTATION
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
Routine: initialize
Description: Initialise a vector of complex numbers
Comment: all numbers have real part 1.0 and imaginary part 0.0
* ----------------------------------------------------------------------- */
/* Fill a complex vector with the constant test signal 1 + 0i. */
void initialize(unsigned Size, Complex *a) {
  for (unsigned i = 0; i < Size; i++) {
    a[i].re = 1.0;
    a[i].im = 0.0;
  }
}
/* -----------------------------------------------------------------------
Routine: write_array
Description: Display a vector of complex numbers
* ----------------------------------------------------------------------- */
/* Print each element of a complex vector, one per line (debugging aid). */
void write_array(unsigned Size, Complex *a) {
  unsigned i;
  for (i = 0; i < Size; i++) {
    printf("a[%2u] = [%.8lf,%.8lf]\n", i, a[i].re, a[i].im);
  }
}
/* -----------------------------------------------------------------------
Routine: test_array
Description: Test is true if the complex vector is of the form
[(Size,0),(0,0),...,(0,0)]
* ----------------------------------------------------------------------- */
/* Return 1 iff the vector is exactly [(Size,0),(0,0),...,(0,0)], i.e. the
   expected transform of the all-ones test signal; 0 otherwise. */
int test_array(unsigned Size, Complex *a) {
  unsigned k;
  if (a[0].re != Size || a[0].im != 0) {
    return 0;
  }
  for (k = 1; k < Size; k++) {
    if (a[k].re != 0.0 || a[k].im != 0.0) {
      return 0;
    }
  }
  return 1;
}
/* -----------------------------------------------------------------------
Procedure: Roots
Description: Computes roots of the Unary
Parameters:
unsigned Size, number of roots to compute
Complex *W, vector containing the roots
* ----------------------------------------------------------------------- */
/* Fill W[0..Size-1] with successive powers of Omega = e^{i*phi}, where
   phi = PI/Size (main() passes Size = N/2, so phi = 2*PI/N). */
void Roots(unsigned Size, Complex *W) {
register unsigned i;
double phi;
Complex Omega;
phi = 4 * atan(1.0) / (double)Size; /* PI/Size */
Omega.re = cos(phi);
Omega.im = sin(phi);
W[0].re = 1.0;
W[0].im = 0.0;
/* W[i] = W[i-1] * Omega (complex multiplication), so W[i] = Omega^i.
   Rounding error accumulates along the recurrence, which is acceptable
   for the sizes used here. */
for(i = 1; i < Size; i++) {
W[i].re = W[i-1].re * Omega.re - W[i-1].im * Omega.im;
W[i].im = W[i-1].re * Omega.im + W[i-1].im * Omega.re;
}
}
/* -----------------------------------------------------------------------
Procedure: FFT
Description: Recursive (divide and conquer) Fast Fourier Transform
Parameters:
Complex *A, transformed output signal
Complex *a, input signal
Complex *W, vector containing the roots
unsigned N, number of elements in a
unsigned stride, between consecutive elements in a to be considered
Complex *D, auxiliar vector to do combination
* ----------------------------------------------------------------------- */
/* Recursive Cooley-Tukey FFT.  A receives the transform of the N elements
   of a taken at the given stride; D is scratch space of the same size.
   A and D swap roles at each recursion level, so no extra copying of the
   input is needed.  N must be a power of 2. */
void FFT(Complex *A, Complex *a, Complex *W, unsigned N,
unsigned stride, Complex *D) {
Complex *B, *C;
Complex Aux, *pW;
unsigned n;
int i;
if (N == 1) {
/* base case: transform of a single sample is the sample itself */
A[0].re = a[0].re;
A[0].im = a[0].im;
}
else {
/* Division stage without copying input data */
n = (N >> 1); /* N = N div 2 */
/* Subproblems resolution stage: even samples (i=0) and odd samples
   (i=1) are transformed into the two halves of D, using the two halves
   of A as their scratch space (nested parallelism if enabled) */
#pragma omp parallel for
for(i = 0; i <= 1; i++) {
FFT(D + i * n, a + i * stride, W, n, stride << 1, A + i * n);
}
/* Combination stage: butterfly combining the even (B) and odd (C)
   half-transforms with twiddle factors W[i*stride].  Note i is int and
   n unsigned, so i is converted to unsigned in "i <= n - 1"; safe here
   because n >= 1 on this branch. */
B = D;
C = D + n;
#pragma omp parallel for default(none) private(i, Aux, pW) shared(stride, n, A, B, C, W)
for(i = 0; i <= n - 1; i++) {
pW = W + i * stride;
Aux.re = pW->re * C[i].re - pW->im * C[i].im;
Aux.im = pW->re * C[i].im + pW->im * C[i].re;
A[i].re = B[i].re + Aux.re;
A[i].im = B[i].im + Aux.im;
A[i+n].re = B[i].re - Aux.re;
A[i+n].im = B[i].im - Aux.im;
}
}
}
/* ----------------------------------------------------------------------- */
/* Parse the optional size argument (in Kb).  Returns the parsed value, or
   DEFAULT_SIZE_IN_KB when no argument is given; exits with a usage message
   on any invalid input.  Uses strtol instead of atoi so that garbage input
   (which atoi silently turns into 0) is rejected. */
unsigned get_params(int argc, char *argv[]) {
  char usage_str[] = "<size_in_Kb>";
  unsigned sizeInKb;
  if (argc == 2) {
    char *end;
    errno = 0;
    long parsed = strtol(argv[1], &end, 10);
    /* reject empty/trailing-garbage/out-of-range/non-positive input */
    if (end == argv[1] || *end != '\0' || errno == ERANGE || parsed <= 0) {
      printf("\nUse: %s %s\n", argv[0], usage_str);
      exit(-1);
    }
    sizeInKb = (unsigned)parsed;
  } else if (argc == 1) {
    sizeInKb = DEFAULT_SIZE_IN_KB;
  } else {
    printf("\nUse: %s %s\n", argv[0], usage_str);
    exit(-1);
  }
  printf("\nUse: %s %s\n", argv[0], usage_str);
  /* %u matches the unsigned argument (the old %d was a format mismatch) */
  printf("Running with Size: %u K\n", sizeInKb);
  return sizeInKb;
}
/* ----------------------------------------------------------------------- */
/* Driver: build the all-ones test signal of N = KILO * ARG samples (the
   OSCR harness that used to read N from the command line is commented
   out, so ARG = 1 gives a fixed 1024-sample signal), run the FFT, and
   check the result against the known transform [(N,0),(0,0),...]. */
int main(int argc, char *argv[]) {
unsigned N;
Complex *a, *A, *W, *D;
int NUMTHREADS;
/* NUMTHREADS and the three arrays below are only used by the commented-out
   OSCR benchmarking harness; they are kept so the harness can be
   re-enabled without changes. */
char *PARAM_NAMES[NUM_ARGS] = {"Size of the input signal (in Kb)"};
char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" };
char *DEFAULT_VALUES[NUM_ARGS] = {"64"};
NUMTHREADS = 1; //omp_get_num_threads();
//OSCR_init (NUMTHREADS, "Divide and Conquer Fast Fourier Transform.", "Use 'fft' <size (in K)>", NUM_ARGS,
// PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
// argc, argv);
N = KILO * ARG; // OSCR_getarg_int(1);
/* N = KILO * get_params(argc, argv); */
/* Memory allocation */
a = (Complex*)calloc(N, sizeof(Complex));
A = (Complex*)calloc(N, sizeof(Complex));
D = (Complex*)calloc(N, sizeof(Complex));
W = (Complex*)calloc(N>>1, sizeof(Complex));
if((a==NULL) || (A==NULL) || (D==NULL) || (W==NULL)) {
printf("Not enough memory initializing arrays\n");
exit(1);
}
initialize(N, a); /* Generate test input signal */
/* write_array(N, a); */
Roots(N >> 1, W); /* Initialise the vector of imaginary roots */
//OSCR_timer_start(0);
FFT(A, a, W, N, 1, D);
//OSCR_timer_stop(0);
/* write_array(N, A); */
/* Display results and time */
printf("Test array: ");
if (test_array(N, A))
printf("Ok\n");
else
printf("Fails\n");
//OSCR_report(1, TIMERS_NAMES);
free(W);
free(D);
free(A);
free(a);
return 0;
}
/*
* vim:ts=2:sw=2:
*/
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
/// Returns the AST node bound to \c ID.
///
/// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
template <typename T>
const T *getNodeAs(StringRef ID) const {
return MyBoundNodes.getNodeAs<T>(ID);
}
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
/// type of \c clang::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
const IDToNodeMap &getMap() const {
return MyBoundNodes.getMap();
}
private:
// Only the matcher infrastructure may construct BoundNodes instances;
// users receive them fully populated via MatchFinder callbacks.
friend class internal::BoundNodesTreeBuilder;
/// Create BoundNodes from a pre-filled map of bindings.
BoundNodes(internal::BoundNodesMap &MyBoundNodes)
: MyBoundNodes(MyBoundNodes) {}
internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Use the expansion location so that nodes produced by macro expansion are
  // attributed to the file that contains the expansion.
  auto &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  // Guard against invalid locations before querying the SourceManager.
  return ExpansionLoc.isValid() && SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
                              AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
                                                              TypeLoc),
                              RegExp) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  // Nodes whose expansion file has no underlying file entry never match.
  const auto *Entry = SM.getFileEntryForID(SM.getFileID(ExpansionLoc));
  return Entry && RegExp->match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // Verifies that the statement's beginning and ending are both expanded from
  // the same instance of the given macro.
  auto &Context = Finder->getASTContext();
  llvm::Optional<SourceLocation> Begin =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
  if (!Begin)
    return false;
  llvm::Optional<SourceLocation> End =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
  return End && *Begin == *End;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template arguments (with location info).
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgumentLoc()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc>
templateArgumentLoc;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived1 : public Base {}; // matches 'Base'
/// struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_public;
}
/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_protected;
}
/// Matches private C++ declarations and C++ base specifiers that specify
/// private inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
///
/// \code
/// struct Base {};
/// struct Derived1 : private Base {}; // matches 'Base'
/// class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a : 2;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Members that are not bit-fields never match; only evaluate the width for
  // actual bit-fields.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match.
  const Expr *Init = Node.getInClassInitializer();
  return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
///
/// Given
/// \code
/// int main() {}
/// void f() {}
/// \endcode
/// functionDecl(isMain())
/// matches 'int main() {}' but not 'void f() {}'.
AST_MATCHER(FunctionDecl, isMain) {
return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate();
  return Specialized != nullptr &&
         InnerMatcher.matches(*Specialized, Finder, Builder);
}
/// Matches a declaration that has been implicitly added
/// by the compiler (e.g. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType, FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Matches when any one of the node's specialization arguments satisfies
  // InnerMatcher.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
const internal::Matcher<T> &InnerMatcher) {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>();
}
// Overload of \c traverse for bindable matchers; the result stays bindable.
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
return internal::BindableMatcher<T>(
internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>());
}
// Overload of \c traverse for variadic-operator matchers; defers wrapping by
// returning a TraversalWrapper around the inner matcher.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
TK, InnerMatcher);
}
// Overload of \c traverse for argument-adapting matcher function adaptors.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename T, typename ToTypes>
internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
return internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
ToTypes>>(TK, InnerMatcher);
}
// Overload of \c traverse for one-parameter polymorphic matchers.
template <template <typename T, typename P1> class MatcherT, typename P1,
typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
MatcherT, P1, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
TK, InnerMatcher);
}
// Overload of \c traverse for two-parameter polymorphic matchers.
template <template <typename T, typename P1, typename P2> class MatcherT,
typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  QualType Stripped = Node.IgnoreParens();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType, FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // An out-of-range index never matches.
  if (N >= Args.size())
    return false;
  return InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Only type template arguments can match.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template template arguments can match.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Only declaration template arguments can match.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression template arguments can match.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
internal::Matcher<QualType>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Integral)
return false;
return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) {
  // Only integral template arguments can match.
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare against the canonical base-10 rendering of the argument.
  return Node.getAsIntegral().toString(10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if the expression has one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // No syntactic form means the node only exists in semantic form; no match.
  const Expr *SyntacticForm = Node.getSyntacticForm();
  if (!SyntacticForm)
    return false;
  return InnerMatcher.matches(*SyntacticForm, Finder, Builder);
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can appear in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new or delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
///     forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///     for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The increment is optional in a for statement; match only when present.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
///     forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///     for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init statement is optional in a for statement; match only when present.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // Guard against a malformed (e.g. dependent or invalid) range-for node
  // that has no loop variable.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization expression of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Guard against a malformed range-for node that has no range expression.
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can appear in
/// BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
/// staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
///     point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///     point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  const unsigned NumDesignators = Node.size();
  return NumDesignators == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information that may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match against the type the trait is applied to, e.g. the type of 'x' in
  // 'sizeof(x)' or the named type in 'sizeof(int)'.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // True iff this expression is the requested trait (sizeof, alignof, ...).
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Both UETT_AlignOf and UETT_PreferredAlignOf spell an alignof-style query;
  // accept either kind before applying the caller's matcher.
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict to UETT_SizeOf before applying the caller's matcher.
  return stmt(
      unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher accepts a list of names; wrap the single name in a vector.
  std::vector<std::string> Names;
  Names.push_back(std::string(Name));
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher(std::move(Names)));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // Prefix "::" so the regex can anchor on the fully qualified name.
  const std::string FullName = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // Alias the long polymorphic-matcher type so the construction reads clearly.
  using ResultT = internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>;
  // The underlying matcher accepts a list of operator names.
  return ResultT({std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // C++ records and Objective-C interfaces use separate hierarchy walks.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);
  // Not a C++ record, so it must be an Objective-C class.
  const auto *ID = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(ID, Base, Builder, /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a NamedDecl.
  if (BaseName.empty())
    return false;
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*RD, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Delegates to the shared helper, which inspects direct and indirect bases.
  return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
///
/// Example:
/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; // doesn't match
/// \endcode
AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Only a defined record has a base-specifier list to inspect.
  if (!Node.hasDefinition())
    return false;
  // Stop on the first direct base the inner matcher accepts.
  for (const CXXBaseSpecifier &BaseSpec : Node.bases())
    if (BaseSpecMatcher.matches(BaseSpec, Finder, Builder))
      return true;
  return false;
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Accept either an exact match of Base or any class derived from it.
  const auto Combined = anyOf(Base, isDerivedFrom(Base));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Combined).matches(*RD, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Combined).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a NamedDecl.
  if (BaseName.empty())
    return false;
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*RD, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // C++ records and Objective-C interfaces use separate hierarchy walks; both
  // restrict to direct bases only.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);
  const auto *ID = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(ID, Base, Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a NamedDecl.
  if (BaseName.empty())
    return false;
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*RD, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Scans the record's methods in declaration order; the first one accepted
  // by InnerMatcher makes the whole matcher succeed.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // Delegates to CXXRecordDecl::isLambda(), which is true only for the
  // implicit closure class generated for a lambda expression.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // eachOf emits a result both when the node itself matches and for every
  // matching descendant, yielding one result per occurrence.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Alias the long polymorphic-matcher type so the construction reads clearly.
  using ResultT = internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>;
  return ResultT(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // getUnderlyingDecl() follows e.g. using-declarations to the aliased decl.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parentheses and implicit casts from the object expression before
  // handing it to the inner matcher.
  const Expr *Object =
      Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  return Object != nullptr && InnerMatcher.matches(*Object, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match on the static type of the message receiver.
  return InnerMatcher.matches(Node.getReceiverType(), Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Delegates to ObjCMethodDecl::isClassMethod() ('+'-declared methods).
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Delegates to ObjCMethodDecl::isInstanceMethod() ('-'-declared methods).
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Delegates to ObjCMessageExpr::isClassMessage().
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Delegates to ObjCMessageExpr::isInstanceMessage().
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only instance messages carry an instance-receiver expression.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  // Strip parens/implicit casts before matching, consistent with 'on'.
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Compare the message's selector, spelled out as a string such as
  // "loadHTMLString:baseURL:", against the expected name. Using operator==
  // instead of compare(...) == 0 is the idiomatic equality form.
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  // Run the regex over the selector's printed form.
  return RegExp->match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // A null selector usually indicates a malformed AST; see doc comment above.
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Unary selectors take no arguments (no ':' in the selector name).
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Keyword selectors contain at least one ':' and take arguments.
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Keyword selectors have one argument per ':'; unary selectors have zero.
  return N == Node.getSelector().getNumArgs();
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The callee expression is e.g. 'f' in 'f()' or 'y.x' in 'y.x()'.
  const Expr *Callee = Node.getCallee();
  return Callee != nullptr && InnerMatcher.matches(*Callee, Finder, Builder);
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Reuses hasDeclaration, which resolves a CallExpr to its callee's decl.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // getUnderlyingType returns a null QualType when no type is available.
  const QualType QT = internal::getUnderlyingType(Node);
  return !QT.isNull() && InnerMatcher.matches(QT, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // getUnderlyingType returns a null QualType when no type is available;
  // otherwise forward to the type's declaration.
  const QualType QT = internal::getUnderlyingType(Node);
  return !QT.isNull() &&
         qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicit declarations (e.g. implicit destructors) carry no written type.
  const auto *TSI = Node.getTypeSourceInfo();
  if (TSI == nullptr)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Compare against the type's printed representation, e.g. "class Y *".
  return Node.getAsString() == Name;
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Guard against null QualTypes before dereferencing the node.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Convenience form: match the declaration of the pointee type.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // getUnqualifiedDesugaredType strips sugar (typedefs, aliases) down to the
  // underlying type before matching.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null QualTypes and non-reference types never match.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  // For references the referenced type is exposed as the "pointee" type.
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType has no canonical form to match against.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Express the Decl overload in terms of the QualType overload by matching
  // the referenced type's declaration.
  auto RefereeDeclMatcher = references(qualType(hasDeclaration(InnerMatcher)));
  return RefereeDeclMatcher.matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the implicit object argument exactly as written: unlike `on`,
  // parentheses and implicit casts are NOT stripped.
  if (const Expr *ObjectArg = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ObjectArg, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either a direct type match on the object argument, or a pointer
  // to a matching type (covers both obj.m() and ptr->m()).
  auto ObjectMatcher = onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
  return ObjectMatcher.matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same strategy as the QualType overload, but matching the type's
  // declaration instead.
  auto ObjectMatcher = onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
  return ObjectMatcher.matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Run the inner matcher on the declaration this reference names; be
  // defensive about a null declaration.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references resolved through a using-declaration have a
  // UsingShadowDecl as their found declaration.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Succeed on the first candidate in the overload set that satisfies the
  // inner matcher.
  auto First = Node.decls_begin();
  auto Last = Node.decls_end();
  return matchesFirstInPointerRange(InnerMatcher, First, Last, Finder,
                                    Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Statements declaring more than one entity never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer also finds an initializer attached to a different
  // redeclaration of the same variable.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // True for function-local variables declared 'static'.
  const bool IsStaticLocal = Node.isStaticLocal();
  return IsStaticLocal;
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // True for non-static local variables (function scope, automatic storage).
  const bool IsLocal = Node.hasLocalStorage();
  return IsLocal;
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // True for any variable without local storage (globals and static locals).
  const bool IsGlobal = Node.hasGlobalStorage();
  return IsGlobal;
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Compare the declared storage duration against SD_Automatic.
  return SD_Automatic == Node.getStorageDuration();
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Compare the declared storage duration against SD_Static.
  return SD_Static == Node.getStorageDuration();
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // Compare the declared storage duration against SD_Thread.
  return SD_Thread == Node.getStorageDuration();
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // True for catch-block (C++ or ObjC @catch) exception variables.
  const bool IsExceptionVar = Node.isExceptionVariable();
  return IsExceptionVar;
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  // Count includes arguments filled in from default arguments.
  const unsigned NumArgs = Node.getNumArgs();
  return NumArgs == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range argument indices never match.
  if (N >= Node.getNumArgs())
    return false;
  // Parentheses and implicit casts around the argument are ignored.
  const Expr *Arg = Node.getArg(N)->IgnoreParenImpCasts();
  return InnerMatcher.matches(*Arg, Finder, Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range init indices never match.
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Count the declarations in this statement and compare against N.
  const ptrdiff_t NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  return NumDecls == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Walk forward to the N'th declaration in a single pass, failing if the
  // statement declares fewer than N+1 entities.
  DeclStmt::const_decl_iterator It = Node.decl_begin();
  const DeclStmt::const_decl_iterator End = Node.decl_end();
  for (unsigned I = 0; I != N; ++I) {
    if (It == End)
      return false;
    ++It;
  }
  if (It == End)
    return false;
  return InnerMatcher.matches(**It, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch (...) is the only handler without an exception declaration.
  return !Node.getExceptionDecl();
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Succeed on the first ctor initializer that satisfies the inner matcher.
  auto First = Node.init_begin();
  auto Last = Node.init_end();
  return matchesFirstInPointerRange(InnerMatcher, First, Last, Finder,
                                    Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Base and delegating initializers have no member field and never match.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match against the initializer expression, if one is present.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  // True for initializers spelled in source, false for compiler-synthesized
  // ones.
  const bool Written = Node.isWritten();
  return Written;
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  // True when the initializer initializes a base class, not a member.
  const bool InitializesBase = Node.isBaseInitializer();
  return InitializesBase;
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // True when the initializer initializes a data member, not a base class.
  const bool InitializesMember = Node.isMemberInitializer();
  return InitializesMember;
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Try each argument in turn; commit the bindings of the first successful
  // match and discard bindings from failed attempts.
  for (const Expr *Argument : Node.arguments()) {
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Argument, Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  // Only variable captures are considered; captures of 'this' are skipped.
  for (const LambdaCapture &Capture : Node.captures()) {
    if (!Capture.capturesVariable())
      continue;
    // Commit the bindings of the first matching capture only.
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // NOTE: the inner matcher is never run; any capture of 'this' matches.
  // The parameter exists only to select this overload.
  for (const LambdaCapture &Capture : Node.captures())
    if (Capture.capturesThis())
      return true;
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // True when the constructor call uses braced list-initialization.
  const bool IsListInit = Node.isListInitialization();
  return IsListInit;
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // True when the constructed object must be zero-initialized first.
  const bool NeedsZeroInit = Node.requiresZeroInitialization();
  return NeedsZeroInit;
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Out-of-range parameter indices never match.
  if (N >= Node.parameters().size())
    return false;
  const auto *Param = Node.parameters()[N];
  return InnerMatcher.matches(*Param, Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates the bindings of every (argument, parameter) pair that
  // matches; all accumulated matches are committed to Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // ParamIndex lags ArgIndex by one when the implicit object argument was
  // skipped, pairing argument i with parameter i-1 in that case.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    // Each argument is tried against a fresh copy of the incoming bindings;
    // parens and casts around the argument are ignored.
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match the whole call/construct expression to reach the callee's
      // parameter at ParamIndex, for both constructor and function callees.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  // The parameter's position is determined by looking it up in its parent
  // function, block, or ObjC method; locals are named to avoid shadowing
  // the clang::Decl type.
  const clang::DeclContext *Parent = Node.getParentFunctionOrMethod();
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(Parent))
    return N < FD->param_size() && FD->getParamDecl(N) == &Node;
  if (const auto *BD = dyn_cast_or_null<BlockDecl>(Parent))
    return N < BD->param_size() && BD->getParamDecl(N) == &Node;
  if (const auto *MD = dyn_cast_or_null<ObjCMethodDecl>(Parent))
    return N < MD->param_size() && MD->getParamDecl(N) == &Node;
  return false;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Succeed on the first parameter that satisfies the inner matcher.
  auto First = Node.param_begin();
  auto Last = Node.param_end();
  return matchesFirstInPointerRange(InnerMatcher, First, Last, Finder,
                                    Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Variadic ellipses are not counted as parameters.
  const unsigned NumParams = Node.getNumParams();
  return NumParams == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) {
  // Covers [[noreturn]] and __attribute__((noreturn)).
  return Node.isNoReturn();
}
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Run the inner matcher on the function's declared return type.
  QualType ReturnType = Node.getReturnType();
  return InnerMatcher.matches(ReturnType, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // True for declarations with C language linkage.
  const bool HasCLinkage = Node.isExternC();
  return HasCLinkage;
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Checks the written storage-class specifier, not storage duration.
  return SC_Static == Node.getStorageClass();
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  // True for functions defined as '= delete'.
  const bool Deleted = Node.isDeleted();
  return Deleted;
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  // True for functions defined as '= default'.
  const bool Defaulted = Node.isDefaulted();
  return Defaulted;
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Functions without a prototype cannot carry an exception specification.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  return FnTy != nullptr && FnTy->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  // (Unresolved specs arise before the spec has been instantiated or
  // evaluated, e.g. in templates or defaulted members.)
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  // Otherwise defer to the resolved specification: noexcept / throw().
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // All three supported node types expose the same isConstexpr() query.
  const bool IsConstexpr = Node.isConstexpr();
  return IsConstexpr;
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Statements without an init-statement never match.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A condition may be absent (e.g. 'for (;;)'); that never matches.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  // Match the then-branch, guarding against a null statement.
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // An if-statement without an else-branch never matches.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSepcifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  //
  // Implemented as a filter over the candidate binding sets: every set in
  // which ID is NOT bound to the current node is removed, and the match
  // succeeds iff at least one set survives.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Only if-statements that declare their condition variable
  // (e.g. 'if (A *a = get())') have such a DeclStmt.
  if (const DeclStmt *CondVarStmt = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVarStmt, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the subscript's index expression, guarding against null.
  const Expr *IndexExpr = Node.getIdx();
  return IndexExpr != nullptr &&
         InnerMatcher.matches(*IndexExpr, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the subscript's base expression, guarding against null.
  const Expr *BaseExpr = Node.getBase();
  return BaseExpr != nullptr &&
         InnerMatcher.matches(*BaseExpr, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher abstracts over how each supported node type exposes its
  // body; a missing body (e.g. a function declaration) never matches.
  const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node);
  if (Body == nullptr)
    return false;
  return InnerMatcher.matches(*Body, Finder, Builder);
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *Compound = CompoundStmtMatcher<NodeType>::get(Node);
  if (Compound == nullptr)
    return false;
  // Succeeds on (and binds from) the first child statement that matches.
  return matchesFirstInPointerRange(InnerMatcher, Compound->body_begin(),
                                    Compound->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // CompoundStmt::size() is the number of direct child statements;
  // nested statements are not counted recursively.
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  // The actual comparison against the literal's value is implemented by
  // ValueEqualsMatcher; this wrapper only captures the expected value.
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher,
    ValueT>(Value);
}
// NOTE(review): these fixed-type overloads of \c equals (bool, unsigned,
// double) presumably exist so the matcher can be registered for dynamic use
// (e.g. clang-query), where the template above is not instantiable — confirm.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

// The double overload additionally supports FloatingLiteral.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compares against the spelling of the opcode (e.g. "||", "!"), not the
  // enum value.
  return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
///    hasAnyOperatorName("+", "-")
///  Is equivalent to
///    anyOf(hasOperatorName("+"), hasOperatorName("-"))
// Declared as a variadic-function object so any number of operator-name
// string arguments can be supplied; defined in the corresponding .cpp.
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
///            (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
///   struct S { S& operator=(const S&); };
///   void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Covers plain and compound assignment (=, +=, -=, ...); delegates to the
  // node's own classification.
  return Node.isAssignmentOp();
}

/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
///            (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
///   struct S { bool operator<(const S& other); };
///   void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *LHS = Node.getLHS())
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *RHS = Node.getRHS())
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
inline internal::Matcher<BinaryOperator>
hasOperands(const internal::Matcher<Expr> &Matcher1,
            const internal::Matcher<Expr> &Matcher2) {
  // Accepts the two operands in either order, but each matcher must be
  // satisfied by a different side.
  return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
               allOf(hasLHS(Matcher2), hasRHS(Matcher1)));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *SubExpr = Node.getSubExpr())
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
///   class URL { URL(string); };
///   URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
///   int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher selects the node-specific accessor
  // (getSubExpr for casts, getSourceExpr for opaque values).
  const Expr *const Source =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  if (Source == nullptr)
    return false;
  return InnerMatcher.matches(*Source, Finder, Builder);
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
///   int *p = 0;
/// \endcode
///
/// If the matcher is use from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Direct enum comparison against the cast kind recorded on the node.
  return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the type as the programmer spelled it, not the canonical type.
  return InnerMatcher.matches(Node.getTypeAsWritten(), Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // For an implicit cast, the expression's own type is the destination type.
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  return Node.isStruct();
}

/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  return Node.isUnion();
}

/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  return Node.isClass();
}

/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
// These four matchers discriminate on the keyword used at the declaration;
// each forwards to the corresponding TagDecl classification.
AST_MATCHER(TagDecl, isEnum) {
  return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // Each supported node type provides its own notion of "this particular
  // redeclaration is the defining one".
  return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  // C-style ellipsis variadics only; template parameter packs do not count.
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  if (const CXXRecordDecl *OwningClass = Node.getParent())
    return InnerMatcher.matches(*OwningClass, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
///   class A { virtual void f(); };
///   class B : public A { void f(); };
///   class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
///   that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
///   class A1 { virtual void f(); };
///   class A2 { virtual void f(); };
///   class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
///   once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Match against a copy of the current bindings so each overridden method
    // produces an independent match result.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  // Replace the incoming bindings with the accumulated per-override matches;
  // if nothing matched, Result is empty and the matcher fails.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches declarations of virtual methods and C++ base specifiers that specify
/// virtual inheritance.
///
/// Example:
/// \code
///   class A {
///    public:
///     virtual void x(); // matches x
///   };
/// \endcode
///
/// Example:
/// \code
///   class Base {};
///   class DirectlyDerived : virtual Base {}; // matches Base
///   class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
                                                        CXXBaseSpecifier)) {
  return Node.isVirtual();
}

/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
// Unlike isVirtual(), this does not match methods that are only implicitly
// virtual through overriding.
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  return Node.isVirtualAsWritten();
}

/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // "final" is represented as an attribute on both record and method decls;
  // `template` is required because hasAttr is a dependent member template.
  return Node.template hasAttr<FinalAttr>();
}

/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
///   void foo() const;
///   void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  return Node.isConst();
}

/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  return Node.isCopyAssignmentOperator();
}

/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either the semantic fact (there is at least one overridden method) or the
  // syntactic `override` keyword counts.
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // All three supported node types expose the same isArrow() accessor.
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
// The following QualType matchers dereference the QualType to query the
// underlying Type's classification.
AST_MATCHER(QualType, isInteger) {
  return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  return Node->isUnsignedIntegerType();
}

/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  return Node->isSignedIntegerType();
}

/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
///   void a(char);
///   void b(wchar_t);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  return Node->isAnyCharacterType();
}

/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
// Note: unlike the is*Type matchers above, the qualifier checks query the
// QualType itself, not the pointed-to Type.
AST_MATCHER(QualType, isConstQualified) {
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // getMemberDecl() is never null for a MemberExpr, so no null check here.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // For the dependent/unresolved variants, an implicit access has no base
  // expression to match against, so bail out rather than crash on getBase().
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds on the first shadow declaration that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T>  class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Accepts implicit instantiations as well as explicit instantiation
  // definitions/declarations; explicit specializations are excluded.
  const TemplateSpecializationKind Kind = Node.getTemplateSpecializationKind();
  return Kind == TSK_ImplicitInstantiation ||
         Kind == TSK_ExplicitInstantiationDefinition ||
         Kind == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A decl qualifies if it is itself an instantiation, or lexically contained
  // in one (hasAncestor).
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}

/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42;}
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition and
///   instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { }
///   template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
///   matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Complement of isTemplateInstantiation(): only user-written `template<>`
  // specializations.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapts a QualType matcher so it can be applied to TypeLoc nodes.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
///  struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
///   matches "bool func();"
AST_MATCHER(Type, booleanType) {
  return Node.isBooleanType();
}

/// Matches type \c void.
///
/// Given
/// \code
///  struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
///   matches "void func();"
AST_MATCHER(Type, voidType) {
  return Node.isVoidType();
}

// Convenience alias: a variadic matcher that dyn_casts a Type to a concrete
// Type subclass; used for the type matchers declared below.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
///   struct A {};
///   A a;
///   int b;
///   float c;
///   bool d;
/// \endcode
/// builtinType()
///   matches "int b", "float c" and "bool d"
// Definitions of these matcher variables live in the corresponding .cpp.
extern const AstTypeMatcher<BuiltinType> builtinType;

/// Matches all kinds of arrays.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[4];
///   void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
///   matches "int a[]", "int b[4]" and "int c[a[0]]"
extern const AstTypeMatcher<ArrayType> arrayType;

/// Matches C99 complex types.
///
/// Given
/// \code
///   _Complex float f;
/// \endcode
/// complexType()
///   matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
///   int i;
///   float f;
/// \endcode
/// realFloatingPointType()
///   matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  // "Real" excludes _Complex floating types.
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
///   struct A {};
///   A a[7];
///   int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
///   matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
// Declared here, generated elsewhere: the macro produces a traversal matcher
// following the element type of the array/complex type.
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
                                                                  ComplexType));

/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
///   void() {
///     int a[2];
///     int b[] = { 2, 3 };
///     int c[b[0]];
///   }
/// \endcode
/// constantArrayType()
///   matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
///   int a[42];
///   int b[2 * 21];
///   int c[41], d[43];
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
///   char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
///   matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
///   matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // HasSizeMatcher abstracts over the node-specific size accessor
  // (array extent vs. string length).
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
///   template<typename T, int Size>
///   class array {
///     T data[Size];
///   };
/// \endcode
/// dependentSizedArrayType()
///   matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;

/// Matches C arrays with unspecified size.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[42];
///   void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
///   matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;

/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
///   void f() {
///     int a[] = { 2, 3 }
///     int b[42];
///     int c[a[0]];
///   }
/// \endcode
/// variableArrayType()
///   matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
///   void f(int b) {
///     int a[b];
///   }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
///   varDecl(hasName("b")))))))
///   matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // NOTE(review): getSizeExpr() is dereferenced without a null check —
  // presumably a VariableArrayType always carries a size expression; confirm.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
///   _Atomic(int) i;
/// \endcode
/// atomicType()
///   matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;

/// Matches atomic types with a specific value type.
///
/// Given
/// \code
///   _Atomic(int) i;
///   _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
///  matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
// Declaration only; the traversal matcher definition is macro-generated.
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
///   auto n = 4;
///   int v[] = { 2, 3 }
///   for (auto i : v) { }
/// \endcode
/// autoType()
///   matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;

/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
///   short i = 1;
///   int j = 42;
///   decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
///   matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // No qualifier (e.g. a bare elaborated keyword) means no match.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  return Qualifier != nullptr &&
         InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match directly against the type the elaborated type refers to.
  const QualType Named = Node.getNamedType();
  return InnerMatcher.matches(Named, Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Delegate to the inner matcher on the type the original type decayed to.
  const QualType Decayed = Node.getDecayedType();
  return InnerType.matches(Decayed, Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  // A DeclContext is not itself a Decl; convert it back before matching.
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapt a NestedNameSpecifier matcher so it can be applied to the
  // location-carrying NestedNameSpecifierLoc node.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The specifier may name a namespace etc. rather than a type.
  const Type *SpecifiedType = Node.getAsType();
  if (SpecifiedType == nullptr)
    return false;
  // Wrap the bare Type in an unqualified QualType for the inner matcher.
  return InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Short-circuit order matters: the loc must be valid, and its specifier
  // must name a type, before getTypeLoc() may be queried.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))
///   matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // A specifier with no prefix (the outermost component) never matches.
  const NestedNameSpecifier *Prefix = Node.getPrefix();
  return Prefix != nullptr && InnerMatcher.matches(*Prefix, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An invalid (null) loc means there is no prefix to match against.
  NestedNameSpecifierLoc Prefix = Node.getPrefix();
  return Prefix && InnerMatcher.matches(Prefix, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // The specifier may name a type etc. rather than a namespace.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  return Namespace != nullptr &&
         InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Decls are uniqued in the AST, so address comparison is node identity.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Same identity-by-address check as the Decl overload.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Same identity-by-address check as the Decl overload.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // Accumulates one binding set per matching case.
  BoundNodesTreeBuilder Result;
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  bool Matched = false;
  // Walk the singly-linked list of cases attached to this switch.
  for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    // Match against a copy of the current bindings so a failed match does not
    // pollute the caller's builder.
    BoundNodesTreeBuilder CaseBuilder(*Builder);
    bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
    if (CaseMatched) {
      Matched = true;
      Result.addMatch(CaseBuilder);
    }
  }
  // Replace the caller's bindings with the accumulated per-case matches.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Accumulates one binding set per matching initializer, mirroring the
  // structure of forEachSwitchCase above.
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *I : Node.inits()) {
    // Copy the bindings so a failed match leaves the builder untouched.
    BoundNodesTreeBuilder InitBuilder(*Builder);
    if (InnerMatcher.matches(*I, Finder, &InitBuilder)) {
      Matched = true;
      Result.addMatch(InitBuilder);
    }
  }
  // Publish the accumulated matches to the caller.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Thin forwarder to CXXConstructorDecl::isCopyConstructor().
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Thin forwarder to CXXConstructorDecl::isMoveConstructor().
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Thin forwarder to CXXConstructorDecl::isDefaultConstructor().
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // Thin forwarder to CXXConstructorDecl::isDelegatingConstructor().
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                                        CXXConstructorDecl, CXXConversionDecl,
                                        CXXDeductionGuideDecl)) {
  // isExplicit() is true only when the explicit specifier resolves to true
  // (plain 'explicit' or 'explicit(true)'), per the doc examples above.
  return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only declarations with an explicit(bool-expr) specifier carry an
  // expression to match; plain or absent 'explicit' yields a null Expr.
  const ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  const Expr *ExplicitExpr = ES.getExpr();
  return ExplicitExpr != nullptr &&
         InnerMatcher.matches(*ExplicitExpr, Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  // The supported-types list above restricts Node to the two cases handled.
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Thin forwarder to NamespaceDecl::isAnonymousNamespace().
  return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) {
  // Thin forwarder to Decl::isInStdNamespace().
  return Node.isInStdNamespace();
}
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS indicates the GNU "case lo ... hi" range form, which this
  // matcher deliberately rejects; otherwise match the case constant (LHS).
  return Node.getRHS() == nullptr &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // True iff any attribute attached to the declaration has the given kind.
  return llvm::any_of(Node.attrs(), [AttrKind](const Attr *A) {
    return A->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare 'return;' carries no value expression and never matches.
  const Expr *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
  // NPC_ValueDependentIsNull: inside templates, treat value-dependent
  // expressions as potential null pointer constants.
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk up the parent map (a node can have several parents, e.g. across
  // template instantiations) searching for an enclosing function.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A statement inside a lambda body belongs to the lambda's call
      // operator, not to the function lexically containing the lambda.
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Not a function boundary: keep climbing through this node's parents.
      // Note a non-matching FunctionDecl/LambdaExpr ends its path here.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Thin forwarder to NamedDecl::hasExternalFormalLinkage().
  return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Thin forwarder to ParmVarDecl::hasDefaultArg(); see the deprecation note
  // above recommending hasInitializer() for matching the argument's contents.
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // Thin forwarder to CXXNewExpr::isArray().
  return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // An out-of-range index simply fails to match.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // any_of short-circuits at the first placement argument that matches.
  return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
    return InnerMatcher.matches(*Arg, Finder, Builder);
  });
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // '*Node.getArraySize()' tests the contained Expr* for null (arrays with no
  // explicit size). NOTE(review): getArraySize() itself returns an optional;
  // confirm it is always engaged when isArray() is true before dereferencing.
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Thin forwarder to CXXRecordDecl::hasDefinition().
  return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // Thin forwarder to EnumDecl::isScoped() (true for 'enum class'/'enum struct').
  return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types carry trailing-return information;
  // anything else (e.g. a K&R-style type) cannot match.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto != nullptr && Proto->hasTrailingReturn();
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      // An elidable copy/move constructor's argument is the materialized
      // temporary being copied from; match against its subexpression instead.
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // Fall back to matching the original node unchanged (C++17 path).
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   {}
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective())`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Delegates directly to the AST node's own classification.
  return Node.isStandaloneDirective();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   ;
///   #pragma omp parallel
///   {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // A standalone directive carries no structured block, so it can never
  // match; short-circuit before querying the block.
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Succeeds as soon as any clause of the directive satisfies the inner
  // matcher.
  auto Clauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Compare the clause's parsed kind against the 'none' enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Compare the clause's parsed kind against the 'shared' enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  // Compare the clause's parsed kind against the 'firstprivate' enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel for
///   #pragma omp          for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default")``.
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Query the OpenMP directive/clause compatibility table, taking the active
  // OpenMP language version into account.
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
assignment.h | /* Portions Copyright 2019-2021 Xuesong Zhou and Peiheng Li, Cafer Avci
* If you help write or modify the code, please also list your names here.
* The reason of having Copyright info here is to ensure all the modified version, as a whole, under the GPL
* and further prevent a violation of the GPL.
*
* More about "How to use GNU licenses for your own software"
* http://www.gnu.org/licenses/gpl-howto.html
*/
// Peiheng, 02/03/21, remove them later after adopting better casting
#pragma warning(disable : 4305 4267 4018)
// stop warning: "conversion from 'int' to 'float', possible loss of data"
#pragma warning(disable: 4244)
#ifdef _WIN32
#include "pch.h"
#endif
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <string>
#include <cstring>
#include <cstdio>
#include <ctime>
#include <cmath>
#include <algorithm>
#include <functional>
#include <stack>
#include <list>
#include <vector>
#include <map>
#include <omp.h>
#include "config.h"
#include "utils.h"
using std::max;
using std::min;
using std::cout;
using std::endl;
using std::string;
using std::vector;
using std::map;
using std::ifstream;
using std::ofstream;
using std::istringstream;
#include "DTA.h"
// Reset every link's per-period flow volumes and rebuild them from the path
// flows stored in the column (path) pool.
//
// number_of_links             : number of entries of g_link_vector to reset
// iteration_index             : assignment iteration k; when >= 0 the column
//                               volumes are tallied onto links
// b_self_reducing_path_volume : when true, each non-fixed path keeps only
//                               k/(k+1) of its flow after tallying, so the
//                               upcoming shortest path can receive 1/(k+1)
void g_reset_and_update_link_volume_based_on_columns(int number_of_links, int iteration_index, bool b_self_reducing_path_volume)
{
    // step 1: zero out per-period volumes for every link
    for (int i = 0; i < number_of_links; ++i)
    {
        for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
        {
            // used in travel time calculation
            g_link_vector[i].flow_volume_per_period[tau] = 0;
            // reserved for BPR-X
            g_link_vector[i].queue_link_distance_in_km_perslot[tau] = 0;

            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
                g_link_vector[i].volume_per_period_per_at[tau][at] = 0;
        }
    }

    if (iteration_index < 0)
        return;  // warm-up call: nothing to tally

    // step 2: accumulate path volumes onto links, per agent type
    for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
    {
        std::map<int, CColumnPath>::iterator it;
        int zone_size = g_zone_vector.size();
        int tau_size = assignment.g_DemandPeriodVector.size();
        float link_volume_contributed_by_path_volume;
        int link_seq_no;
        float PCE_ratio;
        int nl;
        std::map<int, CColumnPath>::iterator it_begin;
        std::map<int, CColumnPath>::iterator it_end;
        CColumnVector* p_column_pool;

        for (int orig = 0; orig < zone_size; ++orig) // o
        {
            for (int dest = 0; dest < zone_size; ++dest) //d
            {
                for (int tau = 0; tau < tau_size; ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume <= 0)
                        continue;

                    it_begin = p_column_pool->path_node_sequence_map.begin();
                    it_end = p_column_pool->path_node_sequence_map.end();
                    for (it = it_begin; it != it_end; ++it)
                    {
                        link_volume_contributed_by_path_volume = it->second.path_volume;

                        // add this path's volume to every link it traverses
                        for (nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                        {
                            link_seq_no = it->second.path_link_vector[nl];
                            // link-dependent and agent-type-dependent
                            // passenger-car-equivalent factor (mainly for trucks)
                            PCE_ratio = g_link_vector[link_seq_no].VDF_period[tau].pce[at];
                            g_link_vector[link_seq_no].flow_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio;
                            g_link_vector[link_seq_no].volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume; // pure volume, not PCE-weighted
                        }

                        // self-reduction does not apply to agents with fixed routing policies
                        if (!p_column_pool->bfixed_route && b_self_reducing_path_volume)
                        {
                            // after the link-volume tally, keep k/(k+1) of the
                            // previous flow so the upcoming shortest path will
                            // receive the remaining 1/(k+1)
                            it->second.path_volume = it->second.path_volume * (float(iteration_index) / float(iteration_index + 1));
                        }
                    }
                }
            }
        }
    }
}
// Recompute each link's period travel time via its volume-delay function
// (VDF) and the marginal cost used for system-optimal (SO) assignment, then
// return the total network travel time summed over all links and periods.
double update_link_travel_time_and_cost(int inner_iteration_number)
{
    // NOTE(review): the original code guarded the commented-out
    // simulation-based delay computation below with
    // `if (assignment.assignment_mode == 2)`; the conditional had an empty
    // body and was removed. The sketch is kept for reference:
    //compute the time-dependent delay from simulation
    //for (int l = 0; l < g_link_vector.size(); l++)
    //{
    //    float volume = assignment.m_LinkCumulativeDepartureVector[l][assignment.g_number_of_simulation_intervals - 1]; // link flow rates
    //    float waiting_time_count = 0;
    //    for (int tt = 0; tt < assignment.g_number_of_simulation_intervals; tt++)
    //        waiting_time_count += assignment.m_LinkTDWaitingTime[l][tt / number_of_simu_intervals_in_min]; // tally total waiting count
    //    for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); tau++)
    //    {
    //        float travel_time = g_link_vector[l].free_flow_travel_time_in_min + waiting_time_count * number_of_seconds_per_interval / max(1, volume) / 60;
    //        g_link_vector[l].travel_time_per_period[tau] = travel_time;
    //    }
    //}

#pragma omp parallel for
    for (int i = 0; i < g_link_vector.size(); ++i)
    {
        // step 1: travel time based on VDF
        g_link_vector[i].calculate_dynamic_VDFunction(inner_iteration_number, false, g_link_vector[i].VDF_type_no);

        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
            {
                float PCE_agent_type = assignment.g_AgentTypeVector[at].PCE;

                // step 2: marginal cost for system-optimal assignment
                g_link_vector[i].calculate_marginal_cost_for_agent_type(tau, at, PCE_agent_type);
            }
        }
    }

    // step 3: tally total travel time over all links and demand periods
    double total_network_travel_time = 0;
    for (int i = 0; i < g_link_vector.size(); ++i)
    {
        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)
        {
            total_network_travel_time += g_link_vector[i].VDF_period[tau].total_travel_time;
        }
    }

    return total_network_travel_time;
}
// changes here are also for odmes, don't need to implement the changes in this function for now
// Rebuild link volumes and zone production/attraction from the ODME column
// pool, run the link VDFs, and measure deviation between estimated and
// observed values.
//
// Returns the average absolute link-count deviation ratio (link MAPE-style
// gap). system_gap (output) receives the signed average deviation ratio.
double g_reset_and_update_link_volume_based_on_ODME_columns(int number_of_links, int iteration_no, double& system_gap)
{
    float total_gap = 0;
    float sub_total_gap_link_count = 0;
    float sub_total_system_gap_count = 0;
    system_gap = 0;
    // NOTE(review): the P/A sub-gap accumulators are computed but never
    // reported or returned below -- confirm whether they are still needed.
    float sub_total_gap_P_count = 0;
    float sub_total_gap_A_count = 0;

    // step 1: reset the link volume
    for (int i = 0; i < number_of_links; ++i)
    {
        for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
        {
            // used in travel time calculation
            g_link_vector[i].flow_volume_per_period[tau] = 0;
        }
    }

    // step 2: reset the estimated production and attraction
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        g_zone_vector[orig].est_attraction = 0;
        g_zone_vector[orig].est_production = 0;
    }

    // step 3: accumulate link volumes and zone production/attraction from the
    // column pool (PCE ratio is 1 in ODME)
    for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
    {
        std::map<int, CColumnPath>::iterator it;
        int zone_size = g_zone_vector.size();
        int tau_size = assignment.g_DemandPeriodVector.size();
        float link_volume_contributed_by_path_volume;
        int link_seq_no;
        float PCE_ratio;
        int nl;
        std::map<int, CColumnPath>::iterator it_begin;
        std::map<int, CColumnPath>::iterator it_end;
        CColumnVector* p_column_pool;

        for (int orig = 0; orig < zone_size; ++orig) // o
        {
            for (int dest = 0; dest < zone_size; ++dest) //d
            {
                for (int tau = 0; tau < tau_size; ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume <= 0)
                        continue;

                    it_begin = p_column_pool->path_node_sequence_map.begin();
                    it_end = p_column_pool->path_node_sequence_map.end();
                    for (it = it_begin; it != it_end; ++it) // path k
                    {
                        link_volume_contributed_by_path_volume = it->second.path_volume;
                        g_zone_vector[orig].est_production += it->second.path_volume;
                        g_zone_vector[dest].est_attraction += it->second.path_volume;

                        // add path volume to each link on the path
                        for (nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                        {
                            link_seq_no = it->second.path_link_vector[nl];
                            PCE_ratio = 1;
                            g_link_vector[link_seq_no].flow_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio;
                            g_link_vector[link_seq_no].volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume; // pure volume, not consider PCE
                        }
                    }
                }
            }
        }
    }

    // step 4: run the VDFs and compute the deviation for each measurement type
    int total_link_count = 0;
    for (int i = 0; i < number_of_links; ++i)
    {
        g_link_vector[i].calculate_dynamic_VDFunction(iteration_no, false, g_link_vector[i].VDF_type_no);

        if (g_link_vector[i].obs_count >= 1) // with data
        {
            int tau = 0;
            g_link_vector[i].est_count_dev = g_link_vector[i].flow_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload - g_link_vector[i].obs_count;

            if (dtalog.debug_level() == 2)
            {
                dtalog.output() << "link " << g_node_vector[g_link_vector[i].from_node_seq_no].node_id
                    << "->" << g_node_vector[g_link_vector[i].to_node_seq_no].node_id
                    << "obs:, " << g_link_vector[i].obs_count << "est:, " << g_link_vector[i].flow_volume_per_period[tau]
                    << "dev:," << g_link_vector[i].est_count_dev << endl;
            }

            if (g_link_vector[i].upper_bound_flag == 0)
            {
                // fabs: est_count_dev is floating point; the C abs(int)
                // overload would silently truncate the fraction
                total_gap += fabs(g_link_vector[i].est_count_dev);
                sub_total_gap_link_count += fabs(g_link_vector[i].est_count_dev / g_link_vector[i].obs_count);
                sub_total_system_gap_count += g_link_vector[i].est_count_dev / g_link_vector[i].obs_count;
            }
            else
            {
                // upper bound constraints: only penalize overestimation
                if (g_link_vector[i].est_count_dev > 0)
                {
                    total_gap += fabs(g_link_vector[i].est_count_dev);
                    sub_total_gap_link_count += fabs(g_link_vector[i].est_count_dev / g_link_vector[i].obs_count);
                    sub_total_system_gap_count += g_link_vector[i].est_count_dev / g_link_vector[i].obs_count;
                }
            }
            total_link_count += 1;
        }
    }

    // step 5: zone-level production/attraction deviations
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        if (g_zone_vector[orig].obs_attraction >= 1) // with observation
        {
            g_zone_vector[orig].est_attraction_dev = g_zone_vector[orig].est_attraction - g_zone_vector[orig].obs_attraction;

            if (dtalog.debug_level() == 2)
            {
                dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "A: obs:" << g_zone_vector[orig].obs_attraction
                    << ",est:," << g_zone_vector[orig].est_attraction << ",dev:," << g_zone_vector[orig].est_attraction_dev << endl;
            }

            total_gap += fabs(g_zone_vector[orig].est_attraction_dev);
            sub_total_gap_A_count += g_zone_vector[orig].est_attraction_dev / g_zone_vector[orig].obs_attraction;
        }

        if (g_zone_vector[orig].obs_production >= 1) // with observation
        {
            g_zone_vector[orig].est_production_dev = g_zone_vector[orig].est_production - g_zone_vector[orig].obs_production;

            if (dtalog.debug_level() == 2)
            {
                dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "P: obs:" << g_zone_vector[orig].obs_production
                    << ",est:," << g_zone_vector[orig].est_production << ",dev:," << g_zone_vector[orig].est_production_dev << endl;
            }

            total_gap += fabs(g_zone_vector[orig].est_production_dev);
            sub_total_gap_P_count += g_zone_vector[orig].est_production_dev / g_zone_vector[orig].obs_production;
        }
    }

    // step 6: report and return the link MAPE-style gap
    dtalog.output() << "ODME #" << iteration_no/*<< " total abs gap= " << total_gap*/
        << " ,%link_MAPE: " << (sub_total_gap_link_count) / max(1, total_link_count) * 100 <<
        " ,%system_MPE: " << (sub_total_system_gap_count) / max(1, total_link_count) * 100 << endl;

    double gap = sub_total_gap_link_count / max(1, total_link_count);
    system_gap = sub_total_system_gap_count / max(1, total_link_count);
    return gap;
}
// One projected-gradient column flow updating step: recompute each path's
// gradient cost from current link attributes, then shift flow from
// non-least-cost paths toward the least-gradient-cost path of every
// OD / agent-type / demand-period column pool.
void g_update_gradient_cost_and_assigned_flow_in_column_pool(Assignment& assignment, int inner_iteration_number)
{
    double total_system_cost_gap = 0;
    double total_system_travel_cost = 0;

    // we can have a recursive formula to re-update the current link volume by
    // a factor of k/(k+1), and use the newly generated path flow to add the
    // additional 1/(k+1)
    g_reset_and_update_link_volume_based_on_columns(g_link_vector.size(), inner_iteration_number, false);

    // based on the newly calculated path volumes, update volume-based travel
    // time and gradient costs
    update_link_travel_time_and_cost(inner_iteration_number);

    // step 1: update gradient costs and shift flows, one origin zone per
    // thread. The shared accumulators use an OpenMP reduction; the original
    // unguarded `+=` across threads was a data race.
#pragma omp parallel for reduction(+ : total_system_cost_gap, total_system_travel_cost)
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int column_vector_size;
        float least_gradient_cost = 999999;
        int least_gradient_cost_path_seq_no = -1;
        int least_gradient_cost_path_node_sum_index = -1;
        double path_toll = 0;
        double path_gradient_cost = 0;
        double path_distance = 0;
        double path_travel_time = 0;
        int link_seq_no;
        double link_travel_time;
        double total_switched_out_path_volume = 0;
        double step_size = 0;
        double previous_path_volume = 0;

        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume <= 0)
                        continue;

                    column_vector_size = p_column_pool->path_node_sequence_map.size();

                    // step 1.1: update gradient cost for each column path and
                    // locate the least-gradient-cost path
                    least_gradient_cost = 999999;
                    least_gradient_cost_path_seq_no = -1;
                    least_gradient_cost_path_node_sum_index = -1;
                    it_begin = p_column_pool->path_node_sequence_map.begin();
                    it_end = p_column_pool->path_node_sequence_map.end();
                    for (it = it_begin; it != it_end; ++it)
                    {
                        path_toll = 0;
                        path_gradient_cost = 0;
                        path_distance = 0;
                        path_travel_time = 0;

                        for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                        {
                            link_seq_no = it->second.path_link_vector[nl];
                            path_toll += g_link_vector[link_seq_no].VDF_period[tau].toll[at];
                            path_distance += g_link_vector[link_seq_no].link_distance_in_km;
                            link_travel_time = g_link_vector[link_seq_no].travel_time_per_period[tau];
                            path_travel_time += link_travel_time;
                            path_gradient_cost += g_link_vector[link_seq_no].get_generalized_first_order_gradient_cost_of_second_order_loss_for_agent_type(tau, at);
                        }

                        it->second.path_toll = path_toll;
                        it->second.path_travel_time = path_travel_time;
                        it->second.path_gradient_cost = path_gradient_cost;
                        it->second.path_gradient_cost_per_iteration_map[inner_iteration_number] = path_gradient_cost;

                        if (column_vector_size == 1) // only one path: no switching possible
                        {
                            total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume);
                            break;
                        }

                        if (path_gradient_cost < least_gradient_cost)
                        {
                            least_gradient_cost = path_gradient_cost;
                            least_gradient_cost_path_seq_no = it->second.path_seq_no;
                            least_gradient_cost_path_node_sum_index = it->first;
                        }
                    }

                    if (column_vector_size >= 2)
                    {
                        // step 1.2: shift flow away from non-least-cost paths
                        total_switched_out_path_volume = 0;
                        for (it = it_begin; it != it_end; ++it)
                        {
                            if (it->second.path_seq_no != least_gradient_cost_path_seq_no) //for non-least cost path
                            {
                                it->second.path_gradient_cost_difference = it->second.path_gradient_cost - least_gradient_cost;
                                it->second.path_gradient_cost_relative_difference = it->second.path_gradient_cost_difference / max(0.0001f, least_gradient_cost);
                                total_system_cost_gap += (it->second.path_gradient_cost_difference * it->second.path_volume);
                                total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume);

                                // MSA-style diminishing step size
                                step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume;
                                previous_path_volume = it->second.path_volume;
                                double flow_shift = step_size * it->second.path_gradient_cost_relative_difference;
                                // cap the shift at half of the path's current volume
                                if (flow_shift > it->second.path_volume * 0.5)
                                {
                                    flow_shift = it->second.path_volume * 0.5;
                                }

                                // recall that path_gradient_cost_difference >= 0
                                // step 1.2a: shift flow from this non-shortest path
                                it->second.path_volume = max(0.0, it->second.path_volume - flow_shift);
                                it->second.path_volume_per_iteration_map[inner_iteration_number] = it->second.path_volume;
                                it->second.path_switch_volume = (previous_path_volume - it->second.path_volume);
                                total_switched_out_path_volume += (previous_path_volume - it->second.path_volume);
                            }
                        }

                        // step 1.3: the least-cost path receives all the volume
                        // switched out of the non-shortest paths
                        if (least_gradient_cost_path_seq_no != -1)
                        {
                            CColumnPath& best = p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index];
                            best.path_volume += total_switched_out_path_volume;
                            best.path_volume_per_iteration_map[inner_iteration_number] = best.path_volume;
                            total_system_travel_cost += (best.path_gradient_cost * best.path_volume);
                        }
                    }
                }
            }
        }
    }

    dtalog.output() << "column updating: iteration= " << inner_iteration_number << ", total_gap=" << total_system_cost_gap
        << ",total_relative_gap=" << total_system_cost_gap / max(0.00001, total_system_travel_cost) << endl;
}
// Run the inner column (path) flow updating loop for a fixed number of
// iterations, dumping per-link flow counts at high debug levels.
void g_column_pool_optimization(Assignment& assignment, int column_updating_iterations)
{
    // column_updating_iterations is the internal number of column updating rounds
    for (int iteration = 0; iteration < column_updating_iterations; ++iteration)
    {
        g_update_gradient_cost_and_assigned_flow_in_column_pool(assignment, iteration);

        if (dtalog.debug_level() < 3)
            continue;

        for (int li = 0; li < g_link_vector.size(); ++li)
        {
            dtalog.output() << "link: " << g_node_vector[g_link_vector[li].from_node_seq_no].node_id << "-->"
                << g_node_vector[g_link_vector[li].to_node_seq_no].node_id << ", "
                << "flow count:" << g_link_vector[li].flow_volume_per_period[0] << endl;
        }
    }
}
// Re-route flow for agents receiving real-time information (e.g. VMS): when a
// used path passes an information zone and also a capacity-impact area, its
// first-stage prefix (up to the info zone) is stitched to the first available
// second-stage path from the info zone to the destination.
void g_column_pool_route_scheduling(Assignment& assignment, int inner_iteration_number)
{
    //step 1: scan the column pool of every OD / agent type / demand period
#pragma omp parallel for
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int link_seq_no;

        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume <= 0)
                        continue;
                    if (assignment.g_AgentTypeVector[at].real_time_information != 1)
                        continue; // only re-route VMS-informed agent types

                    it_begin = p_column_pool->path_node_sequence_map.begin();
                    it_end = p_column_pool->path_node_sequence_map.end();
                    for (it = it_begin; it != it_end; ++it) // scan each first-stage original path
                    {
                        if (it->second.path_volume < 0.00001)
                            continue;

                        //test condition 1: passing through information zone
                        bool b_passing_information_zone = false;
                        int new_orig_zone_id = 0;
                        std::vector<int> link_seq_vector;
                        //test condition 2: passing through capacity impact area
                        bool b_passing_capacity_impact_area = false;

                        for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                        {
                            link_seq_no = it->second.path_link_vector[nl];
                            CLink* pCurrentLink = &(g_link_vector[link_seq_no]);

                            if (b_passing_information_zone == false &&
                                assignment.node_seq_no_2_info_zone_id_mapping.find(pCurrentLink->to_node_seq_no) != assignment.node_seq_no_2_info_zone_id_mapping.end()) // this node has been defined as a zone
                            {
                                int zone_id = assignment.node_seq_no_2_info_zone_id_mapping[pCurrentLink->to_node_seq_no];
                                int zone_no = assignment.g_zoneid_to_zone_seq_no_mapping[zone_id];
                                if (assignment.zone_seq_no_2_info_mapping.find(zone_no) != assignment.zone_seq_no_2_info_mapping.end()) // as information zone
                                {
                                    b_passing_information_zone = true;
                                    new_orig_zone_id = zone_id; // zone id to zone no.
                                    // copy the existing link sequence up to the
                                    // downstream node of the info zone
                                    for (int nl2 = 0; nl2 <= nl; ++nl2) // arc a
                                        link_seq_vector.push_back(it->second.path_link_vector[nl2]);
                                }
                            }

                            if (pCurrentLink->capacity_reduction_map.find(tau) != pCurrentLink->capacity_reduction_map.end())
                            {
                                b_passing_capacity_impact_area = true;
                            }
                        }

                        if (b_passing_capacity_impact_area == true && b_passing_information_zone == true)
                        {
                            //step 2: fetch the related column pool from the information zone:
                            // same destination, same agent type and assignment period tau
                            int info_orig = assignment.g_zoneid_to_zone_seq_no_mapping[new_orig_zone_id];
                            CColumnVector* p_2_stage_column_pool = &(assignment.g_column_pool[info_orig][dest][at][tau]);

                            // connect with the first available second-stage path,
                            // excluding the virtual link at the end; guard
                            // against an empty pool (the original dereferenced
                            // the end iterator in that case)
                            std::map<int, CColumnPath>::iterator it2 = p_2_stage_column_pool->path_node_sequence_map.begin();
                            if (it2 != p_2_stage_column_pool->path_node_sequence_map.end())
                            {
                                for (int nl = 1; nl < it2->second.m_link_size; ++nl) // arc a
                                    link_seq_vector.push_back(it2->second.path_link_vector[nl]);
                            }

                            if (it->second.path_link_vector != NULL)
                            {
                                // copy the updated path (stage 1 + stage 2) back
                                // to the path link vector; these arrays were
                                // allocated with new[], so delete[] is required
                                delete[] it->second.path_link_vector;
                                it->second.path_link_vector = new int[link_seq_vector.size()];
                                for (int l = 0; l < link_seq_vector.size(); l++)
                                    it->second.path_link_vector[l] = link_seq_vector[l];
                                it->second.m_link_size = link_seq_vector.size();

                                // copy the updated path back to the path node vector
                                delete[] it->second.path_node_vector;
                                it->second.path_node_vector = new int[link_seq_vector.size() + 1];
                                // first node
                                it->second.path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;
                                // remaining nodes to the end of path
                                for (int l = 0; l < link_seq_vector.size(); l++)
                                    it->second.path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
                                it->second.m_node_size = link_seq_vector.size() + 1;
                            }

                            // carry over the switching path flow to the second-stage pool
                            p_2_stage_column_pool->od_volume += it->second.path_volume;
                            p_2_stage_column_pool->information_type = 1;
                            if (it2 != p_2_stage_column_pool->path_node_sequence_map.end())
                                it2->second.path_volume += it->second.path_volume;
                        } // two conditions satisfied
                    } //end of scanning for the first stage path in the column pool
                } // tau
            } //agent type
        } //dest
    } // orig

    dtalog.output() << " updating";
}
// Build activity-chain routes: for each OD column pool that defines a list of
// activity zones, replace its routes with a single path formed by chaining
// the first available path between each consecutive pair of activity zones.
void g_column_pool_activity_scheduling(Assignment& assignment, int inner_iteration_number)
{
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;

        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume <= 0)
                        continue;
                    if (p_column_pool->activity_zone_no_vector.size() == 0)
                        continue; // not an activity-chain OD pair

                    p_column_pool->path_node_sequence_map.clear(); // remove existing single-OD-pair routes
                    int aat = p_column_pool->activity_agent_type_no;
                    std::vector<int> link_seq_vector;

                    // chain a route through each consecutive activity OD pair:
                    // element 0 is the origin, the last element the destination
                    for (int az = 0; az < p_column_pool->activity_zone_no_vector.size() - 1; az++)
                    {
                        int activity_orig = p_column_pool->activity_zone_no_vector[az];
                        int activity_dest = p_column_pool->activity_zone_no_vector[az + 1];

                        //step 2: fetch the related column pool for this leg
                        CColumnVector* p_2_stage_column_pool = &(assignment.g_column_pool[activity_orig][activity_dest][aat][tau]);

                        // connect with the first available path of this leg,
                        // excluding the virtual links at the beginning and the end
                        std::map<int, CColumnPath>::iterator it2 = p_2_stage_column_pool->path_node_sequence_map.begin();
                        if (it2 != p_2_stage_column_pool->path_node_sequence_map.end())
                        {
                            for (int nl = 1; nl < it2->second.m_link_size - 1; ++nl) // arc a
                                link_seq_vector.push_back(it2->second.path_link_vector[nl]);
                        }
                    }

                    if (link_seq_vector.size() == 0)
                        continue; // no usable leg paths; leave this pool without routes

                    // NOTE(review): node_sum (sum of link ids) is the path key
                    // convention used throughout this file; collisions are
                    // possible but this mirrors the existing convention.
                    int node_sum = 0;
                    for (int l = 0; l < link_seq_vector.size(); l++)
                        node_sum += link_seq_vector[l];

                    // add this unique path // later we can add k activity paths
                    int path_count = p_column_pool->path_node_sequence_map.size();
                    CColumnPath& path = p_column_pool->path_node_sequence_map[node_sum];
                    path.path_seq_no = path_count;
                    path.path_volume = p_column_pool->od_volume;
                    path.path_toll = 0;
                    path.path_link_vector = new int[link_seq_vector.size()];
                    path.path_node_vector = new int[link_seq_vector.size() + 1];
                    for (int l = 0; l < link_seq_vector.size(); l++)
                    {
                        path.path_link_vector[l] = link_seq_vector[l];
                        path.path_link_STL_vector.push_back(link_seq_vector[l]);
                    }
                    path.m_link_size = link_seq_vector.size();

                    // node vector: first node, then the downstream node of each link
                    path.path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;
                    for (int l = 0; l < link_seq_vector.size(); l++)
                        path.path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
                    path.m_node_size = link_seq_vector.size() + 1;
                } // tau
            } //agent type
        } //dest
    } // orig

    dtalog.output() << " updating";
}
|
ompcompress.c | #ifdef _OPENMP
/* compress 1d contiguous array in parallel */
static void
_t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* array = (const Scalar*)field->data;
  uint count = field->nx;
  /* partition the 4-element blocks into chunks, one chunk per task */
  uint nthread = thread_count_omp(stream);
  uint nblock = (count + 3) / 4;
  uint nchunk = chunk_count_omp(stream, nblock, nthread);
  /* one private bit stream per chunk; bail out if allocation fails */
  bitstream** chunk_bs = compress_init_par(stream, field, nchunk, nblock);
  if (!chunk_bs)
    return;
  /* encode the chunks in parallel */
  int c;
  #pragma omp parallel for num_threads(nthread)
  for (c = 0; c < (int)nchunk; c++) {
    /* block index range [first, last) owned by this chunk */
    uint first = chunk_offset(nblock, nchunk, c + 0);
    uint last = chunk_offset(nblock, nchunk, c + 1);
    uint b;
    /* thread-private codec state writing to this chunk's bit stream */
    zfp_stream codec = *stream;
    zfp_stream_set_bit_stream(&codec, chunk_bs[c]);
    /* encode each block owned by this chunk */
    for (b = first; b < last; b++) {
      /* locate block origin within the array */
      uint x = 4 * b;
      const Scalar* src = array + x;
      /* full 4-element block, or padded partial block at the array boundary */
      if (count - x >= 4)
        _t2(zfpns.zfp_encode_block, Scalar, 1)(&codec, src);
      else
        _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 1)(&codec, src, MIN(count - x, 4u), 1);
    }
  }
  /* stitch the per-chunk streams back onto the main stream */
  compress_finish_par(stream, chunk_bs, nchunk);
}
/* compress 1d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* data = (const Scalar*)field->data;
  uint nx = field->nx;
  int sx = field->sx ? field->sx : 1; /* zero stride means default: contiguous */
  /* number of omp threads, blocks, and chunks */
  uint threads = thread_count_omp(stream);
  uint blocks = (nx + 3) / 4; /* number of 4-element blocks; last may be partial */
  uint chunks = chunk_count_omp(stream, blocks, threads);
  /* allocate per-thread streams */
  bitstream** bs = compress_init_par(stream, field, chunks, blocks);
  if (!bs)
    return; /* allocation failed; nothing is compressed */
  /* compress chunks of blocks in parallel */
  int chunk;
  #pragma omp parallel for num_threads(threads)
  for (chunk = 0; chunk < (int)chunks; chunk++) {
    /* determine range of block indices assigned to this thread */
    uint bmin = chunk_offset(blocks, chunks, chunk + 0);
    uint bmax = chunk_offset(blocks, chunks, chunk + 1);
    uint block;
    /* set up thread-local bit stream (private copy; only the bit stream differs) */
    zfp_stream s = *stream;
    zfp_stream_set_bit_stream(&s, bs[chunk]);
    /* compress sequence of blocks */
    for (block = bmin; block < bmax; block++) {
      /* determine block origin x within array */
      const Scalar* p = data;
      uint x = 4 * block;
      p += sx * (ptrdiff_t)x;
      /* compress partial or full block */
      if (nx - x < 4)
        _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), sx);
      else
        _t2(zfpns.zfp_encode_block_strided, Scalar, 1)(&s, p, sx);
    }
  }
  /* concatenate per-thread streams */
  compress_finish_par(stream, bs, chunks);
}
/* compress 2d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* data = (const Scalar*)field->data;
  uint nx = field->nx;
  uint ny = field->ny;
  int sx = field->sx ? field->sx : 1;       /* zero stride means default: contiguous in x */
  int sy = field->sy ? field->sy : (int)nx; /* default y stride assumes row-major layout */
  /* number of omp threads, blocks, and chunks */
  uint threads = thread_count_omp(stream);
  uint bx = (nx + 3) / 4;
  uint by = (ny + 3) / 4;
  uint blocks = bx * by; /* total 4x4 blocks; boundary blocks may be partial */
  uint chunks = chunk_count_omp(stream, blocks, threads);
  /* allocate per-thread streams */
  bitstream** bs = compress_init_par(stream, field, chunks, blocks);
  if (!bs)
    return; /* allocation failed; nothing is compressed */
  /* compress chunks of blocks in parallel */
  int chunk;
  #pragma omp parallel for num_threads(threads)
  for (chunk = 0; chunk < (int)chunks; chunk++) {
    /* determine range of block indices assigned to this thread */
    uint bmin = chunk_offset(blocks, chunks, chunk + 0);
    uint bmax = chunk_offset(blocks, chunks, chunk + 1);
    uint block;
    /* set up thread-local bit stream */
    zfp_stream s = *stream;
    zfp_stream_set_bit_stream(&s, bs[chunk]);
    /* compress sequence of blocks */
    for (block = bmin; block < bmax; block++) {
      /* determine block origin (x, y) within array; blocks are ordered x fastest */
      const Scalar* p = data;
      uint b = block;
      uint x, y;
      x = 4 * (b % bx); b /= bx;
      y = 4 * b;
      p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y;
      /* compress partial or full block */
      if (nx - x < 4 || ny - y < 4)
        _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 2)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy);
      else
        _t2(zfpns.zfp_encode_block_strided, Scalar, 2)(&s, p, sx, sy);
    }
  }
  /* concatenate per-thread streams */
  compress_finish_par(stream, bs, chunks);
}
/* compress 3d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* data = (const Scalar*)field->data;
  uint nx = field->nx;
  uint ny = field->ny;
  uint nz = field->nz;
  int sx = field->sx ? field->sx : 1;              /* zero stride means default: contiguous in x */
  int sy = field->sy ? field->sy : (int)nx;        /* default y stride assumes row-major layout */
  int sz = field->sz ? field->sz : (int)(nx * ny); /* default z stride assumes row-major layout */
  /* number of omp threads, blocks, and chunks */
  uint threads = thread_count_omp(stream);
  uint bx = (nx + 3) / 4;
  uint by = (ny + 3) / 4;
  uint bz = (nz + 3) / 4;
  uint blocks = bx * by * bz; /* total 4x4x4 blocks; boundary blocks may be partial */
  uint chunks = chunk_count_omp(stream, blocks, threads);
  /* allocate per-thread streams */
  bitstream** bs = compress_init_par(stream, field, chunks, blocks);
  if (!bs)
    return; /* allocation failed; nothing is compressed */
  /* compress chunks of blocks in parallel */
  int chunk;
  #pragma omp parallel for num_threads(threads)
  for (chunk = 0; chunk < (int)chunks; chunk++) {
    /* determine range of block indices assigned to this thread */
    uint bmin = chunk_offset(blocks, chunks, chunk + 0);
    uint bmax = chunk_offset(blocks, chunks, chunk + 1);
    uint block;
    /* set up thread-local bit stream */
    zfp_stream s = *stream;
    zfp_stream_set_bit_stream(&s, bs[chunk]);
    /* compress sequence of blocks */
    for (block = bmin; block < bmax; block++) {
      /* determine block origin (x, y, z) within array; blocks are ordered x fastest */
      const Scalar* p = data;
      uint b = block;
      uint x, y, z;
      x = 4 * (b % bx); b /= bx;
      y = 4 * (b % by); b /= by;
      z = 4 * b;
      p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z;
      /* compress partial or full block */
      if (nx - x < 4 || ny - y < 4 || nz - z < 4)
        _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
      else
        _t2(zfpns.zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
    }
  }
  /* concatenate per-thread streams */
  compress_finish_par(stream, bs, chunks);
}
/* compress 4d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  /* cast void* data pointer like the 1d-3d variants do (required when built as C++) */
  const Scalar* data = (const Scalar*)field->data;
  uint nx = field->nx;
  uint ny = field->ny;
  uint nz = field->nz;
  uint nw = field->nw;
  int sx = field->sx ? field->sx : 1;                   /* zero stride means default: contiguous in x */
  int sy = field->sy ? field->sy : (int)nx;             /* default y stride assumes row-major layout */
  int sz = field->sz ? field->sz : (int)(nx * ny);      /* default z stride assumes row-major layout */
  int sw = field->sw ? field->sw : (int)(nx * ny * nz); /* default w stride assumes row-major layout */
  /* number of omp threads, blocks, and chunks */
  uint threads = thread_count_omp(stream);
  uint bx = (nx + 3) / 4;
  uint by = (ny + 3) / 4;
  uint bz = (nz + 3) / 4;
  uint bw = (nw + 3) / 4;
  uint blocks = bx * by * bz * bw; /* total 4x4x4x4 blocks; boundary blocks may be partial */
  uint chunks = chunk_count_omp(stream, blocks, threads);
  /* allocate per-thread streams */
  bitstream** bs = compress_init_par(stream, field, chunks, blocks);
  if (!bs)
    return; /* allocation failed; nothing is compressed */
  /* compress chunks of blocks in parallel */
  int chunk;
  #pragma omp parallel for num_threads(threads)
  for (chunk = 0; chunk < (int)chunks; chunk++) {
    /* determine range of block indices assigned to this thread */
    uint bmin = chunk_offset(blocks, chunks, chunk + 0);
    uint bmax = chunk_offset(blocks, chunks, chunk + 1);
    uint block;
    /* set up thread-local bit stream */
    zfp_stream s = *stream;
    zfp_stream_set_bit_stream(&s, bs[chunk]);
    /* compress sequence of blocks */
    for (block = bmin; block < bmax; block++) {
      /* determine block origin (x, y, z, w) within array; blocks are ordered x fastest */
      const Scalar* p = data;
      uint b = block;
      uint x, y, z, w;
      x = 4 * (b % bx); b /= bx;
      y = 4 * (b % by); b /= by;
      z = 4 * (b % bz); b /= bz;
      w = 4 * b;
      p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w;
      /* compress partial or full block */
      if (nx - x < 4 || ny - y < 4 || nz - z < 4 || nw - w < 4)
        _t2(zfpns.zfp_encode_partial_block_strided, Scalar, 4)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw);
      else
        _t2(zfpns.zfp_encode_block_strided, Scalar, 4)(&s, p, sx, sy, sz, sw);
    }
  }
  /* concatenate per-thread streams */
  compress_finish_par(stream, bs, chunks);
}
#endif
|
block_jacobi_precond.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "H2Pack.h"
#include "block_jacobi_precond.h"
// Construct a block_jacobi_precond from a H2Pack structure.
// The preconditioner stores, for each leaf node of the H2 tree, the explicit
// inverse of (K_leaf + shift * I), where K_leaf is the kernel matrix restricted
// to that leaf's points. On success, *precond_ points to the new structure.
void H2P_build_block_jacobi_precond(H2Pack_p h2pack, const DTYPE shift, block_jacobi_precond_p *precond_)
{
    block_jacobi_precond_p precond = (block_jacobi_precond_p) malloc(sizeof(block_jacobi_precond_s));
    assert(precond != NULL);
    // One diagonal block per leaf node of the H2 partition tree
    int   n_point    = h2pack->n_point;
    int   n_block    = h2pack->n_leaf_node;
    int   n_thread   = h2pack->n_thread;
    int   krnl_dim   = h2pack->krnl_dim;
    int   *pt_cluster = h2pack->pt_cluster;
    int   *leaf_nodes = h2pack->height_nodes;
    int   *D_nrow     = h2pack->D_nrow;
    DTYPE *coord      = h2pack->coord;
    double st = get_wtime_sec();
    // Per-block sizes, row offsets, and offsets into the packed inverse storage
    int    *blk_sizes   = (int*)    malloc(sizeof(int)    * n_block);
    int    *blk_displs  = (int*)    malloc(sizeof(int)    * (n_block + 1));
    size_t *blk_inv_ptr = (size_t*) malloc(sizeof(size_t) * n_block);
    assert(blk_sizes != NULL && blk_displs != NULL && blk_inv_ptr != NULL);
    size_t blk_total_size = 0;
    blk_displs[0] = 0;
    for (int i = 0; i < n_block; i++)
    {
        int node = leaf_nodes[i];
        // NOTE(review): D_nrow is indexed by i here while pt_cluster below is
        // indexed by leaf_nodes[i]; `node` is unused in this loop. This assumes
        // D_nrow's first n_leaf_node entries follow the same ordering as
        // leaf_nodes — TODO confirm against the H2Pack structure layout.
        blk_sizes[i] = D_nrow[i];
        blk_inv_ptr[i] = blk_total_size;
        blk_displs[i + 1] = blk_displs[i] + D_nrow[i];
        blk_total_size += D_nrow[i] * D_nrow[i];
    }
    // Packed storage for all inverted diagonal blocks
    DTYPE *blk_inv = (DTYPE*) malloc(sizeof(DTYPE) * blk_total_size);
    ASSERT_PRINTF(blk_inv != NULL, "Failed to allocate array of size %zu for block Jacobi preconditioner\n", blk_total_size);
    size_t total_msize = sizeof(int) * (2 * n_block + 1) + sizeof(size_t) * n_block + sizeof(DTYPE) * blk_total_size;
    // Pivot workspace shared by all blocks; each block uses the slice starting
    // at its first point's row index, so concurrent blocks never overlap
    int *all_ipiv = (int*) malloc(sizeof(int) * n_point * krnl_dim);
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();  // NOTE(review): tid is currently unused
        #pragma omp for schedule(dynamic)
        for (int i = 0; i < n_block; i++)
        {
            int node  = leaf_nodes[i];
            int pt_s  = pt_cluster[2 * node];      // first point of this leaf
            int pt_e  = pt_cluster[2 * node + 1];  // last point of this leaf (inclusive)
            int npt   = pt_e - pt_s + 1;
            int blk_size = blk_sizes[i];
            int *ipiv = all_ipiv + pt_s * krnl_dim;
            DTYPE *blk_node = blk_inv + blk_inv_ptr[i];
            if (blk_size == 0) continue;
            // Evaluate the kernel block K(leaf, leaf) directly into blk_node
            h2pack->krnl_eval(
                coord + pt_s, n_point, npt,
                coord + pt_s, n_point, npt,
                h2pack->krnl_param, blk_node, npt * krnl_dim
            );
            // Diagonal shift: blk_node += shift * I
            for (int j = 0; j < blk_size; j++)
                blk_node[j * blk_size + j] += shift;
            // Invert the shifted block in place: LU factorize, then invert
            int info;
            info = LAPACK_GETRF(LAPACK_ROW_MAJOR, blk_size, blk_size, blk_node, blk_size, ipiv);
            ASSERT_PRINTF(info == 0, "Node %d: blk_size = %d, LAPACK_GETRF return %d\n", node, blk_size, info);
            info = LAPACK_GETRI(LAPACK_ROW_MAJOR, blk_size, blk_node, blk_size, ipiv);
            ASSERT_PRINTF(info == 0, "Node %d: blk_size = %d, LAPACK_GETRI return %d\n", node, blk_size, info);
        }  // End of i loop
    }  // End of "#pragma omp parallel"
    free(all_ipiv);
    double et = get_wtime_sec();
    // Permutation index arrays and scratch vectors used when applying the
    // preconditioner (b and x live in the external ordering; the blocks live
    // in the H2 internal ordering)
    size_t pmt_idx_bytes = sizeof(int)   * h2pack->krnl_mat_size;
    size_t pmt_vec_bytes = sizeof(DTYPE) * h2pack->krnl_mat_size;
    int   *fwd_pmt = (int*)   malloc(pmt_idx_bytes);
    int   *bwd_pmt = (int*)   malloc(pmt_idx_bytes);
    DTYPE *pmt_b   = (DTYPE*) malloc(pmt_vec_bytes);
    DTYPE *pmt_x   = (DTYPE*) malloc(pmt_vec_bytes);
    ASSERT_PRINTF(
        fwd_pmt != NULL && bwd_pmt != NULL && pmt_b != NULL && pmt_x != NULL,
        "Failed to allocate vector permutation arrays for FSAI preconditioner\n"
    );
    memcpy(fwd_pmt, h2pack->fwd_pmt_idx, pmt_idx_bytes);
    memcpy(bwd_pmt, h2pack->bwd_pmt_idx, pmt_idx_bytes);
    total_msize += 2 * (pmt_idx_bytes + pmt_vec_bytes);
    // Populate the output structure; the preconditioner owns all arrays above
    precond->mat_size    = h2pack->krnl_mat_size;
    precond->n_block     = n_block;
    precond->blk_sizes   = blk_sizes;
    precond->blk_displs  = blk_displs;
    precond->blk_inv     = blk_inv;
    precond->pmt_b       = pmt_b;
    precond->pmt_x       = pmt_x;
    precond->fwd_pmt     = fwd_pmt;
    precond->bwd_pmt     = bwd_pmt;
    precond->blk_inv_ptr = blk_inv_ptr;
    precond->t_build     = et - st;
    precond->t_apply     = 0.0;
    precond->n_apply     = 0;
    precond->mem_MB      = (double) total_msize / 1048576.0;
    *precond_ = precond;
}
// Apply block Jacobi preconditioner, x := M_{BJP}^{-1} * b
void block_jacobi_precond_apply(block_jacobi_precond_p precond, const DTYPE *b, DTYPE *x)
{
    if (precond == NULL) return;
    const double t_start = get_wtime_sec();
    // Permute b from the external ordering into the internal (H2) ordering
    gather_vector_elements(sizeof(DTYPE), precond->mat_size, precond->fwd_pmt, b, precond->pmt_b);
    // Each diagonal block's stored inverse multiplies its slice of b independently
    #pragma omp parallel for schedule(dynamic)
    for (int blk = 0; blk < precond->n_block; blk++)
    {
        int nrow   = precond->blk_sizes[blk];
        int offset = precond->blk_displs[blk];
        const DTYPE *inv_blk = precond->blk_inv + precond->blk_inv_ptr[blk];
        const DTYPE *b_slice = precond->pmt_b + offset;
        DTYPE *x_slice = precond->pmt_x + offset;
        // x_slice := inv_blk * b_slice
        CBLAS_GEMV(
            CblasRowMajor, CblasNoTrans, nrow, nrow,
            1.0, inv_blk, nrow, b_slice, 1, 0.0, x_slice, 1
        );
    }
    // Permute the result back to the external ordering
    gather_vector_elements(sizeof(DTYPE), precond->mat_size, precond->bwd_pmt, precond->pmt_x, x);
    // Accumulate apply-time statistics
    precond->t_apply += get_wtime_sec() - t_start;
    precond->n_apply++;
}
// Destroy a block_jacobi_precond structure
void block_jacobi_precond_destroy(block_jacobi_precond_p *precond_)
{
    block_jacobi_precond_p p = *precond_;
    if (p == NULL) return;
    // Release all owned arrays; free(NULL) is a no-op, so partially built
    // structures are handled without extra checks
    free(p->blk_sizes);
    free(p->blk_displs);
    free(p->blk_inv);
    free(p->pmt_b);
    free(p->pmt_x);
    free(p->fwd_pmt);
    free(p->bwd_pmt);
    free(p->blk_inv_ptr);
    // Release the structure itself and clear the caller's pointer to
    // guard against use-after-free
    free(p);
    *precond_ = NULL;
}
// Print statistic info of a block_jacobi_precond structure
void block_jacobi_precond_print_stat(block_jacobi_precond_p precond)
{
    if (precond == NULL) return;
    // Guard against division by zero when the preconditioner has never been
    // applied (n_apply == 0); report 0 average apply time in that case
    double avg_apply_t = 0.0;
    if (precond->n_apply > 0)
        avg_apply_t = precond->t_apply / (double) precond->n_apply;
    printf(
        "Block Jacobi precond used memory = %.2lf MB, build time = %.3lf sec, apply avg time = %.3lf sec\n",
        precond->mem_MB, precond->t_build, avg_apply_t
    );
}
|
GB_reduce_each_vector.c | //------------------------------------------------------------------------------
// GB_reduce_each_vector: Tx(j)=reduce(A(:,j)), reduce a matrix to a vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a vector. The kth vector A(:,k) is reduced to the kth
// scalar Tx(k). Each thread computes the reductions on roughly the same number
// of entries, which means that a vector A(:,k) may be reduced by more than one
// thread. The first vector A(:,kfirst) reduced by thread tid may be partial,
// where the prior thread tid-1 (and other prior threads) may also do some of
// the reductions for this same vector A(:,kfirst). The thread tid fully
// reduces all vectors A(:,k) for k in the range kfirst+1 to klast-1. The last
// vector A(:,klast) reduced by thread tid may also be partial. Thread tid+1,
// and following threads, may also do some of the reduces for A(:,klast).
// GB_GET_J is optionally defined by the including file to fetch the vector
// index j for A(:,k); default is a no-op statement.
#ifndef GB_GET_J
#define GB_GET_J ;
#endif

{

    // Ah, Ai, asize, avlen, avdim unused for some uses of this template
    #include "GB_unused.h"

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const GB_ATYPE *GB_RESTRICT Ax = A->x ;
    size_t asize = A->type->size ;
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;

    //--------------------------------------------------------------------------
    // workspace for first and last vectors of each slice
    //--------------------------------------------------------------------------

    // Wfirst [tid] / Wlast [tid] hold the partial reductions that task tid
    // computed for its (possibly shared) first and last vectors.
    // ztype Wfirst [ntasks], Wlast [ntasks] ;
    GB_CTYPE *GB_RESTRICT Wfirst = (GB_CTYPE *) Wfirst_space ;
    GB_CTYPE *GB_RESTRICT Wlast  = (GB_CTYPE *) Wlast_space ;

    //--------------------------------------------------------------------------
    // reduce each slice
    //--------------------------------------------------------------------------

    // each thread reduces its own part in parallel
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        // if kfirst > klast then thread tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;

        //----------------------------------------------------------------------
        // reduce vectors kfirst to klast
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) to be reduced by this thread
            //------------------------------------------------------------------

            GB_GET_J ;
            int64_t pA_start, pA_end ;
            GB_get_pA_and_pC (&pA_start, &pA_end, NULL,
                tid, k, kfirst, klast, pstart_slice, NULL, NULL, Ap) ;

            //------------------------------------------------------------------
            // reduce Ax [pA_start ... pA_end-1] to a scalar, if non-empty
            //------------------------------------------------------------------

            if (pA_start < pA_end)
            {

                //--------------------------------------------------------------
                // reduce the vector to the scalar s
                //--------------------------------------------------------------

                // ztype s = (ztype) Ax [pA_start], with typecast
                GB_SCALAR (s) ;
                GB_CAST_ARRAY_TO_SCALAR (s, Ax, pA_start) ;
                for (int64_t p = pA_start+1 ; p < pA_end ; p++)
                {
                    // check for early exit (monoid terminal value reached)
                    GB_BREAK_IF_TERMINAL (s) ;
                    // s += (ztype) Ax [p], with typecast
                    GB_ADD_CAST_ARRAY_TO_SCALAR (s, Ax, p) ;
                }

                //--------------------------------------------------------------
                // save the result s
                //--------------------------------------------------------------

                // Partial results for the (possibly shared) first/last vectors
                // go to workspace; interior vectors are fully owned by this
                // task and can be written to Tx directly.
                if (k == kfirst)
                {
                    // Wfirst [tid] = s ; no typecast
                    GB_COPY_SCALAR_TO_ARRAY (Wfirst, tid, s) ;
                }
                else if (k == klast)
                {
                    // Wlast [tid] = s ; no typecast
                    GB_COPY_SCALAR_TO_ARRAY (Wlast, tid, s) ;
                }
                else
                {
                    // Tx [k] = s ; no typecast
                    GB_COPY_SCALAR_TO_ARRAY (Tx, k, s) ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // reduce the first and last vector of each slice using a single thread
    //--------------------------------------------------------------------------

    // This step is sequential, but it takes only O(ntasks) time.  The only
    // case where this could be a problem is if a user-defined operator was
    // a very costly one.

    // kprior tracks the last vector already initialized in Tx, so the first
    // task to touch a vector copies and later tasks accumulate.
    int64_t kprior = -1 ;

    for (int tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // sum up the partial result that thread tid computed for kfirst
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;

        if (kfirst <= klast)
        {
            int64_t pA_start = pstart_slice [tid] ;
            int64_t pA_end   = GB_IMIN (Ap [kfirst+1], pstart_slice [tid+1]) ;
            if (pA_start < pA_end)
            {
                if (kprior < kfirst)
                {
                    // This thread is the first one that did work on
                    // A(:,kfirst), so use it to start the reduction.
                    // Tx [kfirst] = Wfirst [tid], no typecast
                    GB_COPY_ARRAY_TO_ARRAY (Tx, kfirst, Wfirst, tid) ;
                }
                else
                {
                    // Tx [kfirst] += Wfirst [tid], no typecast
                    GB_ADD_ARRAY_TO_ARRAY (Tx, kfirst, Wfirst, tid) ;
                }
                kprior = kfirst ;
            }
        }

        //----------------------------------------------------------------------
        // sum up the partial result that thread tid computed for klast
        //----------------------------------------------------------------------

        if (kfirst < klast)
        {
            int64_t pA_start = Ap [klast] ;
            int64_t pA_end   = pstart_slice [tid+1] ;
            if (pA_start < pA_end)
            {
                /* if */ ASSERT (kprior < klast) ;
                {
                    // This thread is the first one that did work on
                    // A(:,klast), so use it to start the reduction.
                    // Tx [klast] = Wlast [tid], no typecast
                    GB_COPY_ARRAY_TO_ARRAY (Tx, klast, Wlast, tid) ;
                }
                /*
                else
                {
                    // If kfirst < klast and A(:,klast is not empty, then this
                    // task is always the first one to do work on A(:,klast),
                    // so this case is never used.
                    ASSERT (GB_DEAD_CODE) ;
                    // Tx [klast] += Wlast [tid], no typecast
                    GB_ADD_ARRAY_TO_ARRAY (Tx, klast, Wlast, tid) ;
                }
                */
                kprior = klast ;
            }
        }
    }
}
|
special_accumulation_ops.h | //
// @author raver119@gmail.com
//
#ifndef LIBND4J_SPECIAL_ACCUMULATION_OPS_H
#define LIBND4J_SPECIAL_ACCUMULATION_OPS_H
#include <templatemath.h>
//#include <ops/ops.h>
//#include <loops/reduce.h>
namespace simdOps {
// LogSumExp reduction: result = max + log(sum(exp(x - max))).
// This op requires a "special accumulation" pass: the caller must place the
// per-TAD maximum values into `result` BEFORE execution (see execSpecial /
// execSpecialCuda, which read result[i] as the stability shift).
template<typename T>
class LogSumExp {
public:
    static const bool requiresSpecialAccumulation = true;

    // identity element for the sum of exponentials
    op_def static T startingValue(const T *input) {
        return (T) 0.0f;
    }

    // combine two partial sums (cross-thread merge)
    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    // fold one transformed element into the running sum
    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    // exp(d1 - d2); d2 is the precomputed max, subtracted for numerical stability
    op_def static T op(T d1, T d2) {
        return nd4j::math::nd4j_exp<T>(d1 - d2);
    }

    // same transform, with the max supplied via extraParams[0]
    op_def static T op(T d1, T* extraParams) {
        return nd4j::math::nd4j_exp<T>(d1 - extraParams[0]);
    }

    // final result: max (extraParams[0]) + log of the accumulated sum
    op_def static T postProcess(T reduction, Nd4jIndex n, T *extraParams) {
        return extraParams[0] + nd4j::math::nd4j_log<T>(reduction);
    }

#ifdef __CUDACC__
    // In-block shared-memory reduction of sPartials[0..numItems) into sPartials[0].
    __device__ static inline void aggregatePartials(T *sPartials, int tid, int numItems, T *extraParams) {
        // start the shared memory loop on the next power of 2 less
        // than the block size.  If block size is not a power of 2,
        // accumulate the intermediate sums in the remainder range.
        int floorPow2 = numItems;

        if (floorPow2 & (floorPow2 - 1)) {
            while (floorPow2 & (floorPow2 - 1)) {
                floorPow2 &= floorPow2 - 1;
            }
            // fold the tail beyond the power-of-2 prefix back into the prefix
            if (tid >= floorPow2) {
                sPartials[tid - floorPow2] = update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
            }
            __syncthreads();
        }

        // standard binary-tree reduction over the power-of-2 prefix
        for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
            if (tid < activeThreads && tid + activeThreads < numItems) {
                sPartials[tid] = update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
            }
            __syncthreads();
        }
    }

    // CUDA kernel body: reduces each TAD (tensor-along-dimension) of dx into
    // result[r], assuming result[r] already holds that TAD's max.
    static inline __device__ void execSpecialCuda(
            T *dx,
            int *xShapeInfo,
            T *extraParams,
            T *result,
            int *resultShapeInfo,
            int *dimension,
            int dimensionLength,
            T *reductionBuffer,
            UnifiedSharedMemory *manager,
            int *tadOnlyShapeInfo,
            Nd4jIndex *tadOffsets) {
        // we assume that RESULT already holds max values

        //shared memory space for storing intermediate results
        __shared__ T *sPartials;
        // __shared__ shape::TAD *tad;
        __shared__ int tadLength;
        __shared__ int tadRank;
        __shared__ int numTads;
        __shared__ int *tadShape;
        __shared__ int *tadStride;
        // thread 0 computes TAD metadata once per block, then all threads sync
        if (threadIdx.x == 0) {
            extern __shared__ unsigned char shmem[];
            sPartials = (T *) shmem;
            tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
            tadRank = shape::rank(tadOnlyShapeInfo);
            numTads = shape::length(xShapeInfo) / tadLength;

            tadShape = shape::shapeOf(tadOnlyShapeInfo);
            tadStride = shape::stride(tadOnlyShapeInfo);
        }
        __syncthreads();

        int xCoord[MAX_RANK];

        // one block handles TADs r, r + gridDim.x, r + 2*gridDim.x, ...
        for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
            Nd4jIndex tadOffsetForBlock = tadOffsets[r];

            sPartials[threadIdx.x] = startingValue(dx + tadOffsetForBlock);

            // each thread strides over the TAD, accumulating exp(x - max)
            for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
                shape::ind2subC(tadRank, tadShape, i, xCoord);
                Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);

                sPartials[threadIdx.x] = update(sPartials[threadIdx.x], op(dx[xOffset], result[r]), extraParams);
            }
            __syncthreads();

            // aggregate. do NOT reduce for elements > tadLength
            aggregatePartials(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), &result[r]);

            __syncthreads();
            // thread 0 overwrites the max with the final log-sum-exp value
            if (threadIdx.x == 0)
                result[r] = postProcess(sPartials[threadIdx.x], tadLength, &result[r]);
        }
    }
#endif

    // CPU path: reduces each TAD of x into result[i], assuming result[i]
    // already holds that TAD's max value.
    static void execSpecial(T *x,
                            int *xShapeInfo,
                            T *extraParams,
                            T *result,
                            int *resultShapeInfoBuffer,
                            int *dimension,
                            int dimensionLength,
                            int *tadShapeInfo,
                            Nd4jIndex *tadOffset) {
        int resultLength = shape::length(resultShapeInfoBuffer);

        int *tadOnlyShapeInfo = tadShapeInfo;
        Nd4jIndex *tadOffsets = tadOffset;
        shape::TAD *tad = nullptr;

        // build TAD metadata locally when the caller did not supply it
        if (tadOnlyShapeInfo == nullptr || tadOffsets == nullptr) {
            tad = new shape::TAD(xShapeInfo, dimension, dimensionLength);
            tad->createTadOnlyShapeInfo();
            tad->createOffsets();

            if (tad->dimensionLength < 1) {
                delete tad;
                return;
            }

            tadOnlyShapeInfo = tad->tadOnlyShapeInfo;
            tadOffsets = tad->tadOffsets;
        }

        const int tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
        int numTads = shape::length(xShapeInfo) / tadLength;
        int tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);

        // scale the thread count with the number of TADs per thread
        int tadsPerThread = resultLength / TAD_THRESHOLD;
        int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
        num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

        // fast path: TADs have a usable element-wise stride
        if (tadEWS > 0 && (numTads == 1 || shape::isVector(tadOnlyShapeInfo) || shape::isScalar(tadOnlyShapeInfo))) {

#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
            for (int i = 0; i < resultLength; i++) {

                T *iter = x + tadOffsets[i];
                T start = startingValue(iter);
                if (tadEWS == 1) {
                    for (int j = 0; j < tadLength; j++) {
                        start = update(start, op(iter[j], result[i]), extraParams);
                    }
                }
                else {
                    for (int j = 0; j < tadLength; j++) {
                        start = update(start, op(iter[j * tadEWS], result[i]), extraParams);
                    }
                }
                result[i] = postProcess(start, tadLength, &result[i]);
            }
        }
        else {
            // general path: convert linear index to coordinates for each element
            int *tadShape = shape::shapeOf(tadOnlyShapeInfo);
            int *tadStride = shape::stride(tadOnlyShapeInfo);
            int tadRank = shape::rank(tadOnlyShapeInfo);

#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
            for (int i = 0; i < resultLength; i++) {
                Nd4jIndex offset = tadOffsets[i];
                int xCoord[MAX_RANK];

                T start = startingValue(x + offset);

                for (int j = 0; j < tadLength; j++) {
                    shape::ind2subC(tadRank, tadShape, j, xCoord);
                    Nd4jIndex xOffset = shape::getOffset(offset, tadShape, tadStride, xCoord, tadRank);

                    //printf("C I: %i; V: %f; op: %f\n", i, x[xOffset], op(x[xOffset], (float) result[i]));
                    start = update(start, op(x[xOffset], result[i]), extraParams);
                }
                result[i] = postProcess(start, tadLength, &result[i]);;
            }
        }

        // free locally built TAD metadata
        if (tad != nullptr)
            delete tad;
    }
};
}
#endif //LIBND4J_SPECIAL_ACCUMULATION_OPS_H
|
GrB_Matrix_export.c | //------------------------------------------------------------------------------
// GrB_Matrix_export: export a matrix in CSR, CSC, FullC, FullR, or COO format
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Exports the contents of a matrix in one of 3 formats: CSR, CSC, or COO
// (triplet format). The exported matrix is not modified. No typecast is
// performed; the output array Ax must be of the same type as the input matrix
// A.
// The required sizes of the Ap, Ai, and Ax arrays are given by
// GrB_Matrix_exportSize.
// The GraphBLAS C API does not have a GrB* method to query the type of a
// GrB_Matrix or the size of a type. SuiteSparse:GraphBLAS provides
// GxB_Matrix_type_name to query the type of a matrix (returning a string),
// which can be converted into a GrB_Type with GxB_Type_from_name. The size of
// a type can be queried with GxB_Type_size. Using these methods, a user
// application can ensure that its Ax array has the correct size for any
// given GrB_Matrix it wishes to export, regardless of its type.
#include "GB_transpose.h"
#define GB_FREE_ALL \
{ \
GB_phbix_free (T) ; \
}
//------------------------------------------------------------------------------
// GB_export_worker: export a matrix of any type
//------------------------------------------------------------------------------
// GB_export_worker: copy the contents of a matrix out in CSR, CSC, or COO
// form without modifying the input matrix.  No typecasting is performed: the
// Ax array must have the same type as A_input.  If A is not already stored in
// the requested orientation/sparsity, a temporary copy T is built first.
static GrB_Info GB_export_worker   // export a matrix
(
    GrB_Index *Ap,          // pointers for CSR, CSC, row indices for COO
    GrB_Index *Ai,          // row indices for CSR, CSC, col indices for COO
    void *Ax,               // values (must match the type of A_input)
    GrB_Index *Ap_len,      // number of entries in Ap (not # of bytes)
    GrB_Index *Ai_len,      // number of entries in Ai (not # of bytes)
    GrB_Index *Ax_len,      // number of entries in Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A_input,     // matrix to export
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = A_input ;
    struct GB_Matrix_opaque T_header ;
    GrB_Matrix T = GB_clear_static_header (&T_header) ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
        case GrB_CSC_FORMAT :
        case GrB_COO_FORMAT :
            GB_RETURN_IF_NULL (Ap) ; GB_RETURN_IF_NULL (Ap_len) ;
            GB_RETURN_IF_NULL (Ai) ; GB_RETURN_IF_NULL (Ai_len) ;
            // fall through: Ax and Ax_len are required for every format
        default:
            GB_RETURN_IF_NULL (Ax) ; GB_RETURN_IF_NULL (Ax_len) ;
    }

    // finish any pending work
    GB_MATRIX_WAIT (A) ;

    //--------------------------------------------------------------------------
    // determine current format of A and if a copy is needed
    //--------------------------------------------------------------------------

    int sparsity = GB_sparsity (A) ;
    bool is_csc = A->is_csc ;
    bool make_copy ;
    bool csc_requested ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
            // a copy is needed unless A is already sparse-by-row
            make_copy = !(sparsity == GxB_SPARSE && !is_csc) ;
            csc_requested = false ;
            break ;

        case GrB_CSC_FORMAT :
            // a copy is needed unless A is already sparse-by-column
            make_copy = !(sparsity == GxB_SPARSE && is_csc) ;
            csc_requested = true ;
            break ;

        // case GrB_DENSE_ROW_FORMAT :
        //     if (!GB_is_dense (A))
        //     {
        //         // A must dense or full
        //         return (GrB_INVALID_VALUE) ;
        //     }
        //     make_copy = !(sparsity == GxB_FULL && !is_csc) ;
        //     csc_requested = false ;
        //     break ;

        // case GrB_DENSE_COL_FORMAT :
        //     if (!GB_is_dense (A))
        //     {
        //         // A must dense or full
        //         return (GrB_INVALID_VALUE) ;
        //     }
        //     make_copy = !(sparsity == GxB_FULL && is_csc) ;
        //     csc_requested = true ;
        //     break ;

        case GrB_COO_FORMAT :
            // never make a copy to export in tuple format
            make_copy = false ;
            csc_requested = is_csc ;
            break ;

        default :
            // unknown format
            return (GrB_INVALID_VALUE) ;
    }

    //--------------------------------------------------------------------------
    // create a copy if the matrix is not in the requested format
    //--------------------------------------------------------------------------

    if (make_copy)
    {
        if (is_csc != csc_requested)
        {
            // T = A'
            GB_OK (GB_transpose_cast (T, A->type, csc_requested, A, false,
                Context)) ;
        }
        else
        {
            // T = A
            GB_OK (GB_dup_worker (&T, A->iso, A, true, A->type, Context)) ;
        }

        switch (format)
        {
            case GrB_CSR_FORMAT :
            case GrB_CSC_FORMAT :
                GB_OK (GB_convert_any_to_sparse (T, Context)) ;
                break ;

            // case GrB_DENSE_ROW_FORMAT :
            // case GrB_DENSE_COL_FORMAT :
            //     GB_convert_any_to_full (T) ;
            //     break ;

            default :
                break ;
        }

        // export the copy T, not the user's matrix A_input
        A = T ;
    }

    //--------------------------------------------------------------------------
    // export the contents of the matrix
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GrB_Index nvals = GB_nnz (A) ;
    int64_t plen = A->vdim+1 ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
        case GrB_CSC_FORMAT :

            // the caller-provided arrays must be large enough
            if (plen > (*Ap_len) || nvals > (*Ai_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            GB_memcpy (Ap, A->p, plen * sizeof (GrB_Index), nthreads_max) ;
            GB_memcpy (Ai, A->i, nvals * sizeof (GrB_Index), nthreads_max) ;
            (*Ap_len) = plen ;
            (*Ai_len) = nvals ;

        // case GrB_DENSE_ROW_FORMAT :
        // case GrB_DENSE_COL_FORMAT :

            if (nvals > (*Ax_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            (*Ax_len) = nvals ;
            ASSERT (csc_requested == A->is_csc) ;
            if (A->iso)
            {
                // expand the iso A->x into the non-iso array Ax
                ASSERT (nvals > 0) ;
                GB_iso_expand (Ax, nvals, A->x, A->type->size, Context) ;
            }
            else
            {
                // copy nvals entries, each of A->type->size bytes.  The byte
                // count must be scaled by the type size (it was "nvals" alone,
                // which copied only the first nvals bytes of A->x).
                GB_memcpy (Ax, A->x, nvals * A->type->size, nthreads_max) ;
            }
            break ;

        default:
        case GrB_COO_FORMAT :

            // the caller-provided arrays must be large enough
            if (nvals > (*Ap_len) || nvals > (*Ai_len) || nvals > (*Ax_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            // extract (row, col, value) triplets directly from A
            GB_OK (GB_extractTuples (Ap, Ai, Ax, &nvals, A->type->code, A,
                Context)) ;
            (*Ap_len) = nvals ;
            (*Ai_len) = nvals ;
            (*Ax_len) = nvals ;
            break ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_ALL ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// GrB_Matrix_export_*: export a matrix of a given type
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL ;

// GB_EXPORT: expand one GrB/GxB_Matrix_export_* wrapper for a given C type
// (ctype), type-name suffix (T), and GraphBLAS type code (acode).  Each
// wrapper validates A, checks that A's type code matches acode, and forwards
// the work to GB_export_worker.
// NOTE(review): the GrB_DOMAIN_MISMATCH early return skips GB_BURBLE_END
// after GB_BURBLE_START -- confirm the burble state need not be closed on
// that path.
#define GB_EXPORT(prefix,ctype,T,acode)                                      \
GrB_Info GB_EVAL3 (prefix, _Matrix_export_, T) /* export a matrix */         \
(                                                                            \
    GrB_Index *Ap,     /* pointers for CSR, CSC, row indices for COO    */   \
    GrB_Index *Ai,     /* row indices for CSR, CSC, col indices for COO */   \
    ctype *Ax,         /* values (must match the type of A)             */   \
    GrB_Index *Ap_len, /* number of entries in Ap (not # of bytes)      */   \
    GrB_Index *Ai_len, /* number of entries in Ai (not # of bytes)      */   \
    GrB_Index *Ax_len, /* number of entries in Ax (not # of bytes)      */   \
    GrB_Format format, /* export format                                 */   \
    GrB_Matrix A       /* matrix to export                              */   \
)                                                                            \
{                                                                            \
    GB_WHERE1 (GB_STR(prefix) "_Matrix_export_" GB_STR(T)                    \
        " (Ap, Ai, Ax, &Ap_len, &Ai_len, &Ax_len, format, A)") ;             \
    GB_BURBLE_START (GB_STR(prefix) "_Matrix_export_" GB_STR(T)) ;           \
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;                                        \
    if (A->type->code != acode) return (GrB_DOMAIN_MISMATCH) ;               \
    GrB_Info info = GB_export_worker (Ap, Ai, (void *) Ax,                   \
        Ap_len, Ai_len, Ax_len, format, A, Context) ;                        \
    GB_BURBLE_END ;                                                          \
    return (info) ;                                                          \
}

// instantiate one export function per built-in type, plus UDT for
// user-defined types (the UDT variant takes the values as void *)
GB_EXPORT (GrB, bool      , BOOL  , GB_BOOL_code  )
GB_EXPORT (GrB, int8_t    , INT8  , GB_INT8_code  )
GB_EXPORT (GrB, int16_t   , INT16 , GB_INT16_code )
GB_EXPORT (GrB, int32_t   , INT32 , GB_INT32_code )
GB_EXPORT (GrB, int64_t   , INT64 , GB_INT64_code )
GB_EXPORT (GrB, uint8_t   , UINT8 , GB_UINT8_code )
GB_EXPORT (GrB, uint16_t  , UINT16, GB_UINT16_code)
GB_EXPORT (GrB, uint32_t  , UINT32, GB_UINT32_code)
GB_EXPORT (GrB, uint64_t  , UINT64, GB_UINT64_code)
GB_EXPORT (GrB, float     , FP32  , GB_FP32_code  )
GB_EXPORT (GrB, double    , FP64  , GB_FP64_code  )
GB_EXPORT (GxB, GxB_FC32_t, FC32  , GB_FC32_code  )
GB_EXPORT (GxB, GxB_FC64_t, FC64  , GB_FC64_code  )
GB_EXPORT (GrB, void      , UDT   , GB_UDT_code   )
|
omp_parallel_sections_reduction.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int test_omp_parallel_sections_reduction()
{
int sum;
int known_sum;
double dpt;
double dsum;
double dknown_sum;
double dt=0.5; /* base of geometric row for + and - test*/
double rounding_error= 1.E-5;
int diff;
double ddiff;
int product;
int known_product;
int logic_and;
int bit_and;
int logic_or;
int bit_or;
int exclusiv_bit_or;
int logics[1000];
int i;
int result;
sum = 7;
dsum=0;
product =1;
dpt = 1;
logic_and=1;
bit_and=1;
logic_or=0;
bit_or=0;
exclusiv_bit_or=0;
result =0;
/* int my_islarger;*/
/*int is_larger=1;*/
// Test summation of integers
known_sum = (999*1000)/2+7;
#pragma omp parallel sections private(i) reduction(+:sum)
{
#pragma omp section
{
for (i=1;i<300;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
sum=sum+i;
}
}
}
if(known_sum!=sum) {
result++;
fprintf(stderr,"Error in sum with integers: Result was %d"
" instead of %d.\n",sum, known_sum);
}
// Test differences of integers
diff = (999*1000)/2;
#pragma omp parallel sections private(i) reduction(-:diff)
{
#pragma omp section
{
for (i=1;i<300;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
diff=diff-i;
}
}
}
if(diff != 0) {
result++;
fprintf(stderr,"Error in Difference with integers: Result was %d"
" instead of 0.\n",diff);
}
// Test summation of doubles
for (i=0;i<20;++i) {
dpt*=dt;
}
dknown_sum = (1-dpt)/(1-dt);
#pragma omp parallel sections private(i) reduction(+:dsum)
{
#pragma omp section
{
for (i=0;i<6;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
dsum += pow(dt,i);
}
}
}
if( fabs(dsum-dknown_sum) > rounding_error ) {
result++;
fprintf(stderr,"Error in sum with doubles: Result was %f"
" instead of %f (Difference: %E)\n",
dsum, dknown_sum, dsum-dknown_sum);
}
// Test differences of doubles
dpt=1;
for (i=0;i<20;++i) {
dpt*=dt;
}
fprintf(stderr,"\n");
ddiff = (1-dpt)/(1-dt);
#pragma omp parallel sections private(i) reduction(-:ddiff)
{
#pragma omp section
{
for (i=0;i<6;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
ddiff -= pow(dt,i);
}
}
}
if( fabs(ddiff) > rounding_error) {
result++;
fprintf(stderr,"Error in Difference with doubles: Result was %E"
" instead of 0.0\n",ddiff);
}
// Test product of integers
known_product = 3628800;
#pragma omp parallel sections private(i) reduction(*:product)
{
#pragma omp section
{
for(i=1;i<3;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=3;i<7;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=7;i<11;i++) {
product *= i;
}
}
}
if(known_product != product) {
result++;
fprintf(stderr,"Error in Product with integers: Result was %d"
" instead of %d\n",product,known_product);
}
// Test logical AND
for(i=0;i<1000;i++) {
logics[i]=1;
}
#pragma omp parallel sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
if(!logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 1\n");
}
logic_and = 1;
logics[501] = 0;
#pragma omp parallel sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
if(logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 2");
}
// Test logical OR
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
if(logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 1\n");
}
logic_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
if(!logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 2\n");
}
// Test bitwise AND
for(i=0;i<1000;++i) {
logics[i]=1;
}
#pragma omp parallel sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = (bit_and & logics[i]);
}
}
}
if(!bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 1\n");
}
bit_and = 1;
logics[501]=0;
#pragma omp parallel sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = bit_and & logics[i];
}
}
}
if(bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 2");
}
// Test bitwise OR
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
if(bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 1\n");
}
bit_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
if(!bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 2\n");
}
// Test bitwise XOR
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if(exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if(!exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result==0);
}
// Run the reduction test REPETITIONS times; the process exit code is the
// number of failed repetitions (0 == success).
int main()
{
  int failures = 0;
  for (int rep = 0; rep < REPETITIONS; rep++) {
    failures += test_omp_parallel_sections_reduction() ? 0 : 1;
  }
  return failures;
}
|
DRB029-truedep1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program has data races due to true dependence within the loop at 63.
Data race pair: a[i+1]@64:5 vs. a[i]@64:12
*/
#include <stdlib.h>
#include <stdio.h>
// DataRaceBench DRB029 ("-yes" = a race is intended to be present).
// Fix: the "#pragma omp parallel for" was attached to the race-free
// initialization loop, contradicting the header comment which documents
// the race as a[i+1] (write) vs a[i] (read) in the dependence loop.
// The pragma now parallelizes that loop, restoring the documented
// loop-carried true dependence (and hence the intended data race).
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[100];

  /* serial initialization: a[i] = i */
  for (i=0;i<len;i++)
    a[i]=i;

  /* intentionally racy: iteration i writes a[i+1] which iteration i+1
     reads as a[i] -- a loop-carried true dependence */
#pragma omp parallel for private(i)
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+1;

  printf("a[50]=%d\n", a[50]);
  return 0;
}
|
a.35.5.c | /* { dg-do compile } */
void work (int, int);
/* Deliberately ill-formed OpenMP: a barrier region may not be closely
   nested inside a critical region.  The dg-error annotation checks that
   the compiler emits the corresponding diagnostic, so the structure of
   this function must not be changed. */
void
wrong5 (int n)
{
#pragma omp parallel
  {
#pragma omp critical
    {
      work (n, 0);
      /* incorrect nesting of barrier region in a critical region */
#pragma omp barrier /* { dg-error "may not be closely nested" } */
      work (n, 1);
    }
  }
}
|
pr38650.c | /* PR c++/38650 */
/* { dg-do run } */
#include <stdlib.h>
int e;
/* Regression test for PR c++/38650: reduction(+:e) must work when the
   loop bound is a volatile variable (j) or a literal (10), combined with
   each increment spelling (i += 1, ++i, i++).  Every parallel loop adds
   1 to e ten times, so e must equal 10 after each loop; the three
   increment forms are the point of the test and must stay distinct. */
int
main ()
{
  volatile int i, j = 10;
  e = 0;
#pragma omp parallel for reduction(+:e)
  for (i = 0; i < j; i += 1)
    e++;
  if (e != 10)
    abort ();
  e = 0;
#pragma omp parallel for reduction(+:e)
  for (i = 0; i < j; ++i)
    e++;
  if (e != 10)
    abort ();
  e = 0;
#pragma omp parallel for reduction(+:e)
  for (i = 0; i < j; i++)
    e++;
  if (e != 10)
    abort ();
  e = 0;
#pragma omp parallel for reduction(+:e)
  for (i = 0; i < 10; i += 1)
    e++;
  if (e != 10)
    abort ();
  e = 0;
#pragma omp parallel for reduction(+:e)
  for (i = 0; i < 10; ++i)
    e++;
  if (e != 10)
    abort ();
  e = 0;
#pragma omp parallel for reduction(+:e)
  for (i = 0; i < 10; i++)
    e++;
  if (e != 10)
    abort ();
  return 0;
}
|
GB_binop__isne_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int32)
// A*D function (colscale): GB (_AxD__isne_int32)
// D*A function (rowscale): GB (_DxB__isne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int32)
// C=scalar+B GB (_bind1st__isne_int32)
// C=scalar+B' GB (_bind1st_tran__isne_int32)
// C=A+scalar GB (_bind2nd__isne_int32)
// C=A'+scalar GB (_bind2nd_tran__isne_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT32 || GxB_NO_ISNE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are full (dense); the op is applied
// directly with no accumulator.  Auto-generated; body is in the template.
void GB (_Cdense_ewise3_noaccum__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate matrix B into an all-dense C, sliced by B_ek_slicing.
// Returns GrB_NO_VALUE when this kernel is disabled at compile time.
GrB_Info GB (_Cdense_accumB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // template provides the parallel subassign loop
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into an all-dense C.
GrB_Info GB (_Cdense_accumb__isne_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached: the block above always returns (generated code)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D,
// with cij = (aij != djj).
GrB_Info GB (_AxD__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D,
// with cij = (dii != bij).
GrB_Info GB (_DxB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B over the union of the patterns of A and B, with an
// optional mask M (complemented or not, structural or valued).
GrB_Info GB (_AaddB__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B over the intersection of the patterns of A and B,
// where C is sparse or hypersparse (method 08).
GrB_Info GB (_AemultB_08__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  For this operator GB_BINOP_FLIP is 0 (ISNE is
// commutative), so the flipxy argument does not select a different path.
GrB_Info GB (_AemultB_02__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.
GrB_Info GB (_AemultB_04__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__isne_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry p of B, with the scalar x bound
// to the first operand.  Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__isne_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            int32_t bval = GBX (Bx, p, false) ;
            Cx [p] = (x != bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry p of A, with the scalar y bound
// to the second operand.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__isne_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int32_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x != aij) via GB_CAST_OP.
GrB_Info GB (_bind1st_tran__isne_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code expanded after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij != y) via GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hcb_basis_core.h | #ifndef _HCB_BASIS_CORE_H
#define _HCB_BASIS_CORE_H
#include <complex>
#include <vector>
#include <iostream>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "benes_perm.h"
#include "openmp.h"
namespace basis_general {
// Apply a site permutation to the bit representation of state s: bit
// (N-1-i) of s is routed to the position selected by map[i]; a negative
// map entry additionally flips the bit (spin inversion).  Behaves exactly
// like the original shift-and-consume loop, but extracts each source bit
// explicitly so the loop can run in ascending order.
template<class I>
I inline hcb_map_bits(I s,const int map[],const int N){
	I ss = 0;
	for(int i=0;i<N;i++){
		const I b = (s >> (N-1-i)) & 1;   // source bit paired with map[i]
		const int j = map[i];
		ss ^= (j<0 ? (b^1)<<(N+j) : b<<(N-j-1));
	}
	return ss;
}
// Basis core for hard-core bosons (spin-1/2 in the z basis): each lattice
// site is one bit of the state integer I.  Symmetry maps are compiled into
// Benes networks (benes_maps) plus a bit-flip mask (invs) so that applying
// a map is a fixed permutation network pass instead of a per-bit loop.
template<class I>
class hcb_basis_core : public general_basis_core<I>
{
	public:
		std::vector<tr_benes<I>> benes_maps;  // one Benes network per symmetry map
		std::vector<I> invs;                  // per-map flip mask (negative map entries)

		// no-symmetry constructor
		hcb_basis_core(const int _N) : \
		general_basis_core<I>::general_basis_core(_N) {}

		// compile each of the _nt symmetry maps into a Benes network and
		// a flip mask.  map[j] < 0 encodes "move and flip" (the target bit
		// index is _N + map[j]); map[j] >= 0 encodes a plain move.
		hcb_basis_core(const int _N,const int _nt,const int _maps[], \
					   const int _pers[], const int _qs[]) : \
		general_basis_core<I>::general_basis_core(_N,_nt,_maps,_pers,_qs) {
			benes_maps.resize(_nt);
			invs.resize(_nt);
			ta_index<I> index;
			for(int j=0;j<bit_info<I>::bits;j++){index.data[j] = no_index;}

			for(int i=0;i<_nt;i++){
				const int * map = &general_basis_core<I>::maps[i*_N];
				I inv = 0;
				for(int j=0;j<_N;j++){
					int m = map[j];
					int bit_j = _N - j - 1;   // bit position of site j
					if(m<0){
						int bit_m = _N + m;
						index.data[bit_j] = bit_m;
						inv ^= ((I)1 << bit_j);   // flip this bit after routing
					}
					else{
						int bit_m = _N - m -1;
						index.data[bit_j] = bit_m;
					}
				}
				gen_benes<I>(&benes_maps[i],index);
				invs[i] = inv;
			}
		}

		~hcb_basis_core() {}

		// apply symmetry map n_map to a single state.  sign is untouched:
		// hard-core boson maps carry no fermionic sign.
		I map_state(I s,int n_map,int &sign){
			if(general_basis_core<I>::nt<=0){
				return s;
			}
			return benes_bwd(&benes_maps[n_map],s^invs[n_map]);
		}

		// apply symmetry map n_map to M states in place (sign unused, as
		// above); caller supplies the enclosing OpenMP parallel region.
		void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
			if(general_basis_core<I>::nt<=0){
				return;
			}
			const tr_benes<I> * benes_map = &benes_maps[n_map];
			const I inv = invs[n_map];

			#pragma omp for schedule(static)
			for(npy_intp i=0;i<M;i++){
				s[i] = benes_bwd(benes_map,s[i]^inv);
			}
		}

		// particle count = population count of the state word
		std::vector<int> count_particles(const I s){
			std::vector<int> v(1);
			v[0] = bit_count(s,general_basis_core<I>::N);
			return v;
		}

		// legacy per-bit implementation, kept for reference
		// I map_state(I s,int n_map,int &sign){
		// 	if(general_basis_core<I>::nt<=0){
		// 		return s;
		// 	}
		// 	const int n = general_basis_core<I>::N;
		// 	return hcb_map_bits(s,&general_basis_core<I>::maps[n_map*n],n);
		// }

		// void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
		// 	if(general_basis_core<I>::nt<=0){
		// 		return;
		// 	}
		// 	const int n = general_basis_core<I>::N;
		// 	const int * map = &general_basis_core<I>::maps[n_map*n];
		// 	#pragma omp for schedule(static,1)
		// 	for(npy_intp i=0;i<M;i++){
		// 		s[i] = hcb_map_bits(s[i],map,n);
		// 	}
		// }

		// next state with the same particle number (same popcount):
		// Gosper's hack; (0-x) avoids a unary-minus-on-unsigned warning.
		I inline next_state_pcon(const I s){
			if(s==0){return s;}
			I t = (s | (s - 1)) + 1;
			return t | ((((t & (0-t)) / (s & (0-s))) >> 1) - 1);
		}

		// apply an operator string (spin-1/2 ops z,n,x,y,+,-,I) to state r,
		// accumulating the matrix element in m.  Returns 0 on success, -1
		// for an unknown opstr character; restores r if m becomes zero.
		int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
			const I s = r;
			const I one = 1;

			for(int j=n_op-1;j>-1;j--){
				const int ind = general_basis_core<I>::N-indx[j]-1;  // bit of site indx[j]
				const I b = (one << ind);
				const bool a = (bool)((r >> ind)&one);               // site occupied?
				const char op = opstr[j];
				switch(op){
					case 'z':
						m *= (a?0.5:-0.5);   // S^z eigenvalue +-1/2
						break;
					case 'n':
						m *= (a?1:0);        // number operator
						break;
					case 'x':
						r ^= b;              // S^x = (S^+ + S^-)/2
						m *= 0.5;
						break;
					case 'y':
						m *= (a?std::complex<double>(0,0.5):std::complex<double>(0,-0.5));
						r ^= b;              // S^y = (S^+ - S^-)/(2i)
						break;
					case '+':
						m *= (a?0:1);        // raising: only acts on empty site
						r ^= b;
						break;
					case '-':
						m *= (a?1:0);        // lowering: only acts on occupied site
						r ^= b;
						break;
					case 'I':
						break;
					default:
						return -1;
				}

				if(std::abs(m)==0){
					r = s;   // matrix element vanished: restore input state
					break;
				}
			}

			return 0;
		}
};
}
#endif
|
GB_unaryop__identity_fp32_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_fp64
// op(A') function: GB_tran__identity_fp32_fp64
// C type: float
// A type: double
// cast: float cij = (float) aij
// unaryop: cij = aij
// A (input) matrix entry type
#define GB_ATYPE \
double
// C (output) matrix entry type
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// accessor for the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) Ax [p] for all anz entries, in parallel.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB_unop__identity_fp32_fp64
(
    float *restrict Cx,          // output array
    const double *restrict Ax,   // input array
    int64_t anz,                 // number of entries in Ax and Cx
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GB_control.h
    #else
    // entries are independent, so a static schedule is perfectly balanced
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;      // Cx [p] = (float) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A into C, typecasting each entry from
// double to float.  The loop itself lives in the shared template
// GB_unaryop_transpose.c, specialized via the GB_* macros defined above.
GrB_Info GB_tran__identity_fp32_fp64
(
    GrB_Matrix C,                      // output matrix
    const GrB_Matrix A,                // input matrix to transpose
    int64_t *restrict *Rowcounts,      // per-slice workspace used by the template
    GBI_single_iterator Iter,          // iterator over the vectors of A
    const int64_t *restrict A_slice,   // how A is partitioned across tasks
    int naslice                        // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;            // operator compiled out via GB_control.h
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"  // shared transpose kernel template
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* n! computed by integer multiplication; fact(0) == fact(1) == 1.
** Note: overflows (wraps) for n larger than the size_t range allows,
** which is identical to the original ascending-product behavior since
** multiplication is commutative modulo 2^width. */
static inline size_t fact(size_t n)
{
  size_t result = 1;
  while (n > 1)
    result *= n--;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Walk the singly-linked kernel list and return its final element.
** Caller must pass a non-NULL kernel. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  for ( ; kernel->next != (KernelInfo *) NULL; kernel=kernel->next)
    ;
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/* ParseKernelArray() parses a user-defined kernel array specification
** ("WxH[+X+Y][@><]:num,num,..." or an old-style flat list of numbers) into a
** newly allocated KernelInfo.  Returns NULL on any parse or allocation
** failure (the partially built kernel is destroyed first).  The caller owns
** the returned kernel and must free it with DestroyKernelInfo().
**
** FIX: the geometry prefix is now length-checked before being copied into
** the fixed-size token[] buffer; previously an over-long prefix overflowed
** the stack buffer (kernel strings can come from untrusted input).
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  /* allocate and zero-initialize the kernel structure */
  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) ResetMagickMemory(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature = MagickSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      if ((size_t) (p-kernel_string) >= MaxTextExtent)
        return(DestroyKernelInfo(kernel));  /* geometry prefix too long for token buffer */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
                                        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
                                        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetMagickToken(p,&p,token);
        if (*token == ',')
          GetMagickToken(p,&p,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (double *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetMagickToken(p,&p,token);
    if (*token == ',')
      GetMagickToken(p,&p,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  GetMagickToken(p,&p,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));

  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}
/* ParseKernelName() parses a 'named' built-in kernel specification
** ("name:args[@><]") by looking up the name in MagickKernelOptions, applying
** per-kernel-type defaults for missing geometry arguments, and delegating to
** AcquireKernelBuiltIn().  Returns NULL if the name is not a valid built-in
** kernel or the arguments are too long to process.  The caller owns the
** returned kernel list.
**
** FIX: the argument substring is now length-checked before being copied into
** the fixed-size token[] buffer; previously an over-long argument list
** overflowed the stack buffer (kernel strings can come from untrusted input).
*/
static KernelInfo *ParseKernelName(const char *kernel_string)
{
  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetMagickToken(kernel_string,&p,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  /* skip separators between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  if ((size_t) (end-p) >= MaxTextExtent)
    return((KernelInfo *) NULL);  /* argument list too long for token buffer */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:  /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 )  /* if no width then */
        args.rho = args.sigma;          /* then width = height */
      if ( args.rho < 1.0 )             /* if width too small */
        args.rho = 3;                   /* then width = 3 */
      if ( args.sigma < 1.0 )           /* if height too small */
        args.sigma = args.rho;          /* then height = width */
      if ( (flags & XValue) == 0 )      /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )          /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 )  /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )     /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }
  return(kernel);
}
/* AcquireKernelInfo() converts a user-supplied kernel string (or "@filename"
** referring to a file of kernel definitions) into a linked list of
** KernelInfo structures, dispatching each ';'-separated definition to either
** ParseKernelName() (named built-ins) or ParseKernelArray() (user arrays).
** Returns NULL on any parse failure.  The caller owns the returned list and
** must free it with DestroyKernelInfo().
**
** FIX: the file buffer read via FileToString() is now released on the parse
** error path; previously it leaked whenever a later kernel in the file
** failed to parse.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MaxTextExtent];

  const char
    *p;

  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* "@file" form: read the whole kernel list from a file */
      ExceptionInfo *exception=AcquireExceptionInfo();
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      exception=DestroyExceptionInfo(exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetMagickToken(p,NULL,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* on error, release everything acquired so far */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);  /* was leaked here */
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One why
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1,
sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +MagickSQ2;
kernel->values[5] = kernel->values[7]= -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  KernelInfo
    *clone;

  register ssize_t
    j;

  /* Deep-copy the given kernel: duplicate the structure, give it its own
  ** copy of the value array, and recursively clone any following kernels
  ** so the whole list can be modified without affecting the original.
  ** Returns NULL on a memory allocation failure.
  */
  assert(kernel != (KernelInfo *) NULL);
  clone=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (clone == (KernelInfo *) NULL)
    return(clone);
  *clone=(*kernel);  /* shallow copy of all scalar fields first */

  /* now give the clone its own private value array */
  clone->values=(double *) AcquireAlignedMemory(kernel->width,
      kernel->height*sizeof(*kernel->values));
  if (clone->values == (double *) NULL)
    return(DestroyKernelInfo(clone));
  for (j=((ssize_t) (kernel->width*kernel->height))-1; j >= 0; j--)
    clone->values[j]=kernel->values[j];

  /* clone the rest of the kernel list, cleaning up on failure */
  if (kernel->next != (KernelInfo *) NULL)
    {
      clone->next=CloneKernelInfo(kernel->next);
      if (clone->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(clone));
    }
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /* Free a whole Convolution/Morphology kernel list.  Each node's value
  ** array is released, then the node itself, walking the list
  ** iteratively rather than by recursion.  Always returns NULL.
  */
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(double *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* Disabled (dead) code: an in-place horizontal mirror ("Flop") of a
** kernel.  Kept for reference only; it is not compiled.
** NOTE(review): `angle` below is not declared anywhere in this function,
** so this would not compile if the #if 0 were removed — presumably left
** over from an earlier refactor.  Verify before re-enabling.
*/
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      register ssize_t
        x,r;
      register double
        *k,t;
      /* swap values from the two ends of each row, working inward */
      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x], k[x]=k[r], k[r]=t;
      /* mirror the origin's x offset to match the flopped values */
      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *clone,
    *last;

  /* Expand a single kernel into a 4-kernel list: the original, its 180
  ** degree flip, a 90 degree transpose, and a final flop, appended in
  ** that order.  This specific order gives a more symmetrical thinning.
  **
  ** Fix: CloneKernelInfo() returns NULL on a memory allocation failure
  ** (see its definition above); the original code passed that NULL
  ** straight into RotateKernelInfo()/LastKernelInfo() and crashed.  On
  ** failure we now stop expanding, leaving the (shorter) list valid.
  */
  last = kernel;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flip */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 90);    /* transpose */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flop */
  LastKernelInfo(last)->next = clone;

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    n;

  /* Kernels must first agree on geometry: size and origin location. */
  if ((kernel1->width  != kernel2->width)  ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x      != kernel2->x)      ||
      (kernel1->y      != kernel2->y))
    return MagickFalse;

  /* Then every kernel value must match. */
  for (n=0; n < (kernel1->width*kernel1->height); n++) {
    /* A NaN ("don't care") entry only matches another NaN entry. */
    if ( IsNaN(kernel1->values[n]) && !IsNaN(kernel2->values[n]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[n]) && !IsNaN(kernel1->values[n]) )
      return MagickFalse;
    /* Non-NaN values must agree to within epsilon.  (When both are NaN
    ** the difference is NaN, the comparison is false, and we continue.)
    */
    if ( fabs(kernel1->values[n] - kernel2->values[n]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *clone,
    *last;

  /* Repeatedly rotate a clone of the last kernel by 'angle' and append
  ** it to the list, until the rotation cycles back to the first kernel.
  **
  ** Fix: CloneKernelInfo() returns NULL on a memory allocation failure
  ** (see its definition above); the original code handed that NULL to
  ** RotateKernelInfo()/SameKernelInfo() and crashed.  On failure we now
  ** stop expanding, leaving the (partially expanded) list valid.
  */
  last = kernel;
  DisableMSCWarning(4127)
  while(1) {
  RestoreMSCWarning
    clone = CloneKernelInfo(last);
    if (clone == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(clone, angle);
    if ( SameKernelInfo(kernel, clone) != MagickFalse )
      break;
    LastKernelInfo(last)->next = clone;
    last = clone;
  }
  clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /* Recompute the meta-data (minimum, maximum, and the separate
  ** negative/positive range sums) of this one kernel directly from its
  ** values.  Values smaller in magnitude than MagickEpsilon are flushed
  ** to exactly zero first.  Only this kernel is updated, not the list.
  */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    double
      value;

    if ( fabs(kernel->values[n]) < MagickEpsilon )
      kernel->values[n] = 0.0;
    value = kernel->values[n];
    if (value < 0)
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but
% without any user controls. This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine.  The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*p_view,
*q_view;
register ssize_t
i;
size_t
*changes,
changed,
virt_width;
ssize_t
y,
offx,
offy;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(result_image != (Image *) NULL);
assert(result_image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
status=MagickTrue;
progress=0;
p_view=AcquireVirtualCacheView(image,exception);
q_view=AcquireAuthenticCacheView(result_image,exception);
virt_width=image->columns+kernel->width-1;
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* kernel is used as is, without reflection */
break;
default:
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
changed=0;
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changes[i]=0;
if ( method == ConvolveMorphology && kernel->width == 1 )
{ /* Special handling (for speed) of vertical (blur) kernels.
** This performs its handling in columns rather than in rows.
** This is only done for convolve as it is the only method that
** generates very large 1-D vertical kernels (such as a 'BlurKernel')
**
** Timing tests (on single CPU laptop)
** Using a vertical 1-d Blue with normal row-by-row (below)
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.807u
** Using this column method
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.620u
**
** Anthony Thyssen, 14 June 2010
*/
register ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,result_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
y;
ssize_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
exception);
q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = offy;
for (y=0; y < (ssize_t) image->rows; y++)
{
DoublePixelPacket
result;
register ssize_t
v;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
/* Copy input image to the output image for unused channels
* This removes need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+r));
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNaN(*k) ) continue;
result.red += (*k)*GetPixelRed(k_pixels);
result.green += (*k)*GetPixelGreen(k_pixels);
result.blue += (*k)*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
double
gamma; /* divisor, sum of color alpha weighting */
MagickRealType
alpha; /* alpha weighting for colors : alpha */
size_t
count; /* alpha valus collected, number kernel values */
count=0;
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNaN(*k) ) continue;
alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
count++; /* number of alpha values collected */
alpha*=(*k); /* include kernel weighting now */
gamma += alpha; /* normalize alpha weights only */
result.red += alpha*GetPixelRed(k_pixels);
result.green += alpha*GetPixelGreen(k_pixels);
result.blue += alpha*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += alpha*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
/* Sync'ed channels, all channels are modified */
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelRed(q,ClampToQuantum(gamma*result.red));
SetPixelGreen(q,ClampToQuantum(gamma*result.green));
SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q))
|| ( p[r].green != GetPixelGreen(q))
|| ( p[r].blue != GetPixelBlue(q))
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+y))) )
changes[id]++;
p++;
q++;
} /* y */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* x */
result_image->type=image->type;
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changed+=changes[i];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : 0);
}
/*
** Normal handling of horizontal or rectangular kernels (row by row)
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,result_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
size_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
kernel->height, exception);
q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
DoublePixelPacket
result,
min,
max;
/* Copy input image to the output image for unused channels
* This removes need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+r));
/* Defaults */
min.red =
min.green =
min.blue =
min.opacity =
min.index = (double) QuantumRange;
max.red =
max.green =
max.blue =
max.opacity =
max.index = 0.0;
/* default result is the original pixel value */
result.red = (double) p[r].red;
result.green = (double) p[r].green;
result.blue = (double) p[r].blue;
result.opacity = QuantumRange - (double) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (double) GetPixelIndex(p_indexes+r);
switch (method) {
case ConvolveMorphology:
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
break;
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
/* use a boolean flag indicating when first match found */
result.red = 0.0; /* result is not used otherwise */
break;
default:
break;
}
switch ( method ) {
case ConvolveMorphology:
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
**
** Correlation is actually the same as this but without reflecting
** the kernel, and thus 'lower-level' that Convolution. However
** as Convolution is the more common method used, and it does not
** really cost us much in terms of processing to use a reflected
** kernel, so it is Convolution that is implemented.
**
** Correlation will have its kernel reflected before calling
** this function to do a Convolve.
**
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
result.red += (*k)*k_pixels[u].red;
result.green += (*k)*k_pixels[u].green;
result.blue += (*k)*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
double
alpha, /* alpha weighting for colors : alpha */
gamma; /* divisor, sum of color alpha weighting */
size_t
count; /* alpha valus collected, number kernel values */
count=0;
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
count++; /* number of alpha values collected */
alpha*=(*k); /* include kernel weighting now */
gamma += alpha; /* normalize alpha weights only */
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index+=alpha*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue)));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma*
result.index)));
}
break;
case ErodeMorphology:
/* Minimum Value within kernel neighbourhood
**
** NOTE that the kernel is not reflected for this operation!
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
**
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* Minimum of Foreground Pixel minus Maxumum of Background Pixels
**
** NOTE that the kernel is not reflected for this operation,
** and consists of both foreground and background pixel
** neighbourhoods, 0.0 for background, and 1.0 for foreground
** with either Nan or 0.5 values for don't care.
**
** Note that this will never produce a meaningless negative
** result. Such results can cause Thinning/Thicken to not work
** correctly when used against a greyscale image.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) ) continue;
if ( (*k) > 0.7 )
{ /* minimim of foreground pixels */
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(
k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
min.index -= max.index; Maximize( min.index, 0.0 );
break;
case ErodeIntensityMorphology:
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
if ( result.red == 0.0 ||
GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changes[id]++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateIntensityMorphology:
/* Select Pixel with Maximum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity (yet).
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
if ( result.red == 0.0 ||
GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changes[id]++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case IterativeDistanceMorphology:
/* Work out an iterative distance from black edge of a white image
** shape. Essentually white values are decreased to the smallest
** 'distance from edge' it can find.
**
** It works by adding kernel values to the neighbourhood, and and
** select the minimum value found. The kernel is rotated before
** use, so kernel distances match resulting distances, when a user
** provided asymmetric kernel is applied.
**
**
** This code is almost identical to True GrayScale Morphology But
** not quite.
**
** GreyDilate Kernel values added, maximum value found Kernel is
** rotated before use.
**
** GrayErode: Kernel values subtracted and minimum value found No
** kernel rotation used.
**
** Note the the Iterative Distance method is essentially a
** GrayErode, but with negative kernel values, and kernel
** rotation applied.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case UndefinedMorphology:
default:
break; /* Do nothing */
}
/* Final mathematics of results (combine with original image?)
**
** NOTE: Difference Morphology operators Edge* and *Hat could also
** be done here but works better with iteration as a image difference
** in the controlling function (below). Thicken and Thinning however
** should be done here so thay can be iterated correctly.
*/
switch ( method ) {
case HitAndMissMorphology:
case ErodeMorphology:
result = min; /* minimum of neighbourhood */
break;
case DilateMorphology:
result = max; /* maximum of neighbourhood */
break;
case ThinningMorphology:
/* subtract pattern match from original */
result.red -= min.red;
result.green -= min.green;
result.blue -= min.blue;
result.opacity -= min.opacity;
result.index -= min.index;
break;
case ThickenMorphology:
/* Add the pattern matchs to the original */
result.red += min.red;
result.green += min.green;
result.blue += min.blue;
result.opacity += min.opacity;
result.index += min.index;
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case UndefinedMorphology:
case ConvolveMorphology:
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
break; /* full pixel was directly assigned - not a channel method */
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte != MagickFalse )
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x))) )
changes[id]++;
p++;
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* y */
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changed+=changes[i];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
changed,
virt_width;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
case VoronoiMorphology:
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
break;
}
/* DO NOT THREAD THIS CODE! */
/* two views into same image (virtual, and actual) */
virt_view=AcquireVirtualCacheView(image,exception);
auth_view=AcquireAuthenticCacheView(image,exception);
virt_width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only top half of kernel is processed as we do a single pass downward
** through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = (ssize_t) virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Starting Defaults */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, while coping the color
** values of the closest pixel.
**
** This is experimental, and realy the 'alpha' component should
** be completely separate 'masking' channel so that alpha can
** also be used as part of the results.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
/* Do the reversed pass through the image */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only the bottom half of the kernel will be processes as we
** up the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
/* offset to origin in 'p'. while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Default - previously modified pixel */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, coping the closest color.
**
** This is experimental, and realy the 'alpha' component should
** be completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low level primitive
** application functions. This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that is based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MaxTextExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative interations = infinite (well alomst) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iteratations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THUR */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primative */
break;
default:
break;
}
/* Apply special methods with special requirments
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose != MagickFalse )
(void) (void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morpholgy methods
** erose, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primative to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dialate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem stange to convert a Correlation into a
** Convolution as the Correlation is the simplier method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose != MagickFalse ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose != MagickFalse ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) (void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose != MagickFalse && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose != MagickFalse && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primative (staging) Loop for Coumpound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
break;
case EdgeMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose != MagickFalse ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method interation */
goto exit_cleanup;
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias"
or "-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showkernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  /* Convenience front-end: apply the given morphology method to the
  ** default channel set.  All real work is delegated to
  ** MorphologyImageChannel().
  */
  return(MorphologyImageChannel(image,DefaultChannels,method,iterations,
    kernel,exception));
}
MagickExport Image *MorphologyImageChannel(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
  KernelInfo
    *curr_kernel;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  curr_kernel = (KernelInfo *) kernel;
  bias=image->bias;
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      const char
        *artifact;

      /* A user supplied "convolve:bias" artifact overrides the image's
       * own bias attribute.
       */
      artifact = GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);

      /* Kernel normalization/scaling ("-set option:convolve:scale").
       * Clone the caller's kernel first so it is never modified.
       */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if ( curr_kernel == kernel )
          curr_kernel = CloneKernelInfo(kernel);
        if (curr_kernel == (KernelInfo *) NULL)
          return((Image *) NULL);  /* FIX: clone failed -- previously this
                                    * called DestroyKernelInfo(NULL) */
        ScaleGeometryKernelInfo(curr_kernel, artifact);
      }
    }

  /* display the (normalized) kernel via stderr */
  if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
    || IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
    || IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { const char
      *artifact;

    compose = UndefinedCompositeOp;  /* use default for method */
    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL)
      compose = (CompositeOperator) ParseCommandOption(
        MagickComposeOptions,MagickFalse,artifact);
  }

  /* Apply the Morphology */
  morphology_image = MorphologyApply(image, channel, method, iterations,
    curr_kernel, compose, bias, exception);

  /* Cleanup and Exit: destroy the clone only when one was made */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* Rotate the kernel (and, recursively, every kernel later in the
  ** multi-kernel list) by the given angle in degrees.  Only multiples
  ** of 45 degrees (3x3 kernels), 90 degrees (1-D and square kernels)
  ** and 180 degrees (any kernel) are actually performed; the angle is
  ** reduced step by step as each rotation is applied.
  */

  /* angle the lower kernels first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases: some built-in kernels need no (or limited)
  ** rotation because of their symmetry.
  */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle:
          ** cycle the eight outer cells one position clockwise, the
          ** center cell (index 4) stays in place.
          */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin: map the origin offset (relative
          ** to the 3x3 center) to its 45-degree-rotated position.
          */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }

  /* Attempt rotations by 90 degrees */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* the direction of the 90 degree step depends on the new
          ** orientation of the (transposed) 1-D kernel.
          */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);  /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees:
          ** four-way cyclic exchange of symmetric cells, working inward
          ** from the outer ring.
          */
          { register size_t
              i,j,x,y;

            register double
              *k,t;

            k=kernel->values;
            for( i=0, x=kernel->width-1; i<=x; i++, x--)
              for( j=0, y=kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);    /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }

  /* Attempt the remaining 180 degree rotation */
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origon
       */
      double
        t;

      register double
        *k;

      size_t
        i,
        j;

      k=kernel->values;
      /* reverse the flat value array in place */
      for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;

      /* reflect the origin through the kernel center */
      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }

  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, posibily with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryFlags
    parsed_flags;

  GeometryInfo
    geometry_info;

  /* Parse the user supplied geometry string ("-set option:convolve:scale")
  ** into its numeric arguments and flags.
  */
  SetGeometryInfo(&geometry_info);
  parsed_flags = (GeometryFlags) ParseGeometry(geometry, &geometry_info);

  /* A '%' flag means the arguments were given as percentages. */
  if ( (parsed_flags & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }

  /* Substitute defaults for any argument the user omitted. */
  if ( (parsed_flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (parsed_flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* First argument (and normalize flags): scale/normalize the kernel. */
  ScaleKernelInfo(kernel, geometry_info.rho, parsed_flags);

  /* Second argument: blend in a scaled unity kernel, for mixing the
  ** result with the original image.
  */
  if ( (parsed_flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  /* Scale (and optionally normalize) every kernel in the multi-kernel
  ** list.  NormalizeValue ('!') normalizes by the kernel's value sum;
  ** CorrelateNormalizeValue ('^') scales positive and negative values
  ** separately, forcing a zero-summing kernel.  The kernel's cached
  ** range/extrema meta-data is updated to match.
  */
  register ssize_t
    i;

  register double
    pos_scale,
    neg_scale;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive half only */
      pos_scale = kernel->positive_range;
  }

  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finialize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* scale each value by the sign-appropriate factor; NaN entries
  ** (morphology 'don't care' cells) are left untouched.
  */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if ( ! IsNaN(kernel->values[i]) )
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;   /* FIX: was '= 1', discarding the old maximum
                            * instead of completing the swap */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'showkernel' option request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
  /* Print a human-readable description of every kernel in the
  ** multi-kernel list to standard error: type, rotation angle,
  ** geometry and origin, value range, output range classification,
  ** and the full value matrix.
  */
  const KernelInfo
    *k;

  size_t
    c, i, u, v;

  for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {

    (void) FormatLocaleFile(stderr, "Kernel");
    /* Number each kernel, but only when this is a multi-kernel list. */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, k->type) );
    /* Append the rotation angle when the kernel has one. */
    if ( fabs(k->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", k->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
          k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), k->minimum,
          GetMagickPrecision(), k->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), k->negative_range,
          GetMagickPrecision(), k->positive_range);
    /* Classify the kernel by the sum of its values. */
    if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), k->positive_range+k->negative_range);
    /* Print the value matrix row by row; 'nan' marks unset cells. */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if ( IsNaN(k->values[i]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n a l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAdditionKernelInfo method is:
%
% void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  register KernelInfo
    *k;

  /* Walk the multi-kernel list, adding the scaled 'Unity' kernel
  ** (a single impulse at the kernel origin) to each kernel in turn,
  ** then refresh that kernel's cached meta-data.
  */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);  /* recalculate the meta-data */
    }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  register KernelInfo
    *k;

  /* Replace every special 'nan' entry with zero, for each kernel in
  ** the multi-kernel list (e.g. before handing the kernel to GPU
  ** convolution code that cannot handle NaN values).
  */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      register size_t
        i;

      for (i=0; i < (k->width*k->height); i++)
        if ( IsNaN(k->values[i]) )
          k->values[i] = 0.0;
    }
  return;
}
|
find_most_influential.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_FIND_MOST_INFLUENTIAL_H
#define RIPPLES_FIND_MOST_INFLUENTIAL_H
#include <algorithm>
#include <queue>
#include <unordered_set>
#include <vector>
#include <fstream>
#include <omp.h>
#include "ripples/counting.h"
#include "ripples/imm_execution_record.h"
#include "ripples/partition.h"
#include "ripples/streaming_find_most_influential.h"
#include "ripples/utility.h"
#include "ripples/huffman.h"
#include "spdlog/sinks/stdout_color_sinks.h"
#include "spdlog/spdlog.h"
#ifdef RIPPLES_ENABLE_CUDA
#include "ripples/cuda/cuda_generate_rrr_sets.h"
#include "ripples/cuda/find_most_influential.h"
#include "thrust/count.h"
#include "thrust/device_ptr.h"
#endif
namespace ripples {
// template <typename GraphTy, typename RRRset>
// auto DumpRRRSets(const GraphTy &G, std::vector<RRRset> &RRRsets) {
// using vertex_type = typename GraphTy::vertex_type;
// auto in_begin=RRRsets.begin();
// std::vector<vertex_type> output_vtx;
// std::for_each(in_begin->begin(), in_begin->begin()+10,
// [&](const vertex_type v) { output_vtx.push_back(v); });
// return output_vtx;
// }
//! \brief Debug helper: dump the RRR set sizes to "rrr_out.bin".
//!
//! On-disk format: one size_t (number of sets) followed by one int per
//! set holding that set's element count.  The write of the vertex data
//! itself remains commented out, matching the original debugging intent.
//!
//! \param G The input graph (unused; kept for interface compatibility).
//! \param RRRsets The Random Reverse Reachability sets to dump.
template <typename GraphTy, typename RRRset>
void DumpRRRSets(const GraphTy &G, std::vector<RRRset> &RRRsets) {
  using vertex_type = typename GraphTy::vertex_type;
  size_t s1 = RRRsets.size();
  size_t total_rrr_size = 0;
  std::ofstream FILE("rrr_out.bin", std::ios::out | std::ofstream::binary);
  // Fixed: the stream was previously written without checking that the
  // file actually opened.
  if (!FILE) {
    std::cout << "DumpRRRSets: failed to open rrr_out.bin" << std::endl;
    return;
  }
  FILE.write(reinterpret_cast<const char *>(&s1), sizeof(s1));
  for (auto in_begin = RRRsets.begin(), in_end = RRRsets.end();
       in_begin != in_end; ++in_begin) {
    // Keep the record an 'int' so the file format is unchanged.
    int s2 = static_cast<int>(std::distance(in_begin->begin(), in_begin->end()));
    // Sanity check; the cast fixes the signed/unsigned comparison of the
    // original code.
    if (static_cast<size_t>(s2) != in_begin->size()) {
      std::cout<<"s2="<<s2<<", size="<<in_begin->size()<<std::endl;
    }
    total_rrr_size += s2;
    FILE.write(reinterpret_cast<const char *>(&s2), sizeof(s2));
    // FILE.write(reinterpret_cast<const char *>(&(*in_begin->begin())),
    //            s2 * sizeof(vertex_type));
  }
  std::cout<<"total-rrr="<<s1<<", total-vtx-size="<<total_rrr_size;
  std::cout<<", total-bytes:"<<(total_rrr_size * sizeof(vertex_type))/(1024*1024)<<"Mb."<<std::endl;
}
//! \brief Debug helper: dump compressed-RRR bookkeeping to "compr_out.bin".
//!
//! Writes the number of sets (size_t) followed by one int per set holding
//! its uncompressed element count; the compressed payload write itself is
//! left commented out, as in the original debugging code.
template <typename RRRset>
void DumpCompRRRSets(unsigned char** compR, unsigned int* compBytes, std::vector<RRRset> &RRRsets) {
  size_t s1 = RRRsets.size();
  size_t total_rrr_size = 0;
  std::ofstream FILE("compr_out.bin", std::ios::out | std::ofstream::binary);
  FILE.write(reinterpret_cast<const char *>(&s1), sizeof(s1));
  for (size_t idx = 0; idx < s1; ++idx) {
    int set_size = RRRsets[idx].size();
    total_rrr_size += set_size;
    unsigned int comp_len = compBytes[idx];  // used only by the disabled write
    FILE.write(reinterpret_cast<const char *>(&set_size), sizeof(set_size));
    // FILE.write(reinterpret_cast<const char *>(compR[idx]),
    //            comp_len * sizeof(unsigned char));
  }
  std::cout << std::endl;
  std::cout << "total-compRRR=" << s1 << ", total-comp-size=" << total_rrr_size;
  std::cout << ", total-bytes:" << (total_rrr_size * sizeof(int)) / (1024*1024) << "Mb." << std::endl;
}
//! \brief Select k seeds starting from the a list of Random Reverse
//! Reachability Sets.
//!
//! \tparam GraphTy The graph type.
//! \tparam RRRset The type storing Random Reverse Reachability Sets.
//! \tparam execution_tag The execution policy.
//!
//! \param G The input graph.
//! \param k The size of the seed set.
//! \param RRRsets A vector of Random Reverse Reachability sets.
//! \param ex_tag The execution policy tag.
//!
//! \return a pair where the size_t is the number of RRRset covered and
//! the set of vertices selected as seeds.
//! \brief Sequential lazy-greedy selection of the k most influential seeds
//! from the Random Reverse Reachability sets.
//!
//! Maintains a max-heap of (vertex, coverage) pairs; covered RRR sets are
//! partitioned out of the active range after each selection, and coverage
//! counters are recomputed over whichever side of the partition is smaller.
//!
//! \param G The input graph.
//! \param CFG The configuration (CFG.k is the seed-set size).
//! \param RRRsets The Random Reverse Reachability sets.
//! \param record Execution record receiving counting/pivoting timings.
//! \param enableGPU Unused in the sequential policy.
//! \return a pair (estimated covered fraction, selected seed vertices).
template <typename GraphTy, typename ConfTy, typename RRRset>
auto FindMostInfluentialSet(const GraphTy &G, const ConfTy &CFG,
                            std::vector<RRRset> &RRRsets,
                            IMMExecutionRecord &record, bool enableGPU,
                            sequential_tag &&ex_tag) {
  using vertex_type = typename GraphTy::vertex_type;
  size_t k = CFG.k;
  std::vector<uint32_t> vertexCoverage(G.num_nodes(), 0);

  // Max-heap comparator on the cached coverage counts.
  auto cmp = [](std::pair<vertex_type, size_t> &a,
                std::pair<vertex_type, size_t> &b) {
    return a.second < b.second;
  };
  using priorityQueue =
      std::priority_queue<std::pair<vertex_type, size_t>,
                          std::vector<std::pair<vertex_type, size_t>>,
                          decltype(cmp)>;

  std::vector<std::pair<vertex_type, size_t>> queue_storage(G.num_nodes());

  auto counting = measure<>::exec_time([&]() {
    CountOccurrencies(RRRsets.begin(), RRRsets.end(), vertexCoverage.begin(),
                      vertexCoverage.end(),
                      std::forward<sequential_tag>(ex_tag));
  });
  InitHeapStorage(vertexCoverage.begin(), vertexCoverage.end(),
                  queue_storage.begin(), queue_storage.end(),
                  std::forward<sequential_tag>(ex_tag));
  priorityQueue queue(cmp, std::move(queue_storage));
  std::vector<typename GraphTy::vertex_type> result;
  result.reserve(k);
  size_t uncovered = RRRsets.size();

  auto end = RRRsets.end();
  // Fixed: chrono durations are NOT zero-initialized by their default
  // constructor; value-initialize so the '+=' accumulation is defined.
  typename IMMExecutionRecord::ex_time_ms pivoting{};

  while (result.size() < k && uncovered != 0) {
    auto element = queue.top();
    queue.pop();

    // Lazy update: if the cached count is stale, refresh and re-insert.
    if (element.second > vertexCoverage[element.first]) {
      element.second = vertexCoverage[element.first];
      queue.push(element);
      continue;
    }

    uncovered -= element.second;

    // Renamed from 'cmp' to avoid shadowing the heap comparator above.
    auto notCovered = [=](const RRRset &a) -> auto {
      return !std::binary_search(a.begin(), a.end(), element.first);
    };
    auto start = std::chrono::high_resolution_clock::now();
    auto itr = partition(RRRsets.begin(), end, notCovered,
                         std::forward<sequential_tag>(ex_tag));
    pivoting += (std::chrono::high_resolution_clock::now() - start);

    counting += measure<>::exec_time([&]() {
      // Recount over whichever side of the partition is cheaper.
      if (std::distance(itr, end) < std::distance(RRRsets.begin(), itr)) {
        UpdateCounters(itr, end, vertexCoverage,
                       std::forward<sequential_tag>(ex_tag));
      } else {
        std::fill(vertexCoverage.begin(), vertexCoverage.end(), 0);
        CountOccurrencies(RRRsets.begin(), itr, vertexCoverage.begin(),
                          vertexCoverage.end(),
                          std::forward<sequential_tag>(ex_tag));
      }
    });

    end = itr;
    result.push_back(element.first);
  }

  // Guard against division by zero when no RRR sets were provided.
  double f = RRRsets.empty()
                 ? 0.0
                 : double(RRRsets.size() - uncovered) / RRRsets.size();

  record.Counting.push_back(
      std::chrono::duration_cast<typename IMMExecutionRecord::ex_time_ms>(
          counting));
  record.Pivoting.push_back(pivoting);

  return std::make_pair(f, result);
}
template <typename GraphTy, typename ConfTy, typename RRRset>
auto FindMostInfluentialSet(const GraphTy &G, const ConfTy &CFG,
std::vector<RRRset> &RRRsets,
IMMExecutionRecord &record, bool enableGPU,
omp_parallel_tag &&ex_tag) {
size_t num_gpu = 0;
size_t num_max_cpu = 0;
#pragma omp single
{
num_max_cpu =
std::min<size_t>(omp_get_max_threads(), CFG.seed_select_max_workers);
}
#ifdef RIPPLES_ENABLE_CUDA
if (enableGPU) {
num_gpu = std::min(cuda_num_devices(), CFG.seed_select_max_gpu_workers);
}
#endif
StreamingFindMostInfluential<GraphTy> SE(G, RRRsets, num_max_cpu, num_gpu, record);
return SE.find_most_influential_set(CFG.k,CFG.histogramMode);
}
#if RIPPLES_ENABLE_CUDA
// Flatten the RRR sets into two parallel host buffers and copy both to
// the device: d_rrr_sets receives the concatenated vertex ids, while
// d_rrr_index receives, for each element, the index of the set it came
// from.
template <typename Itr>
void MoveRRRSets(Itr in_begin, Itr in_end, uint32_t *d_rrr_index,
                 uint32_t *d_rrr_sets, size_t rrr_index_size,
                 size_t rrr_sets_size) {
  std::vector<uint32_t> flat_sets(rrr_sets_size);
  std::vector<uint32_t> set_ids(rrr_sets_size);
  auto out_sets = flat_sets.begin();
  auto out_ids = set_ids.begin();
  uint32_t set_id = 0;
  for (auto itr = in_begin; itr < in_end; ++itr, ++set_id) {
    out_sets = std::copy(itr->begin(), itr->end(), out_sets);
    std::fill(out_ids, out_ids + itr->size(), set_id);
    std::advance(out_ids, itr->size());
  }
  // Stage both host buffers on the device.
  cuda_h2d(reinterpret_cast<void *>(d_rrr_index),
           reinterpret_cast<void *>(set_ids.data()),
           sizeof(uint32_t) * rrr_sets_size);
  cuda_h2d(reinterpret_cast<void *>(d_rrr_sets),
           reinterpret_cast<void *>(flat_sets.data()),
           sizeof(uint32_t) * rrr_sets_size);
}
// GPU-accelerated greedy seed selection: the per-vertex counters, the
// flattened RRR sets, and a per-set coverage mask all live in device
// memory.  Each round selects the vertex with maximum residual coverage
// and updates the counters on the device.
template <typename GraphTy, typename RRRset>
auto FindMostInfluentialSet(const GraphTy &G, size_t k,
                            std::vector<RRRset> &RRRsets,
                            IMMExecutionRecord &record,
                            cuda_parallel_tag &&ex_tag) {
  using vertex_type = typename GraphTy::vertex_type;

  // Total number of vertex entries across all RRR sets.
  size_t rrr_sets_size = 0;
#pragma omp parallel for reduction(+ : rrr_sets_size)
  for (auto itr = RRRsets.begin(); itr < RRRsets.end(); ++itr) {
    rrr_sets_size += itr->size();
  }
  size_t rrr_index_size = rrr_sets_size;

  uint32_t *d_Counters;
  cuda_malloc(reinterpret_cast<void **>(&d_Counters),
              sizeof(uint32_t) * G.num_nodes());
  // BUG FIX: cuda_memset takes the device pointer itself; the old code
  // cast d_Counters to void ** (pointer-to-pointer), inconsistent with
  // the void * cast used for d_rr_mask below.
  cuda_memset(reinterpret_cast<void *>(d_Counters), 0,
              sizeof(uint32_t) * G.num_nodes());

  uint32_t *d_rrr_index;
  uint32_t *d_rrr_sets;
  cuda_malloc(reinterpret_cast<void **>(&d_rrr_index),
              sizeof(uint32_t) * rrr_index_size);
  cuda_malloc(reinterpret_cast<void **>(&d_rrr_sets),
              sizeof(uint32_t) * rrr_sets_size);

  // One byte per RRR set, non-zero once the set is covered.
  char *d_rr_mask;
  cuda_malloc(reinterpret_cast<void **>(&d_rr_mask),
              sizeof(char) * RRRsets.size());
  cuda_memset(reinterpret_cast<void *>(d_rr_mask), 0,
              sizeof(char) * RRRsets.size());

  auto counting = measure<>::exec_time([&]() {
    MoveRRRSets(RRRsets.begin(), RRRsets.end(), d_rrr_index, d_rrr_sets,
                rrr_index_size, rrr_sets_size);
  });
  counting += measure<>::exec_time([&]() {
    CudaCountOccurrencies(d_Counters, d_rrr_sets, rrr_sets_size, G.num_nodes());
  });

  std::vector<vertex_type> result;
  size_t uncovered = RRRsets.size();
  while (uncovered != 0) {
    // Find the vertex with maximum residual coverage.
    auto v = CudaMaxElement(d_Counters, G.num_nodes());
    result.push_back(v.first);
    uncovered -= v.second;
    // NOTE(review): debug trace kept to preserve behavior; remove once
    // the GPU path is validated against the reference implementation.
    std::cout << "Reference Selected : " << v.first << " " << v.second
              << std::endl;
    if (result.size() == k) break;
    // Mark newly covered sets and subtract their contribution.
    counting += measure<>::exec_time([&]() {
      CudaUpdateCounters(rrr_sets_size, d_rrr_index, d_rrr_sets, d_rr_mask,
                         d_Counters, G.num_nodes(), v.first);
    });
  }

  cuda_free(d_Counters);
  cuda_free(d_rrr_index);
  cuda_free(d_rrr_sets);
  cuda_free(d_rr_mask);

  // Fraction of RRR sets covered by the selected seeds.
  double f = double(RRRsets.size() - uncovered) / RRRsets.size();
  record.Counting.push_back(
      std::chrono::duration_cast<typename IMMExecutionRecord::ex_time_ms>(
          counting));
  return std::make_pair(f, result);
}
#endif
} // namespace ripples
#endif // RIPPLES_FIND_MOST_INFLUENTIAL_H
|
ast-dump-openmp-target.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp target
  ;  // bare NullStmt body: this is the openmp_structured_block the CHECK lines verify
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3> openmp_structured_block
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
|
reduction-clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Demo of an OpenMP parallel-for reduction: fills a[] with 0..n-1 and
 * sums it into `suma`, which starts at 10 on purpose. */
int main(int argc, char **argv) {
  int n = 20;     /* capacity; also sizes the VLA below */
  int a[n];
  int suma = 10;  /* deliberate non-zero starting value for the reduction */
  int i;

  if (argc < 2) {
    fprintf(stderr, "Falta iteraciones\n");
    exit(-1);
  }

  /* Clamp the requested iteration count to the array capacity. */
  n = atoi(argv[1]);
  if (n > 20) {
    n = 20;
    printf("n=%d", n);
  }

  for (i = 0; i < n; i++)
    a[i] = i;

  /* Each thread accumulates into a private copy of suma; OpenMP combines
     the partial sums (plus the initial 10) at the end of the region. */
#pragma omp parallel for reduction(+:suma)
  for (i = 0; i < n; i++)
    suma += a[i];

  printf("Tras 'parallel' suma=%d\n", suma);
  return 0;
}
|
pomelo_fmt_plug.c | /*
* POMELO cracker patch for JtR. Hacked together during the Hash Runner 2015
* contest by Dhiru Kholia.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pomelo;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pomelo);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512 // XXX
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "pomelo"
#define FORMAT_NAME ""
#define FORMAT_TAG "$pomelo$"
#define TAG_LENGTH sizeof(FORMAT_TAG) - 1
#if __SSE2__
#define ALGORITHM_NAME "POMELO 128/128 SSE2 1x"
#elif !defined(USE_GCC_ASM_IA32) && defined(USE_GCC_ASM_X64)
#define ALGORITHM_NAME "POMELO 64/64"
#else
#define ALGORITHM_NAME "POMELO 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests pomelo_tests[] = {
{"$pomelo$2$3$hash runner 2015$8333ad83e46e425872c5545741d6da105cd31ad58926e437d32247e59b26703e", "HashRunner2014"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
unsigned char salt[64];
unsigned int saltlen;
unsigned int t_cost;
unsigned int m_cost;
} *cur_salt;
/* Allocate the per-candidate key and hash buffers.  With OpenMP the
 * key counts are scaled so every thread processes OMP_SCALE keys per
 * crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	if (!saved_key) {
		saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
		crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
	}
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate a "$pomelo$t$m$salt$hex" ciphertext: decimal t_cost and
 * m_cost, a salt that fits the salt buffer, and exactly
 * CIPHERTEXT_LENGTH lowercase-hex digest characters. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;
	char Buf[256];
	if (strncmp(p, FORMAT_TAG, TAG_LENGTH))
		return 0;
	p += TAG_LENGTH;
	strnzcpy(Buf, p, sizeof(Buf));
	p = strtokm(Buf, "$");
	if (!p || !isdec(p))	/* t_cost */
		return 0;
	p = strtokm(NULL, "$");
	if (!p || !isdec(p))	/* m_cost */
		return 0;
	p = strtokm(NULL, "$");
	if (!p || strlen(p) >= sizeof(cur_salt->salt))	/* salt */
		return 0;
	p = strtokm(NULL, "$");
	if (!p || strlen(p) != CIPHERTEXT_LENGTH)	/* hex digest */
		return 0;
	while(*p)
		if(atoi16l[ARCH_INDEX(*p++)]==0x7f)	/* reject non-hex chars */
			return 0;
	return 1;
}
/* Parse t_cost, m_cost, and the salt out of a validated ciphertext.
 * Returns a pointer to a static salt struct (standard JtR contract:
 * the core copies SALT_SIZE bytes before the next call). */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p, *q;
	memset(&cs, 0, sizeof(cs));
	p = ciphertext + TAG_LENGTH;
	cs.t_cost = atoi(p);
	p = strchr(p, '$') + 1;
	cs.m_cost = atoi(p);
	p = strchr(p, '$') + 1;
	q = strchr(p, '$');
	cs.saltlen = q - p;
	/* The length is known and cs was zeroed above, so memcpy is the
	 * right tool; strncpy's termination/padding semantics are not
	 * needed here. */
	memcpy(cs.salt, p, cs.saltlen);
	return (void *)&cs;
}
/* Decode the trailing 64 hex digits of the ciphertext into the 32-byte
 * binary digest.  Returns a pointer to a static buffer. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *hex = strrchr(ciphertext, '$') + 1;
	int idx;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
	memset(out, 0, BINARY_SIZE);
	for (idx = 0; idx < BINARY_SIZE; idx++, hex += 2)
		out[idx] = (atoi16[ARCH_INDEX(hex[0])] << 4) | atoi16[ARCH_INDEX(hex[1])];
	return out;
}
/* Hash-table lookup helpers: each returns the low PH_MASK_n bits of the
 * first 32-bit word of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Install the salt used by the subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
int PHS_pomelo(void *out, size_t outlen, const void *in, size_t inlen, const void *salt, size_t saltlen, unsigned int t_cost, unsigned int m_cost);
/* Compute POMELO for all queued candidate keys.  NOTE: without _OPENMP
 * the `for` header is preprocessed away and only the single { } body
 * runs with index == 0 — consistent with MAX_KEYS_PER_CRYPT == 1 in the
 * non-OpenMP build. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		PHS_pomelo((unsigned char *)crypt_out[index], 32, saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->t_cost, cur_salt->m_cost);
	}
	return count;
}
/* Quick screen over all computed hashes: compares only the first
 * ARCH_SIZE bytes; cmp_one() performs the full-width comparison.
 * Without _OPENMP the loop header is compiled out and only index 0 is
 * checked, matching MAX_KEYS_PER_CRYPT == 1. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
		return 1;
	return 0;
}
/* Full BINARY_SIZE comparison against a single computed hash. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one() already compared the full
 * binary. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void pomelo_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate (NUL-terminated by pomelo_set_key). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with the John the Ripper core. */
struct fmt_main fmt_pomelo = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		pomelo_tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{ /* binary_hash[] */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		pomelo_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash[] */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
generator_spgemm_csc_asparse.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_asparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"
/**
 * Emits C source for one scalar (single-element) multiply-accumulate of
 * the sparse-A CSC kernel: load C[row], load A[nz], multiply by the
 * broadcast register b%k, store back.  The F64 path emits both an AVX
 * and an SSE3-only variant guarded by preprocessor conditionals in the
 * generated code; the F32 path is SSE-only.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_scalar( libxsmm_generated_code* io_generated_code,
                                                  const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                  const unsigned int i_k,
                                                  const unsigned int i_z,
                                                  const unsigned int* i_row_idx,
                                                  const unsigned int* i_column_idx ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;
  if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    /* load one double from C and one from A */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_load_sd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_load_sd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* AVX build of the generated code: b%k is a __m256d, so narrow it */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* SSE3-only build of the generated code */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* F32: single-precision load/FMA/store, one element */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_load_ss(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_load_ss(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ss(c%u_%u, _mm_mul_ss(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_ss(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
/**
 * Emits C source for a 2-element vectorized multiply-accumulate of the
 * sparse-A CSC kernel (two consecutive rows).  F64 uses 128-bit pd ops
 * (with an AVX variant that narrows the broadcast register); F32 loads
 * two floats through a 64-bit load_sd and type-casts, since there is no
 * 64-bit ps load.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_two_vector( libxsmm_generated_code* io_generated_code,
                                                      const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                      const unsigned int i_k,
                                                      const unsigned int i_z,
                                                      const unsigned int* i_row_idx,
                                                      const unsigned int* i_column_idx ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;
  if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* AVX build: narrow the 256-bit broadcast register to 128 bits */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* SSE3-only build */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* F32: move two packed floats via a 64-bit double load + cast */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&C[(l_n*%u)+%u]));\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&A[%u]));\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd((double*)&C[(l_n*%u)+%u], _mm_castps_pd(c%u_%u));\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
/**
 * Emits C source for a 4-element vectorized multiply-accumulate of the
 * sparse-A CSC kernel (four consecutive rows).  F64 emits a single
 * 256-bit AVX sequence plus an SSE3 fallback of two 128-bit halves;
 * F32 fits all four floats in one 128-bit register.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_four_vector( libxsmm_generated_code* io_generated_code,
                                                       const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                       const unsigned int i_k,
                                                       const unsigned int i_z,
                                                       const unsigned int* i_row_idx,
                                                       const unsigned int* i_column_idx ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;
  if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    unsigned int l_i;
    unsigned int l_z = i_z;
    /* AVX build: one 256-bit load/FMA/store */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d c%u_%u = _mm256_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d a%u_%u = _mm256_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm256_add_pd(c%u_%u, _mm256_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm256_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* SSE3 fallback: two 128-bit halves, l_z advances by 2 per step */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    for ( l_i = 0; l_i < 2; l_i++ ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, l_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z] );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, l_z, i_column_idx[i_k] + l_z );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, l_z, i_k, l_z, i_k, l_z, i_k );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z], i_k, l_z );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_z += 2;
    }
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* F32: four packed floats in one 128-bit register */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_loadu_ps(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_loadu_ps(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_ps(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
LIBXSMM_API_INTERN
/* Emit C source code (appended line-by-line to io_generated_code as strings)
 * for a sparse(CSC-A) x dense(B) = dense(C) kernel in which A is the sparse
 * operand.  The generated code loops over the columns of C and fully unrolls
 * the multiply-adds inside each column, emitting SSE3/AVX intrinsic paths
 * guarded by preprocessor checks plus a plain-C fallback. */
void libxsmm_generator_spgemm_csc_asparse( libxsmm_generated_code*        io_generated_code,
                                           const libxsmm_gemm_descriptor* i_xgemm_desc,
                                           const char*                    i_arch,
                                           const unsigned int*            i_row_idx,
                                           const unsigned int*            i_column_idx,
                                           const double*                  i_values ) {
  /* scratch buffer for one generated source line; snprintf is bounded to 511
   * so the 512-byte buffer always has room for the terminating NUL */
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;
  unsigned int l_k;
  /* flops of the generated kernel, reported via libxsmm_num_total_flops */
  unsigned int l_flop_count = 0;
  LIBXSMM_UNUSED(i_arch);
  LIBXSMM_UNUSED(i_values);
  /* loop over columns in C in generated code, we fully unroll inside each column */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_n = 0;\n  #pragma nounroll_and_jam\n  for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  /* reset the current column in C if needed */
  if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    unsigned int l_m = 0;\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* only worth emitting a simd pragma when there is more than one row */
    if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma simd\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* zero-fill literal depends on the precision (0.0 vs 0.0f) */
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_m = 0; l_m < %u; l_m++) {\n      C[(l_n*%u)+l_m] = 0.0;\n    }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    } else {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_m = 0; l_m < %u; l_m++) {\n      C[(l_n*%u)+l_m] = 0.0f;\n    }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    }
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
  assert(0 != i_column_idx);
  /* loop over columns in A, rows in B and fully unroll */
  for ( l_k = 0; l_k < (unsigned int)i_xgemm_desc->k; l_k++ ) {
    unsigned int l_column_elements = i_column_idx[l_k + 1] - i_column_idx[l_k];
    unsigned int l_z = 0;
    /* open the intrinsics path of the generated code */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) || defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    if ( l_column_elements > 0 ) {
      /* broadcast B(l_k, l_n) once per column of A; the AVX and SSE3-only
       * variants are both emitted and selected at compile time of the
       * generated kernel */
      if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n    __m256d b%u = _mm256_broadcast_sd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n    __m128d b%u = _mm_loaddup_pd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      } else {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n    __m128 b%u = _mm_broadcast_ss(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n    __m128 b%u = _mm_load_ss(&B[(l_n*%u)+%u]);    b%u = _mm_shuffle_ps(b%u, b%u, 0x00);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k, l_k, l_k, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      }
    }
    /* loop over the columns of A and look for vectorization potential */
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      assert(0 != i_row_idx);
      /* 4 element vector might be possible */
      if ( (l_z < (l_column_elements - 3)) && (l_column_elements > 3) ) {
        /* check for 256bit vector instruction: 4 consecutive row indices,
         * all inside the m range */
        if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
            (i_row_idx[i_column_idx[l_k] + l_z] + 2 == i_row_idx[i_column_idx[l_k] + l_z + 2]) &&
            (i_row_idx[i_column_idx[l_k] + l_z] + 3 == i_row_idx[i_column_idx[l_k] + l_z + 3]) &&
            (i_row_idx[i_column_idx[l_k] + l_z + 3] < (unsigned int)i_xgemm_desc->m)) {
          libxsmm_sparse_csc_asparse_innerloop_four_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z += 3;
        /* check for 128bit vector instruction: 2 consecutive row indices */
        } else if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
                   (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z++;
        /* scalar instruction */
        } else {
          if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
            libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          }
        }
      /* 2 element vector might be possible */
      } else if ( (l_z < (l_column_elements - 1)) && (l_column_elements > 1)) {
        /* check for 128bit vector instruction */
        if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
            (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z++;
        /* scalar instruction */
        } else {
          if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
            libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          }
        }
      /* scalar anyways */
      } else {
        if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
        }
      }
    }
    /* C fallback code */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#else\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* loop over the columns of A; the fallback performs the same multiply-adds
     * as the intrinsic path, so counting 2 flops per valid nonzero here is
     * valid for either compiled variant of the generated kernel */
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    C[(l_n*%u)+%u] += A[%u] * B[(l_n*%u)+%u];\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_k] + l_z], i_column_idx[l_k] + l_z, (unsigned int)i_xgemm_desc->ldb, l_k );
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_flop_count += 2;
      }
    }
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
  /* close the l_n loop of the generated code */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  /* add flop counter (per-column flops times number of columns) */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
LBLT.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <immintrin.h>
/* Upper-bound binary search on a sorted (non-decreasing) array.
 *
 * Returns the number of elements in row_pointer[0..size-1] that are
 * <= key_input, i.e. the index of the first element strictly greater
 * than key_input (== size when no such element exists, 0 when size <= 0).
 *
 * Used to map a nonzero offset (key_input) into CSR RowPtr to find the
 * row that owns it. */
int binary_search_right_boundary_kernel_LBLT(const int *row_pointer,
                                             const int key_input,
                                             const int size)
{
    int start = 0;
    int stop = size - 1;
    int median;
    int key_median;
    while (stop >= start)
    {
        /* fix: (stop + start) / 2 can overflow int for very large arrays;
         * this form is overflow-safe and yields the same midpoint */
        median = start + (stop - start) / 2;
        key_median = row_pointer[median];
        if (key_input >= key_median)
            start = median + 1;
        else
            stop = median - 1;
    }
    return start;
}
/* Sparse-dense dot product with AVX2 FMA:
 *   *res = sum_{j=0}^{len-1} Val[j] * X[indx[j]]
 * Val/indx point at the first of `len` stored nonzeros of one row; X is the
 * dense input vector.  Processes 4 nonzeros per iteration with a gathered
 * load of X, then a scalar remainder loop.
 * NOTE(review): relies on GCC/Clang extensions — `__m256d_u` (unaligned
 * vector type) and vector subscripting `sse_sum[0]` — not portable to MSVC;
 * confirm toolchain before reuse. */
void Dot_Product_Avx2_dLBLT(int len,
                            const int *indx,
                            const double *Val,
                            const double *X,
                            double *res)
{
    const int *colIndPtr = indx;
    const double *matValPtr = (double *) Val;
    const double *x = (double *) X;
    int j;
    double result = 0.0;
    __m256d vec_y;
    vec_y = _mm256_setzero_pd();
    int nnzThisLine = len;
    int k_iter = nnzThisLine / 4;
    int k_rem = nnzThisLine % 4;
    //Loop in multiples of 4 non-zeroes
    for (j = 0; j < k_iter; j++) {
        // vec_y += Val[4j..4j+3] * X[indx[4j..4j+3]] (unaligned value load,
        // X gathered element-wise via _mm256_set_pd)
        vec_y = _mm256_fmadd_pd(
                    *((__m256d_u *) (matValPtr)),
                    _mm256_set_pd(x[*(colIndPtr + 3)],
                                  x[*(colIndPtr + 2)],
                                  x[*(colIndPtr + 1)],
                                  x[*(colIndPtr)]),
                    vec_y);
        matValPtr += 4;
        colIndPtr += 4;
    }
    // Horizontal addition of the 4 partial sums in vec_y
    if (k_iter) {
        // sum[0] += sum[1] ; sum[2] += sum[3]
        vec_y = _mm256_hadd_pd(vec_y, vec_y);
        // Cast avx_sum to 128 bit to obtain sum[0] and sum[1]
        __m128d sum_lo = _mm256_castpd256_pd128(vec_y);
        // Extract 128 bits to obtain sum[2] and sum[3]
        __m128d sum_hi = _mm256_extractf128_pd(vec_y, 1);
        // Add remaining two sums
        __m128d sse_sum = _mm_add_pd(sum_lo, sum_hi);
        // Store result (vector subscript: GCC/Clang extension)
        result = sse_sum[0];
    }
    //Remainder loop for nnzThisLine%4
    for (j = 0; j < k_rem; j++)
    {
        result += *matValPtr++ * x[*colIndPtr++];
    }
    *(double *) res = result;
}
//int main(int argc, char ** argv)
/* Load-balanced CSR SpMV benchmark (LBL / LBLT partitioning schemes).
 *
 * The nonzero range [0, nnzR) is split evenly over the OpenMP threads via a
 * binary search on RowPtr.  Threads that land inside one long row get a
 * nonzero-based partition (Start2/End2, keyed by Yid), threads that must
 * absorb rows from an overloaded neighbor get a row-based partition
 * (Start1/End1, marked in label[]), and all remaining threads process whole
 * row blocks (csrSplitter_yid).  Two SpMV variants are then timed over
 * `iter` iterations: a scalar one and an AVX2 one (Dot_Product_Avx2_dLBLT).
 *
 * Outputs written into caller-supplied arrays:
 *   GFlops_LBLT[0..1]  achieved GFlop/s (scalar, AVX2)
 *   Time_LBLT[0..1]    time per iteration in ms (scalar, AVX2)
 *   time_pre[2]        partitioning (preprocessing) time in ms
 *   LBLT_error[0..1]   rows differing from the serial reference
 *
 * NOTE: Val is overwritten with all-ones so the reference result is trivial
 * to validate.  Fixes vs. previous revision:
 *   (1) the Y-reset loops no longer write Y[-1] when Yid[tid] == -1 (UB);
 *   (2) the AVX2 row loops pass Val + RowPtr[u] instead of the Val base
 *       pointer (wrong values, previously masked by the all-ones Val);
 *   (3) the AVX2 Yid path now accumulates its partial sum into Y, matching
 *       the scalar path;
 *   (4) all temporary buffers are freed (memory leaks removed). */
int spmvLBLT(int m,int n,int nnzR,int* RowPtr,int* ColIdx,double*Val,double* GFlops_LBLT,double* Time_LBLT,double* time_pre,double* LBLT_error)
{
    /* benchmark uses an all-ones matrix and vector for easy validation */
    for (int i = 0; i < nnzR; i++)
        Val[i] = 1;
    /* X = input vector, Y = result, Y_golden = serial reference */
    double *X = (double *)malloc(sizeof(double) * (n+1));
    double *Y = (double *)malloc(sizeof(double) * (m+1));
    double *Y_golden = (double *)malloc(sizeof(double) * (m+1));
    memset (X, 0, sizeof(double) * (n+1));
    memset (Y, 0, sizeof(double) * (m+1));
    memset (Y_golden, 0, sizeof(double) * (m+1));
    for (int i = 0; i < n; i++)
        X[i] = 1;
    for (int i = 0; i < m; i++)
        for(int j = RowPtr[i]; j < RowPtr[i+1]; j++)
            Y_golden[i] += Val[j] * X[ColIdx[j]];
    int nthreads = omp_get_max_threads();
    int iter = 500;
    struct timeval t1, t2, t3;
    gettimeofday(&t1, NULL);
    /* csrSplitter_yid[tid] = first row owned by thread tid: each thread gets
     * roughly stridennz nonzeros, mapped back to a row via binary search */
    int *csrSplitter_yid = (int *)malloc((nthreads+1) * sizeof(int));
    int stridennz = ceil((double)nnzR/(double)nthreads);
    for (int tid = 0; tid <= nthreads; tid++)
    {
        int boundary_yid = tid * stridennz;
        /* clamp partition boundaries to [0, nnzR] */
        boundary_yid = boundary_yid > nnzR ? nnzR : boundary_yid;
        csrSplitter_yid[tid] = binary_search_right_boundary_kernel_LBLT(RowPtr, boundary_yid, m + 1) - 1;
    }
    gettimeofday(&t2, NULL);
    /* Apinter[tid]: number of rows assigned to thread tid */
    int *Apinter = (int *)malloc(nthreads * sizeof(int));
    memset(Apinter, 0, nthreads *sizeof(int) );
    for (int tid = 0; tid < nthreads; tid++)
    {
        Apinter[tid] = csrSplitter_yid[tid+1] - csrSplitter_yid[tid];
    }
    /* Bpinter[tid]: number of nonzeros assigned to thread tid */
    int *Bpinter = (int *)malloc(nthreads * sizeof(int));
    memset(Bpinter, 0, nthreads *sizeof(int) );
    for (int tid = 0; tid < nthreads; tid++)
    {
        int num = 0;
        for (int u = csrSplitter_yid[tid]; u < csrSplitter_yid[tid+1]; u++)
        {
            num += RowPtr[ u + 1 ] - RowPtr[u];
        }
        Bpinter[tid] = num;
    }
    /* Yid[tid]: the row this thread contributes only a partial sum to,
     * or -1 when the thread owns whole rows */
    int *Yid = (int *)malloc(sizeof(int) * nthreads);
    memset (Yid, 0, sizeof(int) * nthreads);
    int flag = -2;
    for (int tid = 0; tid < nthreads; tid++)
    {
        if (csrSplitter_yid[tid + 1] - csrSplitter_yid[tid] == 0 && tid != 0)
        {
            /* zero-row thread: it sits inside one long row */
            Yid[tid] = csrSplitter_yid[tid];
            flag = 1;
        }
        else if (flag == 1)
        {
            /* thread right after a zero-row run shares that row too */
            Yid[tid] = csrSplitter_yid[tid];
            flag = -2;
        }
        else
        {
            Yid[tid] = -1;
        }
    }
    /* row-based rebalancing: spread the rows of an overloaded thread across
     * the preceding idle threads (Start1/End1, threads marked in label[]) */
    int *Start1 = (int *)malloc(sizeof(int) * nthreads);
    memset (Start1, 0, sizeof(int) * nthreads);
    int *End1 = (int *)malloc(sizeof(int) * nthreads);
    memset (End1, 0, sizeof(int) * nthreads);
    int *label = (int *)malloc(sizeof(int) * nthreads);
    memset (label, 0, sizeof(int) * nthreads);
    int start1, search1 = 0;
    for (int tid = 0;tid < nthreads;tid++)
    {
        if (Apinter[tid] == 0)
        {
            /* remember where the idle run started */
            if(search1 == 0)
            {
                start1 = tid;
                search1 = 1;
            }
        }
        if(search1 == 1 && Apinter[tid]!= 0)
        {
            /* rows per participating thread (floor) */
            int nntz = floor((double)Apinter[tid] / (double)(tid-start1+1));
            if( nntz != 0)
            {
                for(int i = start1;i <= tid;i++)
                {
                    label[i] = i;
                }
            }
            else if((tid-start1+1) >= Apinter[tid] && Apinter[tid] != 0)
            {
                for(int i = start1;i <= tid;i++)
                {
                    label[i] = i;
                }
            }
            /* last thread takes the remainder */
            int mntz = Apinter[tid] - (nntz * (tid-start1));
            int p0 = start1;
            Start1[p0] = csrSplitter_yid[tid];
            End1[p0] = Start1[p0] + nntz;
            for (int p = start1 + 1; p <= tid ; p++)
            {
                if(p == tid)
                {
                    Start1[p] = End1[p - 1];
                    End1[p] = Start1[p] + mntz;
                }
                else
                {
                    Start1[p] = End1[p-1];
                    End1[p] = Start1[p] + nntz;
                }
            }
            search1 = 0;
        }
    }
    /* nonzero-based rebalancing: used when a run of threads falls inside a
     * single row (#rows < #threads); Start2/End2 index into Val/ColIdx */
    double *Ypartialsum = (double *)malloc(sizeof(double) * nthreads);
    memset (Ypartialsum, 0, sizeof(double) * nthreads);
    double *Ysum = (double *)malloc(sizeof(double) * nthreads);
    memset (Ysum, 0, sizeof(double) * nthreads);
    int *Start2 = (int *)malloc(sizeof(int) * nthreads);
    memset (Start2, 0, sizeof(int) * nthreads);
    int *End2 = (int *)malloc(sizeof(int) * nthreads);
    memset (End2, 0, sizeof(int) * nthreads);
    int start2, search2 = 0;
    for (int tid = 0;tid < nthreads;tid++)
    {
        if (Bpinter[tid] == 0)
        {
            if(search2 == 0)
            {
                start2 = tid;
                search2 = 1;
            }
        }
        if(search2 == 1 && Bpinter[tid]!= 0)
        {
            int nntz2 = floor((double)Bpinter[tid] / (double)(tid-start2+1));
            int mntz2 = Bpinter[tid] - (nntz2 * (tid-start2));
            int nn = start2;
            /* Start2[start2] = nonzeros consumed by all earlier threads */
            for (int i = start2; i >= 0; i--)
            {
                Start2[nn] += Bpinter[i];
                End2[nn] = Start2[nn] + nntz2;
            }
            for (nn = start2 + 1; nn < tid ; nn++)
            {
                Start2[nn] = End2[nn-1];
                End2[nn] = Start2[nn] + nntz2;
            }
            if (nn == tid)
            {
                /* last thread takes the remainder */
                Start2[nn] = End2[nn - 1];
                End2[nn] = Start2[nn] + mntz2;
            }
            search2 = 0;
        }
    }
    gettimeofday(&t3, NULL);
    double time_LBL_pre = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0);
    double time_LBLT_pre = ((t3.tv_sec - t1.tv_sec) * 1000.0 + (t3.tv_usec - t1.tv_usec) / 1000.0);
    /* report the LBLT preprocessing time if any thread needed the extra
     * partitioning, else the plain LBL time */
    for(int tid = 0; tid < nthreads; tid++)
    {
        if(Yid[tid] != -1)
        {
            time_pre[2] = time_LBLT_pre;
        }
        else
        {
            time_pre[2] = time_LBL_pre;
        }
    }
    /*----------------- parallel_omp_balanced_Yid (scalar) -----------------*/
    int currentiter = 0;
    gettimeofday(&t1, NULL);
    for (currentiter = 0; currentiter < iter; currentiter++)
    {
        /* reset only the shared partial-sum rows (fix: was Y[-1] for -1) */
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
            if (Yid[tid] != -1)
                Y[Yid[tid]] = 0;
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            if (Yid[tid] == -1)
            {
                /* whole-row block */
                for (int u = csrSplitter_yid[tid]; u < csrSplitter_yid[tid+1]; u++)
                {
                    double sum = 0;
                    for (int j = RowPtr[u]; j < RowPtr[u + 1]; j++)
                    {
                        sum += Val[j] * X[ColIdx[j]];
                    }
                    Y[u] = sum;
                }
            }
            if (label[tid] != 0)
            {
                /* rebalanced whole rows */
                for (int u = Start1[tid]; u < End1[tid]; u++)
                {
                    double sum = 0;
                    for (int j = RowPtr[u]; j < RowPtr[u + 1]; j++)
                    {
                        sum += Val[j] * X[ColIdx[j]];
                    }
                    Y[u] = sum;
                }
            }
            if (Yid[tid] != -1 && label[tid] == 0)
            {
                /* partial sum of one long row, accumulated into Y */
                Ysum[tid] = 0;
                Ypartialsum[tid] = 0;
                for (int j = Start2[tid]; j < End2[tid]; j++)
                {
                    Ypartialsum[tid] += Val[j] * X[ColIdx[j]];
                }
                Ysum[tid] += Ypartialsum[tid];
                Y[Yid[tid]] += Ysum[tid];
            }
        }
    }
    gettimeofday(&t2, NULL);
    double time_balanced2 = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
    double GFlops_balanced2 = 2 * nnzR / time_balanced2 / pow(10,6);
    int errorcount_balanced2 = 0;
    for (int i = 0; i < m; i++)
        if (Y[i] != Y_golden[i])
            errorcount_balanced2++;
    GFlops_LBLT[0] = GFlops_balanced2;
    Time_LBLT[0] = time_balanced2;
    LBLT_error[0] = errorcount_balanced2;
    /*--------------- parallel_omp_balanced_avx2_Yid (AVX2) ----------------*/
    gettimeofday(&t1, NULL);
    for (currentiter = 0; currentiter < iter; currentiter++)
    {
        /* reset only the shared partial-sum rows (fix: was Y[-1] for -1) */
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
            if (Yid[tid] != -1)
                Y[Yid[tid]] = 0;
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            if (Yid[tid] == -1)
            {
                for (int u = csrSplitter_yid[tid]; u < csrSplitter_yid[tid + 1]; u++)
                {
                    /* fix: values must start at Val + RowPtr[u] */
                    Dot_Product_Avx2_dLBLT(RowPtr[u + 1] - RowPtr[u],
                                           ColIdx + RowPtr[u],
                                           Val + RowPtr[u],
                                           X,
                                           Y + u);
                }
            }
            else if (label[tid] != 0)
            {
                for (int u = Start1[tid]; u < End1[tid]; u++)
                {
                    /* fix: values must start at Val + RowPtr[u] */
                    Dot_Product_Avx2_dLBLT(
                        RowPtr[u + 1] - RowPtr[u],
                        ColIdx + RowPtr[u],
                        Val + RowPtr[u],
                        X,
                        Y + u);
                }
            }
            if (Yid[tid] != -1 && label[tid] == 0)
            {
                Dot_Product_Avx2_dLBLT(
                    End2[tid] - Start2[tid],
                    ColIdx + Start2[tid],
                    Val + Start2[tid],
                    X,
                    Ysum + tid);
                /* fix: fold the partial sum into Y (mirrors scalar path) */
                Y[Yid[tid]] += Ysum[tid];
            }
        }
    }
    gettimeofday(&t2, NULL);
    double time_balanced2_avx = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
    double GFlops_balanced2_avx = 2 * nnzR / time_balanced2_avx / pow(10,6);
    int errorcount_balanced2_avx = 0;
    for (int i = 0; i < m; i++)
        if (Y[i] != Y_golden[i])
            errorcount_balanced2_avx++;
    GFlops_LBLT[1] = GFlops_balanced2_avx;
    Time_LBLT[1] = time_balanced2_avx;
    LBLT_error[1] = errorcount_balanced2_avx;
    /* release all temporaries (fix: previously leaked) */
    free(X);
    free(Y);
    free(Y_golden);
    free(csrSplitter_yid);
    free(Apinter);
    free(Bpinter);
    free(Yid);
    free(Start1);
    free(End1);
    free(label);
    free(Ypartialsum);
    free(Ysum);
    free(Start2);
    free(End2);
    return 0;
}
|
GB_binop__islt_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int16)
// A*D function (colscale): GB (_AxD__islt_int16)
// D*A function (rowscale): GB (_DxB__islt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int16)
// C=scalar+B GB (_bind1st__islt_int16)
// C=scalar+B' GB (_bind1st_tran__islt_int16)
// C=A+scalar GB (_bind2nd__islt_int16)
// C=A'+scalar GB (_bind2nd_tran__islt_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT16 || GxB_NO_ISLT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; with the ISLT operator this
// computes Cx [p] = (Ax [p] < Bx [p]) for every entry (see GB_BINOP above).
// The loop body is supplied by the included template.
void GB (_Cdense_ewise3_noaccum__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using ISLT as
// the accumulator.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_Cdense_accumB__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the ISLT operator.
GrB_Info GB (_Cdense_accumb__islt_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner return above always fires; kept
    // by the code generator, harmless.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j), applying
// the ISLT operator; result values are written directly into C->x.
GrB_Info GB (_AxD__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i), applying the
// ISLT operator; result values are written directly into C->x.
GrB_Info GB (_DxB__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the ISLT operator; the
// numeric work is in GB_add_template.c.
GrB_Info GB (_AaddB__islt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are read only for the eWiseUnion variant; presumably they
    // replace missing entries of A and B — confirm against GB_add_template.c
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (optionally masked) where C is
// sparse/hyper, with the ISLT operator; work is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__islt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, with the ISLT operator.  Since GB_BINOP_FLIP is 0 for ISLT
// (see macro above), only the non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, with the ISLT operator; work is in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is held as a bitmap, with
// the ISLT operator; work is in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__islt_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the ISLT operator with the scalar bound as the
// first operand, so Cx [p] = (x < Bx [p]) for every present entry of B.
GrB_Info GB (_bind1st__islt_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap Bb are computed
        if (GBB (Bb, p))
        {
            int16_t b_val = GBX (Bx, p, false) ;
            Cx [p] = (x < b_val) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the ISLT operator with the scalar bound as the
// second operand, so Cx [p] = (Ax [p] < y) for every present entry of A.
GrB_Info GB (_bind2nd__islt_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap Ab are computed
        if (GBB (Ab, p))
        {
            int16_t a_val = GBX (Ax, p, false) ;
            Cx [p] = (a_val < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply the ISLT operator with the scalar as
// the first operand; the transpose machinery is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__islt_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply the ISLT operator with the scalar as
// the second operand; the transpose machinery is in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_dense_ewise3_noaccum_template.c | //------------------------------------------------------------------------------
// GB_dense_ewise3_noaccum_template: C = A+B where all 3 matrices are dense
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_unused.h"
{
    //--------------------------------------------------------------------------
    // get A, B, and C
    //--------------------------------------------------------------------------
    // any matrix may be aliased to any other (C==A, C==B, and/or A==B)
    GB_ATYPE *Ax = A->x ;
    GB_BTYPE *Bx = B->x ;
    GB_CTYPE *Cx = C->x ;
    const int64_t cnz = GB_NNZ (C) ;
    ASSERT (GB_is_dense (A)) ;
    ASSERT (GB_is_dense (B)) ;
    ASSERT (GB_is_dense (C)) ;
    int64_t p ;
    //--------------------------------------------------------------------------
    // C = A+B where all 3 matrices are dense
    //--------------------------------------------------------------------------
    // NOTE(review): the first two alias branches test the CBLAS guards with
    // bitwise `&` while the third uses `&&`; equivalent for 0/1-valued macros
    // but inconsistent — confirm against the generator source.
    if (C == B)
    {
        //----------------------------------------------------------------------
        // C = A+C where A and C are dense (in-place accumulate into C==B)
        //----------------------------------------------------------------------
        #if GB_HAS_CBLAS & GB_OP_IS_PLUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Ax, Cx, nthreads) ;     // C += A
        #elif GB_HAS_CBLAS & GB_OP_IS_MINUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Ax, Cx, nthreads) ;    // C -= A
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p) ;                  // aij = Ax [p]
            GB_BINOP (GB_CX (p), aij, GB_CX (p)) ;  // Cx [p] = aij + Cx [p]
        }
        #endif
    }
    else if (C == A)
    {
        //----------------------------------------------------------------------
        // C = C+B where B and C are dense (in-place accumulate into C==A)
        //----------------------------------------------------------------------
        #if GB_HAS_CBLAS & GB_OP_IS_PLUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ;     // C += B
        #elif GB_HAS_CBLAS & GB_OP_IS_MINUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ;    // C -= B
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETB (bij, Bx, p) ;                  // bij = Bx [p]
            GB_BINOP (GB_CX (p), GB_CX (p), bij) ;  // Cx [p] += bij
        }
        #endif
    }
    else
    {
        //----------------------------------------------------------------------
        // C = A+B where all 3 matrices are dense
        //----------------------------------------------------------------------
        // note that A and B may still be aliased to each other
        #if GB_HAS_CBLAS && GB_OP_IS_PLUS_REAL
        GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ;   // C = A
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ;     // C += B
        #elif GB_HAS_CBLAS && GB_OP_IS_MINUS_REAL
        GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ;   // C = A
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ;    // C -= B
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p) ;                  // aij = Ax [p]
            GB_GETB (bij, Bx, p) ;                  // bij = Bx [p]
            GB_BINOP (GB_CX (p), aij, bij) ;        // Cx [p] = aij + bij
        }
        #endif
    }
}
|
GB_unop__identity_int32_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_int8)
// op(A') function: GB (_unop_tran__identity_int32_int8)
// C type: int32_t
// A type: int8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cast each int8_t entry of Ax to int32_t and store it into Cx (the identity
// unary op, so the cast is the whole operation).  Two cases are handled:
// Ab == NULL means all anz entries are present; otherwise Ab is the bitmap
// (A->b) and only entries with Ab [p] nonzero are converted (the caller has
// already copied A->b into C->b).  Cx and Ax may be aliased: each position is
// read once and written once at the same index.
GrB_Info GB (_unop_apply__identity_int32_int8)
(
int32_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
if (Ab == NULL)
{
// full case: typecast all anz entries
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = (int32_t) Ax [k] ;
}
}
else
{
// bitmap case: typecast only the entries flagged in Ab
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (Ab [k])
{
Cx [k] = (int32_t) Ax [k] ;
}
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Thin wrapper: the actual transpose/typecast loops live in the shared
// template GB_unop_transpose.c, which is specialized here through the GB_*
// macros defined earlier in this generated file.  Returns GrB_NO_VALUE when
// this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_tran__identity_int32_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// A row-major C equals column-major C^T, and C^T = B^T * A^T: swap the roles
// of lhs and rhs (together with their storage-order and conjugation flags)
// and delegate to the column-major specialization below.
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor,ResInnerStride>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// Computes res += alpha * lhs * rhs (rows x depth by depth x cols) with
// cache-level blocking: panels of lhs/rhs are packed into contiguous
// buffers (blockA/blockB) and multiplied by the gebp micro-kernel.
// When `info` is non-null, run() is being executed concurrently by several
// OpenMP threads and `info` carries the per-thread synchronization state.
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
LhsMapper lhs(_lhs, lhsStride);
RhsMapper rhs(_rhs, rhsStride);
ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
// blockA is shared: each thread packs its own horizontal slice of the lhs
// into it; blockB is private to the current thread.
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users = threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(int shift=0; shift<threads; ++shift)
{
// Start from this thread's own slice so every thread begins on data it
// just packed, staggering the accesses to the other slices.
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
#pragma omp atomic
#endif
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
// Reuse the buffers pre-allocated in `blocking` when present; otherwise
// aligned scratch space is created for the packed panels.
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
// If the whole rhs fits in one (kc x nc) block, pack it only once (i2==0).
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// Callable object binding a (lhs, rhs, dest, alpha, blocking) tuple so the
// product can be invoked — possibly by several workers via parallelize_gemm —
// on row/column sub-ranges of the destination.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
// Prepare the shared blocking state for a parallel run: recompute the
// blocking sizes for num_threads and allocate the shared lhs buffer.
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
// Run the product on rows [row, row+rows) and columns [col, col+cols) of
// the destination; cols==-1 means "all remaining columns".  `info` is the
// per-thread synchronization array for the parallel kernel (null when
// running sequentially).
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class holding the cache-blocking parameters (mc, nc, kc) and the
// pointers to the packing buffers used by the level-3 product kernels.
// Derived gemm_blocking_space classes fill in the protected members.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA; // packed lhs panel storage (0 until set up by a derived class)
RhsScalar* m_blockB; // packed rhs panel storage (0 until set up by a derived class)
Index m_mc; // blocking size along the M (rows) direction
Index m_nc; // blocking size along the N (cols) direction
Index m_kc; // blocking size along the K (depth) direction
public:
// Default state: no buffers, zero blocking sizes.
level3_blocking() : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {}
inline Index mc() const { return m_mc; } // cache block size along M
inline Index nc() const { return m_nc; } // cache block size along N
inline Index kc() const { return m_kc; } // cache block size along K
inline LhsScalar* blockA() { return m_blockA; } // packed lhs buffer
inline RhsScalar* blockB() { return m_blockB; } // packed rhs buffer
};
// Compile-time-sized blocking: all three extents are fixed, so the packing
// buffers are statically-sized members and no heap allocation is needed.
// When StorageOrder is RowMajor the product is evaluated transposed, hence
// the lhs/rhs scalar types and the row/col extents are swapped.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
// The compiler can align the static buffers directly.
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
// Fallback: over-allocate raw bytes and align the pointers by hand in the
// constructor below.
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
// Runtime arguments are ignored: the blocking is fully determined at
// compile time.
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
// Round each raw buffer address up to the next EIGEN_DEFAULT_ALIGN_BYTES boundary.
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
// Buffers are static members: there is nothing to (re)allocate.
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
// Runtime-sized blocking: at least one extent is Dynamic, so the blocking
// sizes are computed at runtime and the packing buffers are heap-allocated
// lazily (allocateA/allocateB) and released in the destructor, which owns them.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA; // number of LhsScalar elements in m_blockA
Index m_sizeB; // number of RhsScalar elements in m_blockB
public:
// l3_blocking selects whether the N direction is also blocked (true) or the
// computed nc is discarded and the full width kept (false).
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Recompute the blocking for a parallel run.  Must be called before either
// buffer has been allocated (asserted below), since it changes the sizes.
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Lazy allocation: each buffer is created on first request only.
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
// High-level entry point for dense*dense products tagged GemmProduct.
// Small products are routed to the lazy coefficient-based implementation,
// destinations with a single row or column fall back to GEMV, and everything
// else goes through the blocked general_matrix_matrix_product kernel,
// possibly parallelized.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
// dst = lhs * rhs
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
// to determine the following heuristic.
// EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
// unless it has been specialized by the user or for a given architecture.
// Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
// I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
// dst += lhs * rhs
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
// dst -= lhs * rhs
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
// dst += alpha * lhs * rhs — the workhorse: extracts scalar factors and
// storage properties, sets up the blocking, then runs the GEMM kernel.
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
// Empty product: nothing to accumulate.
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
if (dst.cols() == 1)
{
// Fallback to GEMV if either the lhs or rhs is a runtime vector
typename Dest::ColXpr dst_vec(dst.col(0));
return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
}
else if (dst.rows() == 1)
{
// Fallback to GEMV if either the lhs or rhs is a runtime vector
typename Dest::RowXpr dst_vec(dst.row(0));
return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
}
// Strip transposes/conjugates/scalar multiples off the operands; the
// stripped-out scalar factors are folded into actualAlpha.
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,
Dest::InnerStrideAtCompileTime>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
// Only parallelize when the destination is large enough (or dynamic).
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
analyze.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
// Analyze per-rank timing samples: reads the host list from "hf" and one
// binary file of `maxiter` doubles per (host, cpu) pair named "<host>.<cpu>",
// then reports global min/max/mean, per-host and per-rank variation, an
// estimated parallel efficiency, and a histogram of all step times.
//
// Fixes relative to the previous version:
//  * `mean` was accumulated without being initialized (undefined behavior);
//  * `#pragma omp parallel` (without `for`) made every thread read ALL of a
//    host's files redundantly; now the iterations are divided among threads;
//  * fscanf/read/malloc results are checked, bounded "%79s"/snprintf are used,
//    errors exit with a nonzero status, and resources are released.
int main(int argc, char * argv[])
{
    FILE * fp1;
    int i, j, n, fd, ppn, nnodes;
    ssize_t rc;
    char host[80], filename[80];
    char * allhosts;
    int cpulist[40];
    int iter, maxiter = 80000; // 400 sec at 5 msec per sample
    double *** alldata;
    double mintime, maxtime, mean, samples;
    int minhost, mincpu, maxhost, maxcpu;
    double ssq, sigma, relative_variation;
    double * compmax, * compmin;
    int bin, numbins = 50;
    double xhisto, hmin, histo_bin_width, prob;
    double avgmin, sumtime, efficiency;
    long * histo;

    (void) argc;  // command-line arguments are not used
    (void) argv;

    // ranks are pinned to every 4th logical cpu: 0..76 and 88..164 (skipping
    // 80..87), which yields exactly 40 entries
    ppn = 40;
    j = 0;
    for (i = 0; i < 80; i += 4) {
        cpulist[j] = i;
        j++;
    }
    for (i = 88; i < 168; i += 4) {
        cpulist[j] = i;
        j++;
    }
    printf("starting with ppn = %d\n", ppn);

    // the host file "hf" lists one hostname per whitespace-separated token
    fp1 = fopen("hf", "r");
    if (fp1 == NULL) {
        printf("can't open hf ... exiting\n");
        exit(EXIT_FAILURE);
    }
    n = 0;
    while (1 == fscanf(fp1, "%79s", host)) n++;  // bounded read: host[80]
    nnodes = n;
    printf("got nnodes = %d\n", nnodes);
    if (nnodes <= 0) {
        printf("hf contains no hosts ... exiting\n");
        exit(EXIT_FAILURE);
    }
    rewind(fp1);

    // allocate the histogram, the host-name table, and the [node][cpu][iter]
    // cube; the cube is a single contiguous slab with two index arrays on top
    histo = (long *) malloc(numbins * sizeof(long));
    allhosts = (char *) malloc((size_t) nnodes * sizeof(host));
    alldata = (double ***) malloc((size_t) nnodes * sizeof(double **));
    if (histo == NULL || allhosts == NULL || alldata == NULL) {
        printf("malloc failed ... exiting\n");
        exit(EXIT_FAILURE);
    }
    alldata[0] = (double **) malloc((size_t) nnodes * ppn * sizeof(double *));
    if (alldata[0] == NULL) {
        printf("malloc failed ... exiting\n");
        exit(EXIT_FAILURE);
    }
    alldata[0][0] = (double *) malloc((size_t) nnodes * ppn * maxiter * sizeof(double));
    if (alldata[0][0] == NULL) {
        printf("malloc failed ... exiting\n");
        exit(EXIT_FAILURE);
    }
    for (n = 0; n < nnodes; n++) alldata[n] = alldata[0] + (size_t) n * ppn;
    for (n = 0; n < nnodes; n++) {
        for (i = 0; i < ppn; i++) {
            // size_t arithmetic avoids int overflow for large node counts
            alldata[n][i] = alldata[0][0] + (size_t) i * maxiter + (size_t) n * ppn * maxiter;
        }
    }

    // read in all data: one binary file of maxiter doubles per (host, cpu)
    for (n = 0; n < nnodes; n++) {
        if (1 != fscanf(fp1, "%79s", host)) {
            printf("unexpected end of hf ... exiting\n");
            exit(EXIT_FAILURE);
        }
        strcpy(allhosts + (size_t) n * sizeof(host), host);
        printf("reading data for host %d = %s ...\n", n, host);
        // "parallel for" (the original "parallel" made each thread run the
        // whole loop); the loop index i is implicitly private
        #pragma omp parallel for private(j,filename,fd,rc)
        for (i = 0; i < ppn; i++) {
            j = cpulist[i];
            snprintf(filename, sizeof(filename), "%s.%d", host, j);
            fd = open(filename, O_RDONLY);
            if (fd < 0) {
                printf("missing data file %s ... exiting\n", filename);
                exit(EXIT_FAILURE);
            }
            rc = read(fd, &alldata[n][i][0], maxiter * sizeof(double));
            // a short read would leave uninitialized samples, so require all bytes
            if (rc != (ssize_t) (maxiter * sizeof(double))) {
                printf("read failed for file %s ... exiting\n", filename);
                exit(EXIT_FAILURE);
            }
            close(fd);
        }
    }
    fclose(fp1);

    // compute global min/max/mean over every sample
    mintime = 1.0e30;
    maxtime = 0.0;
    mean = 0.0;  // bug fix: was accumulated without initialization
    minhost = 0;
    mincpu = 0;
    maxhost = 0;
    maxcpu = 0;
    for (n = 0; n < nnodes; n++) {
        for (i = 0; i < ppn; i++) {
            for (iter = 0; iter < maxiter; iter++) {
                mean += alldata[n][i][iter];
                if (alldata[n][i][iter] > maxtime) {
                    maxtime = alldata[n][i][iter];
                    maxhost = n;
                    maxcpu = cpulist[i];
                }
                if (alldata[n][i][iter] < mintime) {
                    mintime = alldata[n][i][iter];
                    minhost = n;
                    mincpu = cpulist[i];
                }
            }
        }
    }
    samples = ((double) nnodes) * ((double) ppn) * ((double) maxiter);
    mean = mean / samples;
    printf("\n");
    printf("global min time = %.3lf msec on host %s cpu %d \n", mintime, allhosts + (size_t) minhost * sizeof(host), mincpu);
    printf("\n");
    printf("global max time = %.3lf msec on host %s cpu %d \n", maxtime, allhosts + (size_t) maxhost * sizeof(host), maxcpu);
    printf("\n");
    printf("global avg time = %.3lf\n", mean);
    printf("\n");

    // overall standard deviation relative to the global mean
    ssq = 0.0;
    for (n = 0; n < nnodes; n++) {
        for (i = 0; i < ppn; i++) {
            for (iter = 0; iter < maxiter; iter++) {
                ssq += (alldata[n][i][iter] - mean) * (alldata[n][i][iter] - mean);
            }
        }
    }
    sigma = sqrt(ssq / samples);
    relative_variation = 100.0 * sigma / mean;
    printf("overall relative variation = %.2lf percent\n", relative_variation);

    // compute mean and relative variation by host
    samples = ((double) ppn) * ((double) maxiter);
    // compmax is the max compute time in any given iteration
    compmax = (double *) malloc(maxiter * sizeof(double));
    // compmin is the min compute time per node
    compmin = (double *) malloc((size_t) nnodes * sizeof(double));
    if (compmax == NULL || compmin == NULL) {
        printf("malloc failed ... exiting\n");
        exit(EXIT_FAILURE);
    }
    for (iter = 0; iter < maxiter; iter++) compmax[iter] = 0.0;
    for (n = 0; n < nnodes; n++) compmin[n] = 1.0e30;
    printf("\n");
    printf("percent variation = 100*sigma/mean for the max computation times per step by node\n");
    printf("          host    mean(msec)   percent variation\n");
    for (n = 0; n < nnodes; n++) {
        // the compmax used here is node-local: per-step max over this node's cpus
        mean = 0.0;
        for (iter = 0; iter < maxiter; iter++) {
            compmax[iter] = 0.0;
            for (i = 0; i < ppn; i++) {
                if (alldata[n][i][iter] > compmax[iter]) compmax[iter] = alldata[n][i][iter];
                if (alldata[n][i][iter] < compmin[n]) compmin[n] = alldata[n][i][iter];
                mean += alldata[n][i][iter];
            }
        }
        mean = mean / samples;
        // deviation of each sample from the per-step node-local maximum
        ssq = 0.0;
        for (i = 0; i < ppn; i++) {
            for (iter = 0; iter < maxiter; iter++) {
                ssq += (alldata[n][i][iter] - compmax[iter]) * (alldata[n][i][iter] - compmax[iter]);
            }
        }
        sigma = sqrt(ssq / samples);
        relative_variation = 100.0 * sigma / mean;
        printf("%14s %10.2lf %10.2lf\n", allhosts + (size_t) n * sizeof(host), mean, relative_variation);
    }

    avgmin = 0.0;
    for (n = 0; n < nnodes; n++) avgmin += compmin[n];
    // avgmin is the min compute time per node, averaged over all nodes
    avgmin = avgmin / ((double) nnodes);

    // re-define compmax to be the max time in any rank for a given iteration
    for (iter = 0; iter < maxiter; iter++) {
        compmax[iter] = 0.0;
        for (n = 0; n < nnodes; n++) {
            for (i = 0; i < ppn; i++) {
                if (alldata[n][i][iter] > compmax[iter]) compmax[iter] = alldata[n][i][iter];
            }
        }
    }
    // sumtime is the time expected for a parallel job (sum of per-step maxima)
    sumtime = 0.0;
    for (iter = 0; iter < maxiter; iter++) sumtime += compmax[iter];
    efficiency = ((double) maxiter) * avgmin / sumtime;
    printf("\n");
    printf("estimated overall efficiency = %.3lf\n", efficiency);

    // histogram all samples over [mintime, maxtime]
    histo_bin_width = (maxtime - mintime) / ((double) (numbins - 1));
    hmin = mintime - 0.5 * histo_bin_width;
    for (bin = 0; bin < numbins; bin++) histo[bin] = 0L;
    for (n = 0; n < nnodes; n++) {
        for (i = 0; i < ppn; i++) {
            for (iter = 0; iter < maxiter; iter++) {
                if (histo_bin_width > 0.0) bin = (int) ((alldata[n][i][iter] - hmin) / histo_bin_width);
                else bin = 0;
                if ((bin >= 0) && (bin < numbins)) histo[bin]++;
            }
        }
    }
    printf("\n");
    printf("histogram of step times for all ranks\n");
    printf("     msec        count       density\n");
    for (bin = 0; bin < numbins; bin++) {
        xhisto = mintime + histo_bin_width * ((double) bin);
        // guard against division by zero when all samples are identical
        prob = (histo_bin_width > 0.0) ? 1.0e-3 * ((double) histo[bin]) / histo_bin_width : 0.0;
        printf("%10.3lf %10ld %20.4lf\n", xhisto, histo[bin], prob);
    }

    printf("\n");
    printf("summary data by rank:\n");
    printf("          host   cpu    mean(msec)  relative variation (percent)\n");
    for (n = 0; n < nnodes; n++) {
        for (i = 0; i < ppn; i++) {
            mean = 0.0;
            for (iter = 0; iter < maxiter; iter++) mean += alldata[n][i][iter];
            mean = mean / ((double) maxiter);
            ssq = 0.0;
            for (iter = 0; iter < maxiter; iter++) ssq += (alldata[n][i][iter] - mean) * (alldata[n][i][iter] - mean);
            sigma = sqrt(ssq / ((double) maxiter));
            relative_variation = 100.0 * sigma / mean;
            printf("%14s %6d %8.2lf %8.2lf\n", allhosts + (size_t) n * sizeof(host), cpulist[i], mean, relative_variation);
        }
    }

    // release resources (not required in main, but keeps leak checkers quiet)
    free(compmax);
    free(compmin);
    free(histo);
    free(alldata[0][0]);
    free(alldata[0]);
    free(alldata);
    free(allhosts);
    return 0;
}
|
box_coder_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
enum class BoxCodeType { kEncodeCenterSize = 0, kDecodeCenterSize = 1 };
// Translate the textual "code_type" op attribute into a BoxCodeType value.
// Any string other than the two supported spellings aborts via PADDLE_THROW.
inline BoxCodeType GetBoxCodeType(const std::string& type) {
  if (type == "encode_center_size") return BoxCodeType::kEncodeCenterSize;
  if (type == "decode_center_size") return BoxCodeType::kDecodeCenterSize;
  PADDLE_THROW("Not support type %s.", type);
}
template <typename DeviceContext, typename T>
class BoxCoderKernel : public framework::OpKernel<T> {
public:
// Encode each target box relative to each prior box (and optional per-prior
// variances) in center-size parameterization: for every (target i, prior j)
// pair, output[i*col*len + j*len .. +3] holds (dx, dy, dw, dh).
// `normalized == false` adds 1 to widths/heights (pixel-coordinate boxes).
// NOTE(review): the offset math assumes len == 4 coordinates per box
// (xmin, ymin, xmax, ymax) — confirm against the op's input checks.
void EncodeCenterSize(const framework::Tensor* target_box,
const framework::Tensor* prior_box,
const framework::Tensor* prior_box_var,
const bool normalized,
const std::vector<float> variance, T* output) const {
int64_t row = target_box->dims()[0];   // number of target boxes
int64_t col = prior_box->dims()[0];    // number of prior boxes
int64_t len = prior_box->dims()[1];    // coordinates per box
auto* target_box_data = target_box->data<T>();
auto* prior_box_data = prior_box->data<T>();
const T* prior_box_var_data = nullptr;
if (prior_box_var) prior_box_var_data = prior_box_var->data<T>();
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
for (int64_t i = 0; i < row; ++i) {
for (int64_t j = 0; j < col; ++j) {
// (normalized == false) contributes 1 for un-normalized boxes, 0 otherwise
T prior_box_width = prior_box_data[j * len + 2] -
prior_box_data[j * len] + (normalized == false);
T prior_box_height = prior_box_data[j * len + 3] -
prior_box_data[j * len + 1] +
(normalized == false);
T prior_box_center_x = prior_box_data[j * len] + prior_box_width / 2;
T prior_box_center_y =
prior_box_data[j * len + 1] + prior_box_height / 2;
T target_box_center_x =
(target_box_data[i * len + 2] + target_box_data[i * len]) / 2;
T target_box_center_y =
(target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2;
T target_box_width = target_box_data[i * len + 2] -
target_box_data[i * len] + (normalized == false);
T target_box_height = target_box_data[i * len + 3] -
target_box_data[i * len + 1] +
(normalized == false);
size_t offset = i * col * len + j * len;
// (dx, dy): center offset normalized by the prior's size;
// (dw, dh): log of the size ratio (fabs guards degenerate boxes).
output[offset] =
(target_box_center_x - prior_box_center_x) / prior_box_width;
output[offset + 1] =
(target_box_center_y - prior_box_center_y) / prior_box_height;
output[offset + 2] =
std::log(std::fabs(target_box_width / prior_box_width));
output[offset + 3] =
std::log(std::fabs(target_box_height / prior_box_height));
if (prior_box_var) {
// variance tensor: 2-D means one row of 4 values per prior box,
// otherwise a single shared set of 4 values (offset 0)
int prior_var_offset = 0;
if (prior_box_var->dims().size() == 2) {
prior_var_offset = j * len;
}
output[offset] /= prior_box_var_data[prior_var_offset];
output[offset + 1] /= prior_box_var_data[prior_var_offset + 1];
output[offset + 2] /= prior_box_var_data[prior_var_offset + 2];
output[offset + 3] /= prior_box_var_data[prior_var_offset + 3];
} else if (!(variance.empty())) {
// scalar variance list supplied as an op attribute
for (int k = 0; k < 4; ++k) {
output[offset + k] /= static_cast<T>(variance[k]);
}
}
}
}
}
// Decode center-size encoded offsets back into corner-form
// (xmin, ymin, xmax, ymax) boxes.
//
// target_box:    encoded offsets, shape [row, col, len] (len == 4).
// prior_box:     anchor boxes; the anchor for element (i, j) is selected by
//                `axis` (axis == 0: anchor j, axis == 1: anchor i).
// prior_box_var: optional per-anchor variances (2-D tensor) or a shared
//                variance row; may be null.
// normalized:    when false a +1 pixel correction is applied to widths,
//                heights, and the decoded max corners.
// variance:      attribute fallback used when prior_box_var is null; either
//                empty or exactly 4 entries (checked by the caller).
// output:        preallocated buffer of row * col * len elements.
void DecodeCenterSize(const framework::Tensor* target_box,
                      const framework::Tensor* prior_box,
                      const framework::Tensor* prior_box_var,
                      const bool normalized, const int axis,
                      const std::vector<float> variance, T* output) const {
  int64_t row = target_box->dims()[0];
  int64_t col = target_box->dims()[1];
  int64_t len = target_box->dims()[2];
  auto* target_box_data = target_box->data<T>();
  auto* prior_box_data = prior_box->data<T>();
  const T* prior_box_var_data = nullptr;
  if (prior_box_var) prior_box_var_data = prior_box_var->data<T>();
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
  for (int64_t i = 0; i < row; ++i) {
    for (int64_t j = 0; j < col; ++j) {
      size_t offset = i * col * len + j * len;
      // BUGFIX: prior_box_offset was a single function-scope variable
      // written by every OpenMP thread under collapse(2) — a data race.
      // It must be private to each iteration.
      int prior_box_offset = 0;
      if (axis == 0) {
        prior_box_offset = j * len;
      } else if (axis == 1) {
        prior_box_offset = i * len;
      }
      T prior_box_width = prior_box_data[prior_box_offset + 2] -
                          prior_box_data[prior_box_offset] +
                          (normalized == false);
      T prior_box_height = prior_box_data[prior_box_offset + 3] -
                           prior_box_data[prior_box_offset + 1] +
                           (normalized == false);
      T prior_box_center_x =
          prior_box_data[prior_box_offset] + prior_box_width / 2;
      T prior_box_center_y =
          prior_box_data[prior_box_offset + 1] + prior_box_height / 2;
      T target_box_center_x = 0, target_box_center_y = 0;
      T target_box_width = 0, target_box_height = 0;
      // Variance multipliers default to 1 when neither the tensor nor the
      // attribute supplies values.
      T box_var_x = T(1), box_var_y = T(1);
      T box_var_w = T(1), box_var_h = T(1);
      if (prior_box_var) {
        // A 2-D variance tensor is indexed per anchor; otherwise a single
        // shared row at offset 0 is used.
        int prior_var_offset = 0;
        if (prior_box_var->dims().size() == 2) {
          if (axis == 0)
            prior_var_offset = j * len;
          else if (axis == 1)
            prior_var_offset = i * len;
        }
        box_var_x = prior_box_var_data[prior_var_offset];
        box_var_y = prior_box_var_data[prior_var_offset + 1];
        box_var_w = prior_box_var_data[prior_var_offset + 2];
        box_var_h = prior_box_var_data[prior_var_offset + 3];
      } else if (!(variance.empty())) {
        box_var_x = static_cast<T>(variance[0]);
        box_var_y = static_cast<T>(variance[1]);
        box_var_w = static_cast<T>(variance[2]);
        box_var_h = static_cast<T>(variance[3]);
      }
      // Invert the center-size encoding: offsets scale with the anchor's
      // size, and width/height offsets are in log space.
      target_box_center_x =
          box_var_x * target_box_data[offset] * prior_box_width +
          prior_box_center_x;
      target_box_center_y =
          box_var_y * target_box_data[offset + 1] * prior_box_height +
          prior_box_center_y;
      target_box_width =
          std::exp(box_var_w * target_box_data[offset + 2]) * prior_box_width;
      target_box_height = std::exp(box_var_h * target_box_data[offset + 3]) *
                          prior_box_height;
      // Convert center/size back to corners.
      output[offset] = target_box_center_x - target_box_width / 2;
      output[offset + 1] = target_box_center_y - target_box_height / 2;
      output[offset + 2] =
          target_box_center_x + target_box_width / 2 - (normalized == false);
      output[offset + 3] =
          target_box_center_y + target_box_height / 2 - (normalized == false);
    }
  }
}
// Kernel entry point: validates the inputs/attributes, allocates the output
// tensor of shape [row, col, len], and dispatches to the encode or decode
// implementation according to the "code_type" attribute.
void Compute(const framework::ExecutionContext& context) const override {
  auto* priors = context.Input<framework::Tensor>("PriorBox");
  auto* prior_vars = context.Input<framework::Tensor>("PriorBoxVar");
  auto* targets = context.Input<framework::LoDTensor>("TargetBox");
  auto* out = context.Output<framework::Tensor>("OutputBox");
  std::vector<float> var_attr = context.Attr<std::vector<float>>("variance");
  const int axis = context.Attr<int>("axis");
  // At most one LoD level is supported on the target boxes.
  if (!targets->lod().empty()) {
    PADDLE_ENFORCE_EQ(targets->lod().size(), 1UL,
                      "Only support 1 level of LoD.");
  }
  // The variance tensor and the variance attribute are mutually exclusive.
  if (prior_vars) {
    PADDLE_ENFORCE(var_attr.empty(),
                   "Input 'PriorBoxVar' and attribute 'variance' should not"
                   "be used at the same time.");
  }
  if (!var_attr.empty()) {
    PADDLE_ENFORCE(static_cast<int>(var_attr.size()) == 4,
                   "Size of attribute 'variance' should be 4");
  }
  auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type"));
  bool normalized = context.Attr<bool>("box_normalized");
  // Output shape: rows follow the target boxes; the column count depends on
  // the direction (decode indexes anchors per target column).
  auto row = targets->dims()[0];
  auto col = (code_type == BoxCodeType::kDecodeCenterSize)
                 ? targets->dims()[1]
                 : priors->dims()[0];
  auto len = priors->dims()[1];
  out->mutable_data<T>({row, col, len}, context.GetPlace());
  T* output = out->data<T>();
  if (code_type == BoxCodeType::kEncodeCenterSize) {
    EncodeCenterSize(targets, priors, prior_vars, normalized, var_attr,
                     output);
  } else if (code_type == BoxCodeType::kDecodeCenterSize) {
    DecodeCenterSize(targets, priors, prior_vars, normalized, axis, var_attr,
                     output);
  }
}
};
} // namespace operators
} // namespace paddle
|
tutorial_region.c | /*
* Copyright (c) 2015 - 2022, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdint.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "tutorial_region.h"
#ifdef TUTORIAL_ENABLE_MKL
#include "mkl.h"
#else
/* Terrible DGEMM implementation should only be used if there is no */
/* BLAS support. Build assumes that the Intel(R) Math Kernel Library */
/* is the only provider of BLAS. */
/* Fallback triple-loop DGEMM used only when MKL is unavailable.
 * NOTE: this is a load generator, not a full BLAS dgemm — transa, transb,
 * alpha and beta are accepted for signature compatibility but ignored, and
 * C is overwritten with the plain row-major product A * B using leading
 * dimensions LDA/LDB/LDC. */
static inline
void dgemm(const char *transa, const char *transb, const int *M,
           const int *N, const int *K, const double *alpha,
           const double *A, const int *LDA, const double *B,
           const int *LDB, const double *beta, double *C, const int *LDC)
{
    (void)transa; (void)transb; (void)alpha; (void)beta;
#pragma omp parallel for
    for (int i = 0; i < *M; ++i) {
        for (int j = 0; j < *N; ++j) {
            C[i * *LDC + j] = 0;
            for (int k = 0; k < *K; ++k) {
                /* BUGFIX: the original accumulated
                 * A[i * *LDA + j] * B[j * *LDB + k], which indexes neither
                 * operand by the reduction index correctly and therefore
                 * does not compute a matrix product.  The inner-product
                 * term is A(i,k) * B(k,j). */
                C[i * *LDC + j] += A[i * *LDA + k] * B[k * *LDB + j];
            }
        }
    }
}
#endif
/* Sleep for big_o seconds (a no-op returning 0 when big_o is 0.0).
 * When do_report is non-zero the requested duration is printed first.
 * Returns 0 on success or the clock_nanosleep() error code. */
int tutorial_sleep(double big_o, int do_report)
{
    if (big_o == 0.0) {
        return 0;
    }
    time_t whole_sec = (time_t)big_o;
    struct timespec delay = {whole_sec,
                             (long)((big_o - whole_sec) * 1E9)};
    if (do_report) {
        printf("Sleeping for %e seconds\n", big_o);
        fflush(stdout);
    }
    return clock_nanosleep(CLOCK_REALTIME, 0, &delay, NULL);
}
/* Execute one DGEMM whose dimension scales so the floating point operation
 * count grows linearly with big_o (matrix_size ~ cbrt(4e9 * big_o)).
 * When do_report is non-zero the problem size is printed.  Returns 0 on
 * success or the posix_memalign() error code on allocation failure. */
int tutorial_dgemm(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
        int pad_size = 64;
        size_t num_elem = (size_t)matrix_size * (matrix_size + pad_size);
        size_t mem_size = sizeof(double) * num_elem;
        char transa = 'n';
        char transb = 'n';
        int M = matrix_size;
        int N = matrix_size;
        int K = matrix_size;
        /* pad each row by one cache line (64 bytes == 8 doubles) */
        int LDA = matrix_size + pad_size / sizeof(double);
        int LDB = matrix_size + pad_size / sizeof(double);
        int LDC = matrix_size + pad_size / sizeof(double);
        double alpha = 2.0;
        double beta = 3.0;
        double *A = NULL;
        double *B = NULL;
        double *C = NULL;
        err = posix_memalign((void **)&A, pad_size, mem_size);
        if (!err) {
            err = posix_memalign((void **)&B, pad_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&C, pad_size, mem_size);
        }
        if (!err) {
#pragma omp parallel for
            for (size_t i = 0; i < num_elem; ++i) {
                /* BUGFIX: the original used integer division
                 * (random() / RAND_MAX), which is almost always 0;
                 * cast to double to fill with values in [0, 1]. */
                A[i] = (double)random() / RAND_MAX;
                B[i] = (double)random() / RAND_MAX;
            }
            if (do_report) {
                printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
                fflush(stdout);
            }
            dgemm(&transa, &transb, &M, &N, &K, &alpha,
                  A, &LDA, B, &LDB, &beta, C, &LDC);
        }
        /* BUGFIX: free unconditionally so buffers allocated before a
         * failure are not leaked (free(NULL) is a no-op). */
        free(C);
        free(B);
        free(A);
    }
    return err;
}
/* STREAM-triad style memory-bandwidth kernel: a[i] = b[i] + scalar * c[i].
 * The vector length scales linearly with big_o (big_o == 1.0 -> 5e8
 * elements).  Returns 0 on success or the posix_memalign() error code. */
int tutorial_stream(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        size_t cline_size = 64;
        /* BUGFIX: the original computed (size_t)big_o * 500000000, which
         * truncates big_o to an integer BEFORE scaling — any big_o < 1.0
         * produced zero work.  Scale first, then convert. */
        size_t num_stream = (size_t)(big_o * 500000000);
        size_t mem_size = sizeof(double) * num_stream;
        double *a = NULL;
        double *b = NULL;
        double *c = NULL;
        double scalar = 3.0;
        err = posix_memalign((void **)&a, cline_size, mem_size);
        if (!err) {
            err = posix_memalign((void **)&b, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&c, cline_size, mem_size);
        }
        if (!err) {
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; i++) {
                a[i] = 0.0;
                b[i] = 1.0;
                c[i] = 2.0;
            }
            if (do_report) {
                /* BUGFIX: %zu is the correct conversion for size_t
                 * (%ld is undefined where long != size_t) */
                printf("Executing STREAM triad on length %zu vectors.\n",
                       num_stream);
                fflush(stdout);
            }
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; ++i) {
                a[i] = b[i] + scalar * c[i];
            }
        }
        /* BUGFIX: free unconditionally so buffers allocated before a
         * failure are not leaked (free(NULL) is a no-op). */
        free(c);
        free(b);
        free(a);
    }
    return err;
}
/* MPI_Alltoall exchange sized so the best-case completion time scales
 * with big_o.  Returns 0 on success, or the first MPI / posix_memalign
 * error code encountered. */
int tutorial_all2all(double big_o, int do_report)
{
    /* Best case scaling is O(ln(num_send) + num_rank) => */
    /* num_send = exp(big_o_n - factor * num_rank) */
    /* We have somewhat arbitrarily set factor to 1/128 */
    int err = 0;
    if (big_o != 0.0) {
        int num_rank = 0;
        /* BUGFIX: the original declared a second `int err` here, shadowing
         * the function-scope err — every MPI/allocation failure in this
         * block was discarded and the function always returned 0. */
        err = MPI_Comm_size(MPI_COMM_WORLD, &num_rank);
        size_t num_send = (size_t)pow(2.0, 16 * big_o - num_rank / 128.0);
        num_send = num_send ? num_send : 1;  /* send at least one byte */
        size_t cline_size = 64;
        char *send_buffer = NULL;
        char *recv_buffer = NULL;
        if (!err) {
            err = posix_memalign((void **)&send_buffer, cline_size,
                                 num_rank * num_send * sizeof(char));
        }
        if (!err) {
            err = posix_memalign((void **)&recv_buffer, cline_size,
                                 num_rank * num_send * sizeof(char));
        }
        if (!err) {
            if (do_report) {
                /* BUGFIX: %zu for the size_t byte count (was %ld) */
                printf("Executing all2all of %zu byte buffer on %d ranks.\n",
                       num_send * sizeof(char), num_rank);
                fflush(stdout);
            }
            err = MPI_Alltoall(send_buffer, num_send, MPI_CHAR, recv_buffer,
                               num_send, MPI_CHAR, MPI_COMM_WORLD);
        }
        if (!err) {
            err = MPI_Barrier(MPI_COMM_WORLD);
        }
        /* BUGFIX: free unconditionally; the original leaked both buffers
         * whenever any prior step failed (free(NULL) is a no-op). */
        free(recv_buffer);
        free(send_buffer);
    }
    return err;
}
/* Same workload as tutorial_dgemm(), but the matrices persist in static
 * storage across calls and are only (re)allocated when big_o changes.
 * Calling with big_o == 0.0 releases the cached matrices.  Returns 0 on
 * success or the posix_memalign() error code. */
int tutorial_dgemm_static(double big_o, int do_report)
{
    static double big_o_last = 0.0;
    static double *A = NULL;
    static double *B = NULL;
    static double *C = NULL;
    int err = 0;
    if (big_o != 0.0) {
        int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
        int pad_size = 64;
        size_t num_elem = (size_t)matrix_size * (matrix_size + pad_size);
        size_t mem_size = sizeof(double) * num_elem;
        char transa = 'n';
        char transb = 'n';
        int M = matrix_size;
        int N = matrix_size;
        int K = matrix_size;
        int LDA = matrix_size + pad_size / sizeof(double);
        int LDB = matrix_size + pad_size / sizeof(double);
        int LDC = matrix_size + pad_size / sizeof(double);
        double alpha = 2.0;
        double beta = 3.0;
        if (big_o != big_o_last) {
            /* release any previously cached matrices of a different size */
            if (A) {
                free(C);
                free(B);
                free(A);
                A = NULL;
                B = NULL;
                C = NULL;
            }
            err = posix_memalign((void **)&A, pad_size, mem_size);
            if (!err) {
                err = posix_memalign((void **)&B, pad_size, mem_size);
            }
            if (!err) {
                err = posix_memalign((void **)&C, pad_size, mem_size);
            }
            if (!err) {
#pragma omp parallel for
                for (size_t i = 0; i < num_elem; ++i) {
                    /* BUGFIX: cast to double — the original integer
                     * division random() / RAND_MAX is almost always 0 */
                    A[i] = (double)random() / RAND_MAX;
                    B[i] = (double)random() / RAND_MAX;
                }
                /* BUGFIX: record the cached size only after every
                 * allocation succeeded.  The original updated big_o_last
                 * before allocating, so a failed allocation left the cache
                 * marked valid and a retry ran dgemm on NULL matrices. */
                big_o_last = big_o;
            }
            else {
                /* roll back partial allocations on failure */
                free(C);
                free(B);
                free(A);
                A = NULL;
                B = NULL;
                C = NULL;
            }
        }
        if (!err) {
            if (do_report) {
                printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
                fflush(stdout);
            }
            dgemm(&transa, &transb, &M, &N, &K, &alpha,
                  A, &LDA, B, &LDB, &beta, C, &LDC);
        }
    }
    else if (A) {
        /* BUGFIX: reset the cached size on release; otherwise a later call
         * with the original big_o would skip reallocation and pass NULL
         * matrices to dgemm. */
        big_o_last = 0.0;
        free(C);
        free(B);
        free(A);
        A = NULL;
        B = NULL;
        C = NULL;
    }
    return err;
}
|
GB_unaryop__abs_int32_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_fp64
// op(A') function: GB_tran__abs_int32_fp64
// C type: int32_t
// A type: double
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z ; GB_CAST_SIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ABS operator with typecast: for each of the anz entries,
// Cx [p] = GB_IABS ((int32_t) Ax [p]), via the GB_CAST_OP macro defined
// above (cast double -> int32_t, then integer absolute value).
// Auto-generated kernel; the body must stay in sync with the generator.
GrB_Info GB_unop__abs_int32_fp64
(
    int32_t *restrict Cx,       // output array, anz entries
    const double *restrict Ax,  // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    // if the ABS op or either type is disabled at compile time, report
    // GrB_NO_VALUE so the caller falls back to the generic kernel
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = GB_IABS ((int32_t) Ax [p])
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting double -> int32_t and
// applying GB_IABS.  This is phase 2 of the two-phase transpose; the actual
// work is in the included GB_unaryop_transpose.c template, specialized by
// the macros defined above.  Auto-generated kernel; do not restructure.
GrB_Info GB_tran__abs_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // row counts from phase 1 (see template)
    GBI_single_iterator Iter,       // iterator over the vectors of A
    const int64_t *restrict A_slice,// partition of A across the slices
    int naslice                     // number of slices of A
)
{
    // fall back to the generic kernel if this specialization is disabled
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_fc32_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc32_bool
// op(A') function: GB_unop_tran__identity_fc32_bool
// C type: GxB_FC32_t
// A type: bool
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY operator with typecast bool -> GxB_FC32_t:
// Cx [p] = GxB_CMPLXF ((float) Ax [p], 0) for each entry present.
// Auto-generated kernel; the body must stay in sync with the generator.
GrB_Info GB_unop_apply__identity_fc32_bool
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads
)
{
    // fall back to the generic kernel if this specialization is disabled
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast would reduce to a memcpy; this
        // branch is dead here since bool -> FC32 requires a cast (macro is 0)
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            // real part is the bool promoted to float, imaginary part 0
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            bool aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting bool -> GxB_FC32_t.
// The actual work is in the included GB_unop_transpose.c template,
// specialized by the macros above.  Auto-generated; do not restructure.
GrB_Info GB_unop_tran__identity_fc32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // workspaces for the transpose
                                        // template (see GB_unop_transpose.c)
    const int64_t *GB_RESTRICT A_slice, // partition of A across tasks
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of OpenMP threads
)
{
    // fall back to the generic kernel if this specialization is disabled
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__asin_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asin_fc32_fc32)
// op(A') function: GB (_unop_tran__asin_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = casinf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casinf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = casinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ASIN operator on single-precision complex values:
// Cx [p] = casinf (Ax [p]) (complex arc-sine, no typecast needed).
// Auto-generated kernel; the body must stay in sync with the generator.
GrB_Info GB (_unop_apply__asin_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads
)
{
    // fall back to the generic kernel if this specialization is disabled
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;        // identity cast (FC32 -> FC32)
            Cx [p] = casinf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = casinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply casinf to each entry.  The
// actual work is in the included GB_unop_transpose.c template, specialized
// by the macros above.  Auto-generated; do not restructure.
GrB_Info GB (_unop_tran__asin_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,  // workspaces for the transpose template
    const int64_t *restrict A_slice,// partition of A across tasks
    int nworkspaces,                // number of workspaces
    int nthreads                    // number of OpenMP threads
)
{
    // fall back to the generic kernel if this specialization is disabled
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
barrier-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-gimple" } */
/* Unconditional barrier; the dg-final scan expects this to gimplify to a
   GOMP_barrier call.  Test code — must stay byte-identical. */
void f1(void)
{
  #pragma omp barrier
}
/* Barrier inside a braced conditional; together with f1 this accounts for
   the two GOMP_barrier occurrences the dg-final directive scans for. */
void f2(_Bool p)
{
  if (p)
    {
      #pragma omp barrier
    }
}
/* { dg-final { scan-tree-dump-times "GOMP_barrier" 2 "gimple" } } */
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state, this will put structures types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
void finalize(Function *Fn = nullptr);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for sections construct as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of
// function_ref class - function_ref contains non-ownable reference
// to the callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is cancled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g using uint8_t, its loop trip count of 256 cannot be
/// stored into an 8 bit integer):
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
//
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iterations,
/// which cannot be represented in a 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instructions that compute the collapsed
/// trip count will be inserted. If not set, defaults to before the
/// generated loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
/// referred to the floor, and the loops i2 and j2 are the tiles. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimension.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// but which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Add metadata to simd-ize a loop.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to simd-ize.
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction, as well as
/// the element type of these pointers. They are expected to atomically
/// update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
  /// Construct a reduction descriptor. \p Variable must be a pointer whose
  /// pointee type matches \p ElementType (checked by the assertion below).
  ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
                ReductionGenTy ReductionGen,
                AtomicReductionGenTy AtomicReductionGen)
      : ElementType(ElementType), Variable(Variable),
        PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
        AtomicReductionGen(AtomicReductionGen) {
    // Enforce the documented invariant that ElementType matches the pointee
    // type of Variable (see the ElementType member below).
    assert(cast<PointerType>(Variable->getType())
           ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
  }

  /// Reduction element type, must match pointee type of variable.
  Type *ElementType;

  /// Reduction variable of pointer type.
  Value *Variable;

  /// Thread-private partial reduction variable.
  Value *PrivateVariable;

  /// Callback for generating the reduction body. The IR produced by this will
  /// be used to combine two values in a thread-safe context, e.g., under
  /// lock or within the same thread, and therefore need not be atomic.
  ReductionGenTy ReductionGen;

  /// Callback for generating the atomic reduction body, may be null. The IR
  /// produced by this will be used to atomically combine two values during
  /// reduction. If null, the implementation will use the non-atomic version
  /// along with the appropriate synchronization mechanisms.
  AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associate
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
///}
/// Return the current insertion point of the underlying IRBuilder.
InsertPointTy getInsertionPoint() {
  InsertPointTy IP = Builder.saveIP();
  return IP;
}
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column,
uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
uint32_t &SrcLocStrSize);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create a enum class for the Reserve2Flags
Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a hidden global flag \p Name in the module with initial value \p
/// Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  if (FinalizationStack.empty())
    return false;
  const FinalizationInfo &Last = FinalizationStack.back();
  return Last.IsCancellable && Last.DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
  /// Callback type receiving the outlined function.
  using PostOutlineCBTy = std::function<void(Function &)>;
  /// Invoked with the outlined function once outlining has happened;
  /// may be empty.
  PostOutlineCBTy PostOutlineCB;
  /// Entry and exit blocks delimiting the region to be outlined.
  BasicBlock *EntryBB, *ExitBB;
  /// NOTE(review): name suggests these values are passed individually
  /// rather than through the outliner's argument aggregate — confirm
  /// against the outlining implementation.
  SmallVector<Value *, 2> ExcludeArgsFromAggregate;

  /// Collect all blocks in between EntryBB and ExitBB in both the given
  /// vector and set.
  void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
                     SmallVectorImpl<BasicBlock *> &BlockVector);

  /// Return the function that contains the region to be outlined.
  Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
///
/// \param OI Outline descriptor; taken by rvalue reference and moved into
///           the collection. OutlineInfo holds a std::function and a
///           SmallVector, so moving avoids a needless copy (a named rvalue
///           reference is an lvalue, so an explicit std::move is required).
void addOutlineInfo(OutlineInfo &&OI) {
  OutlineInfos.emplace_back(std::move(OI));
}
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
/// Allocas created for the argument arrays passed to an offload mapper
/// function (see createMapperAllocas / emitMapperCall below).
struct MapperAllocas {
  // NOTE(review): names suggest the usual kmpc mapper triple — array of
  // base pointers, array of begin pointers, array of sizes; confirm
  // against createMapperAllocas.
  AllocaInst *ArgsBase = nullptr;
  AllocaInst *Args = nullptr;
  AllocaInst *ArgSizes = nullptr;
};
/// Create the allocas instruction used in call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The value will be stored in vector address.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, with threads clause or without clause;
/// otherwise, with simd clause;
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, the barrier ensuring all sections are executed
/// before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterVarPtr a pointer to the master variable
/// \param PrivateVarPtr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and copy.in.end block
///
/// \returns The insertion point where copying operation to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// Create a runtime call for __tgt_interop_init
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param InteropType type of interop operation
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_init call
CallInst *createOMPInteropInit(const LocationDescription &Loc,
Value *InteropVar,
omp::OMPInteropType InteropType, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_destroy
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_destroy call
CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_use
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_use call
CallInst *createOMPInteropUse(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences, Value *DependenceAddress,
bool HaveNowaitClause);
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// if the directive has a region/body, It will set the insertion
/// point to the body
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// the directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it is exist already the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns wether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW,
/// or belong to {FADD, FSUB, BAD_BINOP}.
/// Then a `cmpExch` based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X volatile?
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXBinopExpr);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// a struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
Value *Var = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If operation
/// is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXBinopExpr);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXBinopExpr);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
/// Represents the control-flow structure of an OpenMP canonical loop; see the
/// file comment above for the full contract. Instances are created and
/// invalidated by OpenMPIRBuilder's loop-construct methods.
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
/// Control blocks owned by this object (see the CFG diagram in the class
/// comment). All four are reset to null by invalidate().
BasicBlock *Header = nullptr;
BasicBlock *Cond = nullptr;
BasicBlock *Latch = nullptr;
BasicBlock *Exit = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
///
/// FIXME: This currently includes the Preheader and After blocks even though
/// their content is (mostly) not under CanonicalLoopInfo's control.
/// Re-evaluate whether this makes sense.
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
public:
/// Returns whether this object currently represents the IR of a loop. If
/// returning false, it may have been consumed by a loop transformation or not
/// been initialized. Do not use in this case.
bool isValid() const { return Header; }
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be executed before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const;
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const {
assert(isValid() && "Requires a valid canonical loop");
return Header;
}
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const {
assert(isValid() && "Requires a valid canonical loop");
return Cond;
}
/// The body block is the single entry for a loop iteration and not controlled
/// by CanonicalLoopInfo. It can contain arbitrary control flow but must
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
// Canonical form guarantees Cond ends in a conditional branch whose first
// successor is the body.
return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
}
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const {
assert(isValid() && "Requires a valid canonical loop");
return Latch;
}
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit;
}
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure, analogous to the
/// preheader, it having just a single entry edge and being free from PHI
/// nodes should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit->getSingleSuccessor();
}
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
assert(isValid() && "Requires a valid canonical loop");
// By construction the first instruction of Cond compares the induction
// variable against the trip count (operand 1).
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
assert(isValid() && "Requires a valid canonical loop");
// The header's only PHI is the induction variable (see class comment).
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const {
assert(isValid() && "Requires a valid canonical loop");
return getIndVar()->getType();
}
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
// Insert before the preheader's terminator so the branch stays last.
BasicBlock *Preheader = getPreheader();
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *Body = getBody();
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *After = getAfter();
return {After, After->begin()};
};
/// Return the function this canonical loop is located in.
Function *getFunction() const {
assert(isValid() && "Requires a valid canonical loop");
return Header->getParent();
}
/// Consistency self-check.
void assertOK() const;
/// Invalidate this loop. That is, the underlying IR does not fulfill the
/// requirements of an OpenMP canonical loop anymore.
void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
omptest-ori.c | #include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include <nautilus/libccompat.h>
#include <nautilus/random.h>
//#include <nautilus/scheduler.h>
#ifndef NAUT_CONFIG_DEBUG_GPUDEV
#undef DEBUG_PRINT
#define DEBUG_PRINT(fmt, args...)
#endif
#define ERROR(fmt, args...) ERROR_PRINT("omptest: " fmt, ##args)
#define DEBUG(fmt, args...) DEBUG_PRINT("omptest: " fmt, ##args)
#define INFO(fmt, args...) INFO_PRINT("omptest: " fmt, ##args)
// Draw a uniformly random 16-bit value from the kernel entropy source
// (nk_get_rand_bytes). NOTE(review): shadows the libc random(); presumably
// intentional for this standalone test — confirm no libccompat clash.
static inline uint16_t random()
{
    uint16_t value;

    nk_get_rand_bytes((uint8_t *)&value, sizeof(value));
    return value;
}
#define MAXN 5100 /* Max value of N */
int N; /* Matrix size */
int procs; /* Number of processors to use */
/* Matrices and vectors */
volatile float A[MAXN][MAXN], B[MAXN], X[MAXN];
volatile float ORA[MAXN][MAXN], ORB[MAXN], ORX[MAXN];
/* A * X = B, solve for X */
int seed;
/* Prototype */
void gauss(); /* The function you will provide.
* It is this routine that is timed.
* It is called only on the parent.
*/
/* Initialize A and B (and X to 0.0s) */
/* Fill the pristine inputs ORA (N x N) and ORB (N) with pseudo-random
 * floats; random() yields 0..65535, so values fall in [0, 2). The working
 * set A/B/X is populated later by reset_inputs(). Column-major fill order
 * (outer index is the column) matches the original code. */
void initialize_inputs() {
    printf("\nInitializing...\n");

    for (int c = 0; c < N; c++) {
        for (int r = 0; r < N; r++)
            ORA[c][r] = (float) random() / 32768.0;
        ORB[c] = (float) random() / 32768.0;
    }
}
/* Restore the working set A/B from the pristine copies ORA/ORB and zero X,
 * so every solver run starts from identical inputs.
 * NOTE(review): initialize_inputs writes ORA[col][row] while this reads
 * ORA[row][col] — effectively a transpose. Harmless for random data, but
 * confirm it is intended. */
void reset_inputs(){
    printf("\n reseting...\n");

    for (int c = 0; c < N; c++) {
        for (int r = 0; r < N; r++)
            A[r][c] = ORA[r][c];
        B[c] = ORB[c];
        X[c] = 0.0;
    }
}
/* Print input matrices */
/* Pretty-print A and B to the console. Suppressed entirely for N >= 1000
 * to avoid flooding the output. */
void print_inputs() {
    if (N >= 1000)
        return;

    printf("\nA =\n\t");
    for (int r = 0; r < N; r++)
        for (int c = 0; c < N; c++)
            printf("%5.2f%s", A[r][c], (c < N-1) ? ", " : ";\n\t");

    printf("\nB = [");
    for (int c = 0; c < N; c++)
        printf("%5.2f%s", B[c], (c < N-1) ? "; " : "]\n");
}
/* Solve A*X = B in place by sequential Gaussian elimination followed by
 * back substitution. Diagonal elements are not normalized to 1; that is
 * accounted for in the back-substitution step. This is the timed serial
 * baseline. Floating-point operation order matches the parallel version. */
void serialgauss(){
    int norm, row, col;
    float mult;

    printf("Computing serially.\n");

    /* Forward elimination: zero column `norm` below the diagonal. */
    for (norm = 0; norm < N - 1; norm++) {
        for (row = norm + 1; row < N; row++) {
            mult = A[row][norm] / A[norm][norm];
            for (col = norm; col < N; col++)
                A[row][col] -= A[norm][col] * mult;
            B[row] -= B[norm] * mult;
        }
    }

    /* Back substitution, bottom row upward; each X[row] depends on all
     * X[col] with col > row. */
    for (row = N - 1; row >= 0; row--) {
        X[row] = B[row];
        for (col = N - 1; col > row; col--)
            X[row] -= A[row][col] * X[col];
        X[row] /= A[row][row];
    }
}
/* Solve A*X = B with OpenMP-parallel Gaussian elimination, then serial back
 * substitution. Operates on the global working set A/B/X of size N using
 * `procs` threads.
 *
 * Scheme: every thread enters the parallel region and redundantly runs the
 * full `norm` loop; only the row-elimination loop is worksharing (`omp for`).
 * The implicit barrier at the end of each `omp for` (no `nowait`) keeps all
 * threads on the same `norm` iteration, which is what makes the redundant
 * outer loop safe. */
void ompgauss() {
int norm, row, col; /* Normalization row, and zeroing
* element row and col */
float multiplier;
//doneflag[0] = 1;
printf("Computing using omp.\n");
/* Gaussian elimination */
/* All four loop variables are thread-private; A/B/N are shared globals. */
#pragma omp parallel private(row, col, multiplier, norm) num_threads(procs)
{
for (norm = 0; norm < N - 1; norm++) {
/* schedule(static,1): round-robin rows across threads, since the work
* per row shrinks as `norm` advances. */
#pragma omp for schedule(static,1)
for (row = norm + 1; row < N; row++) {
multiplier = A[row][norm]/A[norm][norm];
for (col = norm; col < N; col++) {
A[row][col] -= A[norm][col] * multiplier;
}
B[row] -= B[norm] * multiplier;
}
/* Implicit barrier here before the next `norm` step. */
}
}
// nk_vc_printf("I am done\n");
/* (Diagonal elements are not normalized to 1. This is treated in back
* substitution.)
*/
/* Back substitution */
/* Serial: each X[row] depends on every X[col] with col > row. */
for (row = N - 1; row >= 0; row--) {
X[row] = B[row];
for (col = N-1; col > row; col--) {
X[row] -= A[row][col] * X[col];
}
X[row] /= A[row][row];
}
}
#define TIME() (double)nk_sched_get_realtime()/1e9;
/* Shell handler for "omptest <seed> <size> <nprocs>": seeds the RNG, builds a
 * random N x N system, solves it with the OpenMP and serial Gaussian solvers,
 * and reports the summed difference of the solutions plus the speedup.
 * Returns 0 on success, -1 on a parse error. `priv` is unused. */
static int handle_omptest (char * buf, void * priv)
{
int seed, size, np; /* NOTE(review): this `seed` shadows the file-scope `seed`. */
if ((sscanf(buf,"omptest %d %d %d",&seed,&size,&np)!=3)) {
nk_vc_printf("Don't understand %s please input seed, matrix size and nprocs\n",buf);
return -1;
}
nk_rand_seed(seed);
/* NOTE(review): `size` is not validated against MAXN (5100); a larger value
 * overruns the global arrays. Consider rejecting size > MAXN. */
N = size;
procs = np;
nk_vc_printf("seed %d, size, %d, nprocs: %d\n", seed, N, procs);
initialize_inputs();
reset_inputs();
// print_inputs();
/* unsigned mxcsr; */
/* __asm__ volatile("ldmxcsr %0"::"m"(*&mxcsr):"memory"); */
/* printf("ld %04x \n", mxcsr); */
/* mxcsr = mxcsr ^ 0x0200; */
/* printf("st %08x \n", mxcsr); */
/* __asm__ volatile("stmxcsr %0"::"m"(*&mxcsr):"memory"); */
/* __asm__ volatile("ldmxcsr %0"::"m"(*&mxcsr):"memory"); */
/* printf("ld %08x \n", mxcsr); */
/* Time the OpenMP solver (TIME() is wall-clock seconds). */
double start = TIME();
ompgauss();
double end = TIME();
double omp = end-start;
nk_vc_printf("openmp done %lf\n", omp);
/* Save the OpenMP solution before reset_inputs() clobbers X.
 * NOTE(review): VLA of N floats on the kernel stack (~20 KB at MAXN) —
 * confirm the stack budget allows this. */
float OMP[N];
for(int row =0; row<N; row++){
OMP[row] = X[row];
}
reset_inputs();
start = TIME();
serialgauss();
end = TIME();
double serial = end-start;
nk_vc_printf("serial done %lf\n", serial);
/* NOTE(review): signed differences can cancel; a sum of fabs() would be a
 * stricter agreement check. */
float difference = 0.0;
for(int row =0; row<N; row++){
difference += (OMP[row]- X[row]);
}
nk_vc_printf("OMP difference %f speed up %f !\n", difference, serial/omp);
return 0;
}
/* Register the "omptest" command with the Nautilus shell; dispatches to
 * handle_omptest, which expects "omptest <seed> <size> <nprocs>". */
static struct shell_cmd_impl omptest_impl = {
.cmd = "omptest",
.help_str = "openmp test",
.handler = handle_omptest,
};
nk_register_shell_cmd(omptest_impl);
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
/* Max/min helper macros. Every operand is parenthesized — including the
 * ternary result positions, which the original left bare (`? a : b`) — so
 * the expansion is safe in any surrounding expression. Note both arguments
 * are still evaluated (twice for the selected one); avoid side effects. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values (glibc manual idiom).
 *
 * Side effect: normalizes *y in place while borrowing/carrying between the
 * seconds and microseconds fields. After normalization result->tv_usec is
 * guaranteed non-negative.
 *
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from the seconds field so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry the other way when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x predates the (normalized) y in whole seconds. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-14,16)),ceild(3*t1-30,32)),ceild(24*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(12*t1+Nx+15,128)),floord(24*t2+Nx+11,128)),floord(16*t3+Nx+3,128)),floord(24*t1-24*t2+Nz+Nx+13,128));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),32*t4+30);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
csr_block_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRBlockMatrix class.
*
*****************************************************************************/
#include "csr_block_matrix.h"
#include "../seq_mv/seq_mv.h"
#include <assert.h>
/*--------------------------------------------------------------------------
 * hypre_CSRBlockMatrixMatvec
 *
 * Computes y <- alpha*A*x + beta*y where A is a block CSR matrix: each
 * stored entry of A is a dense blk_size x blk_size block, laid out row-major
 * inside A_data.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRBlockMatrixMatvec(HYPRE_Complex alpha, hypre_CSRBlockMatrix *A,
                           hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y)
{
   HYPRE_Complex *A_data   = hypre_CSRBlockMatrixData(A);
   HYPRE_Int     *A_i      = hypre_CSRBlockMatrixI(A);
   HYPRE_Int     *A_j      = hypre_CSRBlockMatrixJ(A);
   HYPRE_Int      num_rows = hypre_CSRBlockMatrixNumRows(A);
   HYPRE_Int      num_cols = hypre_CSRBlockMatrixNumCols(A);
   HYPRE_Int      blk_size = hypre_CSRBlockMatrixBlockSize(A);
   HYPRE_Complex *x_data   = hypre_VectorData(x);
   HYPRE_Complex *y_data   = hypre_VectorData(y);
   HYPRE_Int      x_size   = hypre_VectorSize(x);
   HYPRE_Int      y_size   = hypre_VectorSize(y);
   HYPRE_Int      bnnz     = blk_size * blk_size; /* scalars per stored block */
   HYPRE_Int      i, k, r, c;
   HYPRE_Int      ierr = 0;
   HYPRE_Complex  scal;
   HYPRE_Complex  sum;

   /*---------------------------------------------------------------------
    * Size compatibility checks are informational only: ierr = 1 when the
    * length of x does not match the column count, ierr = 2 when the length
    * of y does not match the row count, ierr = 3 when both mismatch.
    * Temporary vectors are common in Matvec, so processing continues.
    *--------------------------------------------------------------------*/
   if (num_cols * blk_size != x_size) { ierr = 1; }
   if (num_rows * blk_size != y_size) { ierr = 2; }
   if (num_cols * blk_size != x_size && num_rows * blk_size != y_size) { ierr = 3; }

   /*-----------------------------------------------------------------------
    * alpha == 0 degenerates to y <- beta*y  (RDF: USE MACHINE EPS)
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows * blk_size; i++) { y_data[i] *= beta; }
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * Pre-scale y <- (beta/alpha)*y so a single final pass by alpha yields
    * alpha*A*x + beta*y.
    *-----------------------------------------------------------------------*/
   scal = beta / alpha;
   if (scal != 1.0)
   {
      if (scal == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows * blk_size; i++) { y_data[i] = 0.0; }
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows * blk_size; i++) { y_data[i] *= scal; }
      }
   }

   /*-----------------------------------------------------------------
    * y += A*x : each thread owns a disjoint set of block rows, so the
    * writes to y are race-free.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,k,r,c,sum) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      for (k = A_i[i]; k < A_i[i+1]; k++)
      {
         for (r = 0; r < blk_size; r++)
         {
            sum = y_data[i*blk_size + r];
            for (c = 0; c < blk_size; c++)
            {
               sum += A_data[k*bnnz + r*blk_size + c] * x_data[A_j[k]*blk_size + c];
            }
            y_data[i*blk_size + r] = sum;
         }
      }
   }

   /*-----------------------------------------------------------------
    * Final scale: y <- alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows * blk_size; i++) { y_data[i] *= alpha; }
   }

   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_CSRBlockMatrixMatvecT
 *
 * Performs y <- alpha * A^T * x + beta * y
 *
 * From Van Henson's modification of hypre_CSRMatrixMatvec.
 *
 * FIX(review): the OpenMP loop below is parallel over the ROWS of A while
 * the updates scatter into y through the COLUMN indices A_j[jj]; different
 * rows can hit the same column, so the "+=" on y_data was a data race.
 * Each scalar update is now protected with "#pragma omp atomic" (results
 * are unchanged in serial builds; parallel builds become correct, at the
 * cost of atomics on the scatter).  The loop-invariant column index is
 * also hoisted out of the inner block loops.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRBlockMatrixMatvecT( HYPRE_Complex alpha,
                             hypre_CSRBlockMatrix *A,
                             hypre_Vector *x,
                             HYPRE_Complex beta,
                             hypre_Vector *y )
{
   HYPRE_Complex *A_data = hypre_CSRBlockMatrixData(A);
   HYPRE_Int     *A_i = hypre_CSRBlockMatrixI(A);
   HYPRE_Int     *A_j = hypre_CSRBlockMatrixJ(A);
   HYPRE_Int      num_rows = hypre_CSRBlockMatrixNumRows(A);
   HYPRE_Int      num_cols = hypre_CSRBlockMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      x_size = hypre_VectorSize(x);
   HYPRE_Int      y_size = hypre_VectorSize(y);
   HYPRE_Complex  temp;
   HYPRE_Int      i, j, jj;
   HYPRE_Int      ierr = 0;
   HYPRE_Int      b1, b2;
   HYPRE_Int      blk_size = hypre_CSRBlockMatrixBlockSize(A);
   HYPRE_Int      bnnz = blk_size*blk_size;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   if (num_rows*blk_size != x_size)
      ierr = 1;
   if (num_cols*blk_size != y_size)
      ierr = 2;
   if (num_rows*blk_size != x_size && num_cols*blk_size != y_size)
      ierr = 3;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    * alpha == 0 degenerates to y <- beta*y.
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*blk_size; i++)
         y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y, so a single final pass by alpha produces
    * alpha*A^T*x + beta*y.
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*blk_size; i++)
            y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*blk_size; i++)
            y_data[i] *= temp;
      }
   }

   /*-----------------------------------------------------------------
    * y += A^T*x
    * The iteration stays row-parallel; the scatter into y through the
    * column index is made safe with an atomic update per scalar.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, jj, j, b1, b2) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      for (jj = A_i[i]; jj < A_i[i+1]; jj++) /*each nonzero in that row*/
      {
         j = A_j[jj]; /* destination column block (invariant over b1,b2) */
         for (b1 = 0; b1 < blk_size; b1++)  /*row within the block*/
         {
            for (b2 = 0; b2 < blk_size; b2++) /*col within the block*/
            {
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
               y_data[j*blk_size+b2] +=
                  A_data[jj*bnnz+b1*blk_size+b2] * x_data[i*blk_size + b1];
            }
         }
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*blk_size; i++)
         y_data[i] *= alpha;
   }

   return ierr;
}
|
a.22.1.c | /* { dg-do compile } */
/* { dg-require-effective-target tls } */
int counter = 0;
#pragma omp threadprivate(counter)

/* Bump this thread's private copy of `counter` and return its new value. */
int
increment_counter ()
{
  return ++counter;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define M (16*32)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
double A[M][N], B[M][N], C[N], D[N], E[N];
double S[M];
double p[2];
/*
 * Driver exercising `omp target teams distribute parallel for` with nested
 * `parallel for` regions under various clauses (proc_bind, private, shared,
 * firstprivate, lastprivate, collapse, ordered) plus a GPU scheduling check.
 *
 * NOTE(review): NESTED_PARALLEL_FOR, TESTD, VERIFY, INIT_LOOP, ZERO_ARRAY,
 * DUMP_SUCCESS and SUMS come from "defines.h" and ../utilities; the exact
 * expansion (and thus the verified values) depends on those headers —
 * confirm against them.  "defines.h" is re-included after every redefinition
 * of NESTED_PARALLEL_FOR_CLAUSES, so it presumably (re)defines
 * NESTED_PARALLEL_FOR in terms of that macro — TODO confirm.
 */
int main(void) {
  check_offloading();
  INIT();
  /* Detect whether target regions actually offload or fall back to the host. */
  int cpuExec = 0;
  #pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int tms = 16;  /* number of teams */
  int th = 32;   /* threads per team */
  int threads[1]; threads[0] = th-1;
  //
  // Test: proc_bind clause
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES proc_bind(master)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  },
  for (int i = 0; i < N; i++) { \
  A[idx][i] += C[i] + D[i]; \
  B[idx][i] += D[i] + E[i]; \
  },
  {
  double tmp = 0;
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
  /* Same computation under proc_bind(close). */
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES proc_bind(close)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  },
  for (int i = 0; i < N; i++) { \
  A[idx][i] += C[i] + D[i]; \
  B[idx][i] += D[i] + E[i]; \
  },
  {
  double tmp = 0;
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
  /* Same computation under proc_bind(spread). */
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES proc_bind(spread)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  },
  for (int i = 0; i < N; i++) { \
  A[idx][i] += C[i] + D[i]; \
  B[idx][i] += D[i] + E[i]; \
  },
  {
  double tmp = 0;
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
  //
  // Test: private, shared clauses on omp target teams distribute parallel for with nested parallel.
  //
  /* NOTE(review): the clauses name p and q, which here are the locals
     declared inside the macro arguments (file-scope p exists too) —
     verify against the expansion in defines.h. */
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  double p = 2; \
  double q = 4; \
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  },
  for (int i = 0; i < N; i++) { \
  p = C[i] + D[i]; \
  q = D[i] + E[i]; \
  A[idx][i] += p; \
  B[idx][i] += q; \
  }
  ,
  {
  double tmp = p + q;
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
  //
  // Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
  //
  /* p starts at -4 and q at 4 so their contributions cancel (-4+4 = 0);
     the i == N-1 bump only affects the (discarded) private copies. */
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p,q)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  double p = -4; \
  double q = 4; \
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  },
  for (int i = 0; i < N; i++) { \
  A[idx][i] += C[i] + D[i] + p; \
  B[idx][i] += D[i] + E[i] + q; \
  if (i == N-1) { \
  p += 6; \
  q += 9; \
  } \
  }
  ,
  {
  double tmp = p + q;
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
  //
  // Test: lastprivate clause on omp target teams distribute parallel for with nested parallel.
  //
  /* q0..q3 are single-element arrays so lastprivate copies back the value
     written by the thread executing the last iteration. */
  TESTD("omp target teams distribute parallel for num_teams(tms) num_threads(th)",
  for (int idx = 0; idx < tms*th; idx++) {
  double q0[1];
  double q1[1];
  double q2[1];
  double q3[1];
  S[idx] = 0;
  for (int i = 0; i < N; i++) {
  A[idx][i] = B[idx][i] = 0;
  }
  _Pragma("omp parallel for lastprivate(q0) if(threads[0] > 1) num_threads(threads[0])")
  for (int i = 0; i < N; i++) {
  q0[0] = C[i] + D[i];
  A[idx][i] += q0[0];
  }
  _Pragma("omp parallel for schedule(auto) lastprivate(q1) if(threads[0] > 1) num_threads(threads[0])")
  for (int i = 0; i < N; i++) {
  q1[0] = C[i] + D[i];
  A[idx][i] += q1[0];
  }
  _Pragma("omp parallel for schedule(static) lastprivate(q2) if(threads[0] > 1) num_threads(threads[0])")
  for (int i = 0; i < N; i++) {
  q2[0] = D[i] + E[i];
  B[idx][i] += q2[0];
  }
  _Pragma("omp parallel for schedule(static,9) lastprivate(q3) if(threads[0] > 1) num_threads(threads[0])")
  for (int i = 0; i < N; i++) {
  q3[0] = D[i] + E[i];
  B[idx][i] += q3[0];
  }
  double tmp = q0[0] + q1[0] + q2[0] + q3[0];
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  }
  , VERIFY(0, tms*th, S[i], (double) 2 * (N + (N/2*(N+1))) ));
  //
  // Test: private clause on omp target teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES private(p)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  double p[2]; \
  p[0] = 2; p[1] = 4; \
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  }
  ,
  for (int i = 0; i < N; i++) { \
  p[0] = C[i] + D[i]; \
  p[1] = D[i] + E[i]; \
  A[idx][i] += p[0]; \
  B[idx][i] += p[1]; \
  }
  ,
  {
  double tmp = p[0] + p[1];
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
  //
  // Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
  //
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  double p[2]; \
  p[0] = -4; p[1] = 4; \
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  }
  ,
  for (int i = 0; i < N; i++) { \
  A[idx][i] += C[i] + D[i] + p[0]; \
  B[idx][i] += D[i] + E[i] + p[1]; \
  if (i == N-1) { \
  p[0] += 6; \
  p[1] += 9; \
  } \
  }
  ,
  {
  double tmp = p[0] + p[1];
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
  //
  // Test: collapse clause on omp target teams distribute parallel for with nested parallel.
  //
  /* 1024*3 == N, so the collapsed 2-D iteration covers exactly the N columns. */
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES collapse(2)
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  S[idx] = 0; \
  for (int i = 0; i < N; i++) { \
  A[idx][i] = B[idx][i] = 0; \
  }
  ,
  for (int i = 0; i < 1024; i++) { \
  for (int j = 0; j < 3; j++) { \
  A[idx][i*3+j] += C[i*3+j] + D[i*3+j]; \
  B[idx][i*3+j] += D[i*3+j] + E[i*3+j]; \
  } \
  }
  ,
  {
  double tmp = 0;
  for (int i = 0; i < N; i++) {
  tmp += A[idx][i] + B[idx][i];
  }
  S[idx] += tmp;
  },
  VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
  //
  // Test: ordered clause on omp target teams distribute parallel for with nested parallel.
  //
  /* The ordered region serializes the updates of S[idx] inside the loop. */
  #undef NESTED_PARALLEL_FOR_CLAUSES
  #define NESTED_PARALLEL_FOR_CLAUSES ordered
  #include "defines.h"
  NESTED_PARALLEL_FOR(
  S[idx] = 0; \
  ,
  for (int i = 0; i < N; i++) { \
  _Pragma("omp ordered") \
  S[idx] += C[i] + D[i]; \
  }
  ,
  {
  },
  VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
  //
  // Test: Ensure coalesced scheduling on GPU.
  //
  /* Only meaningful when offloading: with coalesced (static,1-like)
     scheduling, thread t executes iterations t, t+32, t+64, so
     i - omp_get_thread_num() sums to 3 * 95 * 48 over the three loops.
     Skipped (reported as success) on host execution. */
  if (!cpuExec) {
  TESTD("omp target teams distribute parallel for num_teams(tms) num_threads(th)",
  for (int idx = 0; idx < tms*th; idx++) {
  S[idx] = 0;
  for (int i = 0; i < 96; i++) {
  A[idx][i] = 0;
  }
  _Pragma("omp parallel for num_threads(32)")
  for (int i = 0; i < 96; i++) {
  A[idx][i] += i - omp_get_thread_num();
  }
  _Pragma("omp parallel for schedule(auto) num_threads(32)")
  for (int i = 0; i < 96; i++) {
  A[idx][i] += i - omp_get_thread_num();
  }
  _Pragma("omp parallel for schedule(static,1) num_threads(32)")
  for (int i = 0; i < 96; i++) {
  A[idx][i] += i - omp_get_thread_num();
  }
  double tmp = 0;
  for (int i = 0; i < 96; i++) {
  tmp += A[idx][i];
  }
  S[idx] = tmp;
  }
  , VERIFY(0, tms*th, S[i], (double) 3 * 95 * 48 ));
  } else {
  DUMP_SUCCESS(1);
  }
  //DUMP_SUCCESS(1);
  return 0;
}
|
generator_gemm_common.c | /******************************************************************************
** Copyright (c) 2015-2018, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
/* Fill io_micro_kernel_config with the full-vector-width instruction
 * selection for the architecture string i_arch ("wsm", "snb", "hsw",
 * "knc", "knl", "knm", "skx", "icl"), the GEMM precision encoded in
 * i_xgemm_desc->datatype, and the A/C alignment flags.  Unknown
 * architectures (and unsupported precisions on AVX-512) fall back to a
 * generic, all-UNDEF configuration.  The ALU/prefetch instructions at the
 * bottom are architecture-independent. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const char* i_arch,
                                                                 const unsigned int i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  if ( strcmp( i_arch, "wsm" ) == 0 ) {
    /* Westmere: SSE3, 16 xmm registers, separate mul/add (no FMA). */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* FP64: 2 doubles per xmm register. */
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
    } else {
      /* FP32: 4 floats per xmm register; B is broadcast via MOVSS+SHUFPS. */
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
    }
  } else if ( strcmp( i_arch, "snb" ) == 0 ) {
    /* Sandy Bridge: AVX (256-bit), broadcast loads for B, no FMA. */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    }
  } else if ( strcmp( i_arch, "hsw" ) == 0 ) {
    /* Haswell: AVX2 with FMA — vmul carries the fused multiply-add and
     * vadd is unused (UNDEF). */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else if ( (strcmp( i_arch, "knc" ) == 0) ||
              (strcmp( i_arch, "knl" ) == 0) ||
              (strcmp( i_arch, "knm" ) == 0) ||
              (strcmp( i_arch, "skx" ) == 0) ||
              (strcmp( i_arch, "icl" ) == 0) ) {
    /* 512-bit targets (IMCI / AVX-512 variants): 32 zmm registers. */
    if ((strcmp( i_arch, "knc" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_IMCI;
    } else if ((strcmp( i_arch, "knl" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_MIC;
    } else if ((strcmp( i_arch, "knm" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_KNM;
    } else if ((strcmp( i_arch, "skx" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CORE;
    } else {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_ICL;
    }
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'z';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else {
      /* shouldn't happen as we caught this case earlier */
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
      io_micro_kernel_config->vector_reg_count = 0;
      io_micro_kernel_config->use_masking_a_c = 0;
      io_micro_kernel_config->vector_name = 'a';
      io_micro_kernel_config->vector_length = 0;
      io_micro_kernel_config->datatype_size = 0;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else {
    /* Unknown architecture string: generic fallback, everything UNDEF. */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  }
  /* Scalar ALU and prefetch instructions are the same on all targets. */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/* Initializes io_micro_kernel_config for "half vector" GEMM kernels: on AVX/AVX2
 * targets the 128-bit xmm registers are used instead of the full 256-bit width.
 * wsm is redirected to the scalar config and IMCI/AVX512 targets are redirected
 * to the fullvector config; unknown architectures fall back to a generic,
 * all-UNDEF configuration. The trailing ALU/prefetch settings are arch-independent. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int i_use_masking_a_c ) {
/* wsm (SSE3-class): no half-vector variant; fall back to the scalar config */
if ( strcmp( i_arch, "wsm" ) == 0 ) {
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
} else if ( strcmp( i_arch, "snb" ) == 0 ) {
/* snb (AVX1): 128-bit xmm operation, separate multiply and add instructions */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* F64: two 8-byte elements per xmm register */
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
/* aligned vs. unaligned A loads, selected by the descriptor's alignment flag */
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
/* B elements are broadcast via duplicate-load; no extra shuffle needed */
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else {
/* F32 path: four 4-byte elements per xmm register */
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
}
} else if ( strcmp( i_arch, "hsw" ) == 0 ) {
/* hsw (AVX2/FMA3): the FMA opcode lives in vmul_instruction; vadd is unused */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
/* fused multiply-add replaces the separate mul+add pair */
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else if ( (strcmp( i_arch, "knc" ) == 0) ||
(strcmp( i_arch, "knl" ) == 0) ||
(strcmp( i_arch, "knm" ) == 0) ||
(strcmp( i_arch, "skx" ) == 0) ||
(strcmp( i_arch, "icl" ) == 0) ) {
/* IMCI/AVX512 families have no half-vector variant; use the full-vector config */
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, IMCI/AVX512 redirecting to fullvector!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
} else {
/* unknown architecture: generic fallback with all vector instructions undefined */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
/* architecture-independent scalar ALU and prefetch instruction selection */
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/* Initializes io_micro_kernel_config for scalar GEMM kernels (vector_length == 1):
 * single-element SSE/AVX move and arithmetic instructions are selected per
 * architecture and per precision (F64 vs. F32). knc redirects to the fullvector
 * config; unknown architectures fall back to a generic, all-UNDEF configuration.
 * The trailing ALU/prefetch settings are architecture-independent. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int i_use_masking_a_c ) {
if ( strcmp( i_arch, "wsm" ) == 0 ) {
/* wsm (SSE3): legacy two-operand scalar instructions */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* F64 scalar: one 8-byte element per operation */
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
} else {
/* F32 scalar: one 4-byte element per operation */
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
}
} else if ( strcmp( i_arch, "snb" ) == 0 ) {
/* snb (AVX1): VEX-encoded scalar instructions, separate mul and add */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
}
} else if ( strcmp( i_arch, "hsw" ) == 0 ) {
/* hsw (AVX2/FMA3): the scalar FMA opcode lives in vmul_instruction; vadd unused */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else if ( (strcmp( i_arch, "knc" ) == 0) ||
(strcmp( i_arch, "knl" ) == 0) ||
(strcmp( i_arch, "knm" ) == 0) ||
(strcmp( i_arch, "skx" ) == 0) ||
(strcmp( i_arch, "icl" ) == 0) ) {
if ((strcmp( i_arch, "knc" ) == 0)) {
/* knc has no scalar path: redirect to the fullvector configuration.
NOTE(review): this instruction_set assignment appears to be overwritten
inside the fullvector redirect below -- confirm the intended order. */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_IMCI;
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_scalar, IMCI redirecting to fullvector!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
} else if ((strcmp( i_arch, "knl" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_MIC;
} else if ((strcmp( i_arch, "knm" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_KNM;
} else if ((strcmp( i_arch, "skx" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CORE;
} else {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_ICL;
}
/* AVX512 scalar kernels: EVEX zeroing via VPXORD, scalar FMA for the multiply */
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
}
} else {
/* unknown architecture: generic fallback with all vector instructions undefined */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
/* architecture-independent scalar ALU and prefetch instruction selection */
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/* Appends a debug FLOP counter to generated *source* code (code_type == 0 only):
 * emits an NDEBUG-guarded, OpenMP-atomic increment of libxsmm_num_total_flops
 * by 2*m*n*k. No output is produced for JIT (binary) code generation. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc ) {
  if ( io_generated_code->code_type == 0 ) {
    /* fixed prologue lines: NDEBUG guard plus an OpenMP atomic for the update */
    const char* l_fixed_lines[] = { "#ifndef NDEBUG\n", "#ifdef _OPENMP\n", "#pragma omp atomic\n", "#endif\n" };
    char l_buffer[512];
    const unsigned int l_max_length = sizeof(l_buffer) - 1;
    int l_length = 0;
    unsigned int l_i;
    for ( l_i = 0; l_i < sizeof(l_fixed_lines)/sizeof(l_fixed_lines[0]); ++l_i ) {
      l_length = LIBXSMM_SNPRINTF( l_buffer, l_max_length, "%s", l_fixed_lines[l_i] );
      libxsmm_append_code_as_string( io_generated_code, l_buffer, l_length );
    }
    /* counter increment: 2*m*n*k (one multiply + one add per element-update) */
    l_length = LIBXSMM_SNPRINTF( l_buffer, l_max_length, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
    libxsmm_append_code_as_string( io_generated_code, l_buffer, l_length );
    /* close the outer #ifndef NDEBUG */
    l_length = LIBXSMM_SNPRINTF( l_buffer, l_max_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_buffer, l_length );
  }
}
/* Emits the k-loop header: zeroes the k-loop counter register, registers the
 * loop's jump label, then advances the counter by i_k_blocking at the top of
 * each iteration. i_m_blocking is accepted for signature symmetry but unused. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking,
const unsigned int i_k_blocking ) {
LIBXSMM_UNUSED(i_m_blocking);
/* kloop = 0 */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
/* loop entry label (target of the backward jump in footer_kloop) */
libxsmm_x86_instruction_register_jump_label( io_generated_code, io_loop_label_tracker );
/* kloop += i_k_blocking (counter is advanced at the top of the loop body) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}
/* Emits the k-loop footer: compares the k-loop counter against i_max_blocked_k
 * and jumps back to the label set in header_kloop while below it. When the
 * k-loop is complete (i_kloop_complete != 0), rewinds the B pointer by the
 * full k panel (k * datatype_size bytes) for the next m/n iteration.
 * i_m_blocking is accepted for signature symmetry but unused. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_max_blocked_k,
const unsigned int i_kloop_complete ) {
LIBXSMM_UNUSED(i_m_blocking);
/* cmp kloop, i_max_blocked_k; jump back while below */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
if ( i_kloop_complete != 0 ) {
/* rewind B over the traversed k panel */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_b, (i_xgemm_desc->k)*(i_micro_kernel_config->datatype_size) );
}
}
/* Emits the n-loop header: registers the loop's jump label, advances the
 * n-loop counter by i_n_blocking at the top of each iteration, and resets
 * the m-loop counter to zero for the nested m loop. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_n_blocking) {
/* loop entry label (target of the backward jump in footer_nloop) */
libxsmm_x86_instruction_register_jump_label( io_generated_code, io_loop_label_tracker );
/* nloop += i_n_blocking */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
/* mloop = 0 for the nested m loop */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, 0 );
}
/* Emits the n-loop footer: advances C to the next n block (forward by
 * n_blocking columns, minus the m bytes the inner m loop already advanced),
 * advances B by n_blocking columns, rewinds A by the m bytes advanced in the
 * m loop, then compares the n-loop counter against i_n_done and jumps back
 * to the header_nloop label while below it. All offsets are in bytes
 * (element counts scaled by datatype_size). */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_n_blocking,
const unsigned int i_n_done ) {
/* C += n_blocking*ldc bytes, compensating for the m advance done inside the m loop */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
#endif
/* B += n_blocking*ldb bytes (next block of B columns) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b, (i_n_blocking*(i_xgemm_desc->ldb)*(i_micro_kernel_config->datatype_size)) );
/* A -= m bytes (rewind the advance made by the inner m loop) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
/* cmp nloop, i_n_done; jump back while below */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
/* Emits the m-loop header: registers the loop's jump label and advances the
 * m-loop counter by i_m_blocking at the top of each iteration. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking ) {
/* loop entry label (target of the backward jump in footer_mloop) */
libxsmm_x86_instruction_register_jump_label( io_generated_code, io_loop_label_tracker );
/* mloop += i_m_blocking */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}
/* Emits the m-loop footer: advances C (and, where enabled, the B/A prefetch
 * pointers) past the m-block just computed, adjusts the A pointer depending
 * on whether the k loop was fully unrolled, then compares the m-loop counter
 * against i_m_done and jumps back to the header_mloop label while below it.
 * All pointer offsets are byte counts (element counts scaled by datatype_size). */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_m_done,
const unsigned int i_k_unrolled ) {
  /* step C forward by the width of the m-block just written */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
    i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size) );
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
      i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
#endif
  /* advance the B prefetch pointer for the strategies that stream B via C */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
      i_gp_reg_mapping->gp_reg_b_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
  if (i_k_unrolled != 0) {
    /* unrolled k loop: only step forward by the m-block width (no k-panel rewind) */
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
        (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    }
    /* advance A pointer */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_a,
      (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
  } else {
    /* looped k: rewind over the k panel (k*lda bytes) minus the m-block advance */
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
        ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
        (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    }
    /* advance (net-rewind) A pointer */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a,
      ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
  }
  /* cmp mloop, i_m_done; jump back while below */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
/* Prepares the accumulator registers for the micro kernel. The accumulator
 * occupies the top (i_n_blocking * l_m_blocking) vector registers, starting
 * at l_vec_reg_acc_start. When beta == 1 the current C block is loaded into
 * the accumulator (except on the AVX512 I16->F32 path, where C is added later
 * during the convert/scale in store_C, so the accumulator is zeroed here);
 * otherwise the accumulator is zeroed via xor. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking ) {
unsigned int l_m_blocking, l_vec_reg_acc_start;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
assert(0 < i_micro_kernel_config->vector_length);
/* deriving register blocking from kernel config */
l_m_blocking = i_m_blocking / i_micro_kernel_config->vector_length;
/* start register of accumulator */
l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
/* Sanity-check whether the requested register blocking can be generated.
These checks only run in debug builds; in release mode an invalid
blocking goes undetected. */
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
/* SSE/AVX/AVX2: at most 3 n-blocks fit in the 16-register file */
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_IMCI ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_MIC ||
( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) && (i_m_blocking == i_micro_kernel_config->vector_length) ) ) {
/* IMCI/AVX512-MIC (and AVX512-CORE with a single-vector m block):
m blocking must equal one vector, n blocking up to 30 */
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE ) {
/* AVX512-CORE with wider m blocking: n blocking limited to 6 */
if ( (i_n_blocking > 6) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
/* m blocking must be a whole number of vector registers */
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif /*NDEBUG*/
/* load C accumulator */
if (i_xgemm_desc->beta == 1) {
/* AVX512 I16->F32 kernels: C is added during the scaled int32->FP32
conversion at store time, so zero the accumulator here instead */
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_ICL) ) &&
( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we add when scaling during conversion to FP32 */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* reg ^= reg: zero accumulator register (acc_start + m + m_blocking*n) */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
}
} else {
/* adding to C, so let's load C */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* load C[n*ldc + m*vlen .. ] into the matching accumulator register */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->use_masking_a_c, 0 );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
} else {
/* overwriting C, so let's xor out the accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code* io_generated_code,
                                     const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
                                     const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                     const libxsmm_gemm_descriptor* i_xgemm_desc,
                                     const unsigned int i_m_blocking,
                                     const unsigned int i_n_blocking )
{
  /* Emits the epilogue of the GEMM micro-kernel: writes the register-blocked
   * accumulator back to C.  For the I16->F32 IGEMM variant it first converts
   * the int32 accumulators to FP32 and applies a scaling factor, and for the
   * BL2-via-C prefetch strategies it issues one prefetch per C cache line. */
  /* deriving register blocking from kernel config */
  unsigned int l_m_blocking = i_m_blocking/i_micro_kernel_config->vector_length;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;
  /* start register of accumulator (accumulators occupy the TOP of the vector
   * register file: the last i_n_blocking * l_m_blocking registers) */
  unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
  /* @TODO fix this test */
#if !defined(NDEBUG)
  /* debug-only sanity checks of the requested register blocking per ISA */
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_IMCI ||
             i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_MIC ||
             ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) && (i_m_blocking == i_micro_kernel_config->vector_length) ) ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 6) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif
  /* in case of IGEMM just do some potential conversion to FP */
  /* convert the int32 accumulator into FP32 values */
  if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
         (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_ICL) ) &&
       ( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
    /* load address of scaling factor from stack (stored at rsp+48 by the kernel prologue) */
    libxsmm_x86_instruction_alu_mem( io_generated_code,
                                     i_micro_kernel_config->alu_mov_instruction,
                                     LIBXSMM_X86_GP_REG_RSP,
                                     LIBXSMM_X86_GP_REG_UNDEF, 0,
                                     48,
                                     i_gp_reg_mapping->gp_reg_help_1,
                                     0 );
    /* broadcast scaling factor into a vector register (vector register 0 is used as scratch) */
    libxsmm_x86_instruction_vec_move( io_generated_code,
                                      i_micro_kernel_config->instruction_set,
                                      LIBXSMM_X86_INSTR_VBROADCASTSS,
                                      i_gp_reg_mapping->gp_reg_help_1,
                                      LIBXSMM_X86_GP_REG_UNDEF, 0,
                                      0,
                                      i_micro_kernel_config->vector_name, 0,
                                      0, 0 );
    /* loop over the accumulator, convert and scale */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        /* convert current accumulator register into FP32 (in place) */
        libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
                                                 i_micro_kernel_config->instruction_set,
                                                 LIBXSMM_X86_INSTR_VCVTDQ2PS,
                                                 i_micro_kernel_config->vector_name,
                                                 l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                 l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                 LIBXSMM_X86_VEC_REG_UNDEF );
        /* scale it */
        if (i_xgemm_desc->beta == 1) {
          /* beta == 1: acc = acc * scale + C via a fused multiply-add reading C from memory */
          libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
                                                   i_micro_kernel_config->instruction_set,
                                                   LIBXSMM_X86_INSTR_VFMADD213PS,
                                                   0,
                                                   i_gp_reg_mapping->gp_reg_c,
                                                   LIBXSMM_X86_GP_REG_UNDEF,
                                                   0,
                                                   ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
                                                   i_micro_kernel_config->vector_name,
                                                   0,
                                                   l_vec_reg_acc_start + l_m + (l_m_blocking * l_n));
        } else {
          /* beta == 0: plain multiply by the broadcast scaling factor */
          libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
                                                   i_micro_kernel_config->instruction_set,
                                                   LIBXSMM_X86_INSTR_VMULPS,
                                                   i_micro_kernel_config->vector_name,
                                                   0,
                                                   l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                   l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
        }
      }
    }
  }
  /* storing C accumulator: one vector store per (l_m, l_n) accumulator register */
  for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
    for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
      libxsmm_x86_instruction_vec_move( io_generated_code,
                                        i_micro_kernel_config->instruction_set,
                                        i_micro_kernel_config->c_vmove_instruction,
                                        i_gp_reg_mapping->gp_reg_c,
                                        LIBXSMM_X86_GP_REG_UNDEF, 0,
                                        ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
                                        i_micro_kernel_config->vector_name,
                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->use_masking_a_c, 1 );
    }
    /* B-prefetch-via-C strategies: prefetch B using C's access pattern/offsets */
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
      /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
      unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
      for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
        libxsmm_x86_instruction_prefetch( io_generated_code,
                                          i_micro_kernel_config->prefetch_instruction,
                                          i_gp_reg_mapping->gp_reg_b_prefetch,
                                          LIBXSMM_X86_GP_REG_UNDEF, 0,
                                          ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
      }
    }
  }
}
|
feature_group.h | /*!
* Copyright (c) 2017 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_FEATURE_GROUP_H_
#define LIGHTGBM_FEATURE_GROUP_H_
#include <LightGBM/bin.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/random.h>
#include <cstdio>
#include <memory>
#include <vector>
namespace LightGBM {
class Dataset;
class DatasetLoader;
/*! \brief Used to store data and provide some operations on one feature group */
class FeatureGroup {
 public:
  friend Dataset;
  friend DatasetLoader;
  /*!
  * \brief Constructor
  * \param num_feature number of features of this group
  * \param is_multi_val True if this group stores each feature in its own bin array (multi-value group)
  * \param bin_mappers Bin mapper for features (ownership of the mappers is taken)
  * \param num_data Total number of data
  */
  FeatureGroup(int num_feature, bool is_multi_val,
               std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
               data_size_t num_data) : num_feature_(num_feature), is_multi_val_(is_multi_val), is_sparse_(false) {
    CHECK_EQ(static_cast<int>(bin_mappers->size()), num_feature);
    // use bin at zero to store most_freq_bin
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    auto& ref_bin_mappers = *bin_mappers;
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(ref_bin_mappers[i].release());
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        // most-frequent bin shares the reserved slot 0, so it needs no slot of its own
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    CreateBinData(num_data, is_multi_val_, true, false);
  }
  /*!
  * \brief Copy metadata (mappers, offsets) from another group but allocate
  *        fresh bin storage for num_data rows (data is NOT copied).
  */
  FeatureGroup(const FeatureGroup& other, int num_data) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;
    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }
    CreateBinData(num_data, is_multi_val_, !is_sparse_, is_sparse_);
  }
  /*! \brief Constructor for a group holding exactly one feature */
  FeatureGroup(std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
               data_size_t num_data) : num_feature_(1), is_multi_val_(false) {
    CHECK_EQ(static_cast<int>(bin_mappers->size()), 1);
    // use bin at zero to store default_bin
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    auto& ref_bin_mappers = *bin_mappers;
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(ref_bin_mappers[i].release());
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    CreateBinData(num_data, false, false, false);
  }
  /*!
  * \brief Constructor from memory
  *        Serialized layout: is_multi_val, is_sparse, num_feature,
  *        then the bin mappers, then the bin data.
  * \param memory Pointer of memory
  * \param num_all_data Number of global data
  * \param local_used_indices Local used indices, empty means using all data
  */
  FeatureGroup(const void* memory, data_size_t num_all_data,
               const std::vector<data_size_t>& local_used_indices) {
    const char* memory_ptr = reinterpret_cast<const char*>(memory);
    // read the boolean flags and feature count
    is_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += sizeof(is_multi_val_);
    is_sparse_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += sizeof(is_sparse_);
    num_feature_ = *(reinterpret_cast<const int*>(memory_ptr));
    memory_ptr += sizeof(num_feature_);
    // get bin mapper
    bin_mappers_.clear();
    bin_offsets_.clear();
    // start from 1, due to need to store zero bin in this slot
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(new BinMapper(memory_ptr));
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
      memory_ptr += bin_mappers_[i]->SizesInByte();
    }
    data_size_t num_data = num_all_data;
    if (!local_used_indices.empty()) {
      num_data = static_cast<data_size_t>(local_used_indices.size());
    }
    if (is_multi_val_) {
      for (int i = 0; i < num_feature_; ++i) {
        // addi: extra slot needed when the most-frequent bin is not bin 0
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
        multi_bin_data_.back()->LoadFromMemory(memory_ptr, local_used_indices);
        memory_ptr += multi_bin_data_.back()->SizesInByte();
      }
    } else {
      if (is_sparse_) {
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      // get bin data
      bin_data_->LoadFromMemory(memory_ptr, local_used_indices);
    }
  }
  /*! \brief Destructor */
  ~FeatureGroup() {
  }
  /*!
  * \brief Push one record, will auto convert to bin and push to bin data
  *        Values that map to the most-frequent bin are skipped (implicit).
  * \param tid Thread id
  * \param sub_feature_idx Index of the feature within this group
  * \param line_idx Index of record
  * \param value feature value of record
  */
  inline void PushData(int tid, int sub_feature_idx, data_size_t line_idx, double value) {
    uint32_t bin = bin_mappers_[sub_feature_idx]->ValueToBin(value);
    if (bin == bin_mappers_[sub_feature_idx]->GetMostFreqBin()) { return; }
    if (bin_mappers_[sub_feature_idx]->GetMostFreqBin() == 0) {
      bin -= 1;
    }
    if (is_multi_val_) {
      multi_bin_data_[sub_feature_idx]->Push(tid, line_idx, bin + 1);
    } else {
      bin += bin_offsets_[sub_feature_idx];
      bin_data_->Push(tid, line_idx, bin);
    }
  }
  /*! \brief Resize the underlying bin storage to num_data rows */
  void ReSize(int num_data) {
    if (!is_multi_val_) {
      bin_data_->ReSize(num_data);
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->ReSize(num_data);
      }
    }
  }
  /*! \brief Copy the rows listed in used_indices from full_feature into this group */
  inline void CopySubrow(const FeatureGroup* full_feature, const data_size_t* used_indices, data_size_t num_used_indices) {
    if (!is_multi_val_) {
      bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices, num_used_indices);
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->CopySubrow(full_feature->multi_bin_data_[i].get(), used_indices, num_used_indices);
      }
    }
  }
  /*! \brief Get an iterator over one sub feature's bins (caller owns the iterator) */
  inline BinIterator* SubFeatureIterator(int sub_feature) {
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
    } else {
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t min_bin = 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      return multi_bin_data_[sub_feature]->GetIterator(min_bin, max_bin, most_freq_bin);
    }
  }
  /*! \brief Finalize bin data after all PushData calls (parallel per feature for multi-value groups) */
  inline void FinishLoad() {
    if (is_multi_val_) {
      OMP_INIT_EX();
      #pragma omp parallel for schedule(guided)
      for (int i = 0; i < num_feature_; ++i) {
        OMP_LOOP_EX_BEGIN();
        multi_bin_data_[i]->FinishLoad();
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    } else {
      bin_data_->FinishLoad();
    }
  }
  /*!
  * \brief Returns a BinIterator that can access the entire feature group's raw data.
  *        The RawGet() function of the iterator should be called for best efficiency.
  * \return A pointer to the BinIterator object, or nullptr for multi-value groups
  */
  inline BinIterator* FeatureGroupIterator() {
    if (is_multi_val_) {
      return nullptr;
    }
    uint32_t min_bin = bin_offsets_[0];
    uint32_t max_bin = bin_offsets_.back() - 1;
    uint32_t most_freq_bin = 0;
    return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
  }
  /*!
  * \brief Partition data_indices by a split on one sub feature; rows going
  *        left are written to lte_indices, rows going right to gt_indices.
  * \return Number of rows placed in lte_indices
  */
  inline data_size_t Split(int sub_feature, const uint32_t* threshold,
                           int num_threshold, bool default_left,
                           const data_size_t* data_indices, data_size_t cnt,
                           data_size_t* lte_indices,
                           data_size_t* gt_indices) const {
    uint32_t default_bin = bin_mappers_[sub_feature]->GetDefaultBin();
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        if (num_feature_ == 1) {
          return bin_data_->Split(max_bin, default_bin, most_freq_bin,
                                  missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        } else {
          return bin_data_->Split(min_bin, max_bin, default_bin, most_freq_bin,
                                  missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        }
      } else {
        if (num_feature_ == 1) {
          return bin_data_->SplitCategorical(max_bin, most_freq_bin, threshold,
                                             num_threshold, data_indices, cnt,
                                             lte_indices, gt_indices);
        } else {
          return bin_data_->SplitCategorical(
              min_bin, max_bin, most_freq_bin, threshold, num_threshold,
              data_indices, cnt, lte_indices, gt_indices);
        }
      }
    } else {
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        return multi_bin_data_[sub_feature]->Split(
            max_bin, default_bin, most_freq_bin, missing_type, default_left,
            *threshold, data_indices, cnt, lte_indices, gt_indices);
      } else {
        return multi_bin_data_[sub_feature]->SplitCategorical(
            max_bin, most_freq_bin, threshold, num_threshold, data_indices, cnt,
            lte_indices, gt_indices);
      }
    }
  }
  /*!
  * \brief From bin to feature value
  * \param sub_feature_idx Index of the feature within this group
  * \param bin Bin index
  * \return Feature value represented by this bin
  */
  inline double BinToValue(int sub_feature_idx, uint32_t bin) const {
    return bin_mappers_[sub_feature_idx]->BinToValue(bin);
  }
  /*!
  * \brief Save binary data to file (same layout as the from-memory constructor reads)
  * \param writer File want to write
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const {
    writer->Write(&is_multi_val_, sizeof(is_multi_val_));
    writer->Write(&is_sparse_, sizeof(is_sparse_));
    writer->Write(&num_feature_, sizeof(num_feature_));
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_[i]->SaveBinaryToFile(writer);
    }
    if (is_multi_val_) {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->SaveBinaryToFile(writer);
      }
    } else {
      bin_data_->SaveBinaryToFile(writer);
    }
  }
  /*!
  * \brief Get sizes in byte of this object (must match SaveBinaryToFile output)
  */
  size_t SizesInByte() const {
    size_t ret = sizeof(is_multi_val_) + sizeof(is_sparse_) + sizeof(num_feature_);
    for (int i = 0; i < num_feature_; ++i) {
      ret += bin_mappers_[i]->SizesInByte();
    }
    if (!is_multi_val_) {
      ret += bin_data_->SizesInByte();
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        ret += multi_bin_data_[i]->SizesInByte();
      }
    }
    return ret;
  }
  /*! \brief Disable copy */
  FeatureGroup& operator=(const FeatureGroup&) = delete;
  /*! \brief Deep copy */
  FeatureGroup(const FeatureGroup& other) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;
    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }
    if (!is_multi_val_) {
      bin_data_.reset(other.bin_data_->Clone());
    } else {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_.emplace_back(other.multi_bin_data_[i]->Clone());
      }
    }
  }
 private:
  /*!
  * \brief Allocate the bin storage.
  * \param num_data Number of rows
  * \param is_multi_val One bin array per feature if true
  * \param force_dense Force dense storage for the single shared bin array
  * \param force_sparse Force sparse storage for the single shared bin array
  */
  void CreateBinData(int num_data, bool is_multi_val, bool force_dense, bool force_sparse) {
    if (is_multi_val) {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
              num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
              Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
      }
      is_multi_val_ = true;
    } else {
      if (force_sparse || (!force_dense && num_feature_ == 1 &&
                           bin_mappers_[0]->sparse_rate() >= kSparseThreshold)) {
        is_sparse_ = true;
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        is_sparse_ = false;
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      is_multi_val_ = false;
    }
  }
  /*! \brief Number of features */
  int num_feature_;
  /*! \brief Bin mapper for sub features */
  std::vector<std::unique_ptr<BinMapper>> bin_mappers_;
  /*! \brief Bin offsets for sub features */
  std::vector<uint32_t> bin_offsets_;
  /*! \brief Bin data of this feature (used when !is_multi_val_) */
  std::unique_ptr<Bin> bin_data_;
  /*! \brief Per-feature bin data (used when is_multi_val_) */
  std::vector<std::unique_ptr<Bin>> multi_bin_data_;
  /*! \brief True if each feature of this group has its own bin array */
  bool is_multi_val_;
  /*! \brief True if this feature group uses sparse storage */
  bool is_sparse_;
  /*! \brief Total number of bins across all features (plus reserved bin 0) */
  int num_total_bin_;
};
} // namespace LightGBM
#endif // LIGHTGBM_FEATURE_GROUP_H_
|
pr91987.c | /* PR c++/91987 */
int bar (void);
void baz (int *);
#pragma omp declare target to (baz)
/* Regression test for PR c++/91987: array sections in OpenMP map, depend
   and reduction clauses may have non-constant lower bounds and lengths
   (here calls to bar ()); these must be accepted and evaluated.  */
void
foo (int *a, int (*b)[10][10])
{
  /* non-constant lower bound / length in map clauses */
  #pragma omp target map(a[bar ()])
  baz (a);
  #pragma omp target map(a[bar ():1])
  baz (a);
  #pragma omp target map(a[10:bar ()])
  baz (a);
  /* non-constant length in depend clauses */
  #pragma omp task depend(inout:a[10:bar ()])
  baz (a);
  #pragma omp task depend(inout:a[10:bar ()])
  baz (a);
  /* non-constant bound / length in reduction clauses, incl. multi-dimensional */
  #pragma omp parallel reduction(+:a[bar ():2])
  baz (a);
  #pragma omp parallel reduction(+:a[2:bar ()])
  baz (a);
  #pragma omp parallel reduction(+:b[bar ():2][bar ():10][bar ():10])
  baz (a);
}
|
core_ctsqrt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztsqrt.c, normal z -> c, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
// This will be swapped during the automatic code generation.
#undef REAL
#define COMPLEX
/***************************************************************************//**
*
* @ingroup core_tsqrt
*
* Computes a QR factorization of a rectangular matrix
* formed by coupling an n-by-n upper triangular tile A1
* on top of an m-by-n tile A2:
*
* | A1 | = Q * R
* | A2 |
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the tile A2. m >= 0.
*
* @param[in] n
* The number of rows of the tile A1.
* The number of columns of the tiles A1 and A2. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the n-by-n tile A1.
* On exit, the elements on and above the diagonal of the array
* contain the n-by-n upper trapezoidal tile R;
* the elements below the diagonal are not referenced.
*
* @param[in] lda1
* The leading dimension of the array A1. LDA1 >= max(1,N).
*
* @param[in,out] A2
* On entry, the m-by-n tile A2.
* On exit, all the elements with the array tau, represent
* the unitary tile Q as a product of elementary reflectors
* (see Further Details).
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m).
*
* @param[out] T
* The ib-by-n triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliary workspace array of length n.
*
* @param work
* Auxiliary workspace array of length ib*n.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
__attribute__((weak))
int plasma_core_ctsqrt(int m, int n, int ib,
                       plasma_complex32_t *A1, int lda1,
                       plasma_complex32_t *A2, int lda2,
                       plasma_complex32_t *T, int ldt,
                       plasma_complex32_t *tau,
                       plasma_complex32_t *work)
{
    // Blocked Householder QR of the stacked matrix [A1; A2], where A1 is
    // n-by-n upper triangular and A2 is m-by-n.  Processes panels of width
    // ib, building the triangular T factors for the compact WY representation.
    // Check input arguments.
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -4;
    }
    if (lda1 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        plasma_coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        plasma_coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -11;
    }

    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    static plasma_complex32_t zone  = 1.0;
    static plasma_complex32_t zzero = 0.0;

    // Panel loop: columns [ii, ii+sb) form the current panel.
    for (int ii = 0; ii < n; ii += ib) {
        int sb = imin(n-ii, ib);
        for (int i = 0; i < sb; i++) {
            // Generate elementary reflector H( II*IB+I ) to annihilate
            // A( II*IB+I:M, II*IB+I ).  The reflector spans the diagonal
            // element of A1 plus the full column of A2 (length m+1).
            LAPACKE_clarfg_work(m+1, &A1[lda1*(ii+i)+ii+i], &A2[lda2*(ii+i)], 1,
                                &tau[ii+i]);

            if (ii+i+1 < n) {
                // Apply H( II*IB+I ) to A( II*IB+I:M, II*IB+I+1:II*IB+IB )
                // from the left.
                plasma_complex32_t alpha = -conjf(tau[ii+i]);
                // work := (row of A1)^H accumulated with A2^H * v
                cblas_ccopy(sb-i-1, &A1[lda1*(ii+i+1)+(ii+i)], lda1, work, 1);
#ifdef COMPLEX
                LAPACKE_clacgv_work(sb-i-1, work, 1);
#endif
                cblas_cgemv(CblasColMajor, (CBLAS_TRANSPOSE)Plasma_ConjTrans,
                            m, sb-i-1,
                            CBLAS_SADDR(zone), &A2[lda2*(ii+i+1)], lda2,
                            &A2[lda2*(ii+i)], 1,
                            CBLAS_SADDR(zone), work, 1);
#ifdef COMPLEX
                LAPACKE_clacgv_work(sb-i-1, work, 1);
#endif
                // update the trailing row of A1 and rank-1 update of A2
                cblas_caxpy(sb-i-1, CBLAS_SADDR(alpha), work, 1,
                            &A1[lda1*(ii+i+1)+ii+i], lda1);
#ifdef COMPLEX
                LAPACKE_clacgv_work(sb-i-1, work, 1);
#endif
                cblas_cgerc(CblasColMajor,
                            m, sb-i-1,
                            CBLAS_SADDR(alpha), &A2[lda2*(ii+i)], 1,
                            work, 1,
                            &A2[lda2*(ii+i+1)], lda2);
            }
            // Calculate T: column i of the panel's triangular factor.
            plasma_complex32_t alpha = -tau[ii+i];
            cblas_cgemv(CblasColMajor, (CBLAS_TRANSPOSE)Plasma_ConjTrans,
                        m, i,
                        CBLAS_SADDR(alpha), &A2[lda2*ii], lda2,
                        &A2[lda2*(ii+i)], 1,
                        CBLAS_SADDR(zzero), &T[ldt*(ii+i)], 1);
            cblas_ctrmv(CblasColMajor, (CBLAS_UPLO)PlasmaUpper,
                        (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        (CBLAS_DIAG)PlasmaNonUnit,
                        i,
                        &T[ldt*ii], ldt,
                        &T[ldt*(ii+i)], 1);
            T[ldt*(ii+i)+i] = tau[ii+i];
        }
        // Apply the accumulated panel transformation to the trailing columns.
        if (n > ii+sb) {
            plasma_core_ctsmqr(PlasmaLeft, Plasma_ConjTrans,
                               sb, n-(ii+sb), m, n-(ii+sb), ib, ib,
                               &A1[lda1*(ii+sb)+ii], lda1,
                               &A2[lda2*(ii+sb)], lda2,
                               &A2[lda2*ii], lda2,
                               &T[ldt*ii], ldt,
                               work, sb);
        }
    }

    return PlasmaSuccess;
}
/******************************************************************************/
// OpenMP task wrapper: schedules plasma_core_ctsqrt() as a task whose
// dependences cover the full A1/A2 tiles (inout) and T (out).  Per-thread
// workspace from 'work' supplies tau (length n) followed by the ib*n
// work buffer expected by the kernel.
void plasma_core_omp_ctsqrt(int m, int n, int ib,
                            plasma_complex32_t *A1, int lda1,
                            plasma_complex32_t *A2, int lda2,
                            plasma_complex32_t *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*n])
    {
        // skip the kernel if an earlier task in the sequence already failed
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            plasma_complex32_t *tau = ((plasma_complex32_t*)work.spaces[tid]);

            // Call the kernel.
            int info = plasma_core_ctsqrt(m, n, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          T, ldt,
                                          tau,
                                          tau+n);

            if (info != PlasmaSuccess) {
                plasma_error("core_ctsqrt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
qsort_arg_mt.c | /*
* Imported from PostgreSQL sources by Teodor Sigaev <teodor@sigaev.ru>, <sigaev@corp.mail.ru>
*/
/*
* qsort_arg.c: qsort with a passthrough "void *" argument
*
* Modifications from vanilla NetBSD source:
* Add do ... while() macro fix
* Remove __inline, _DIAGASSERTs, __P
* Remove ill-considered "swap_cnt" switch to insertion sort,
* in favor of a simple check for presorted input.
*
* CAUTION: if you change this file, see also qsort.c
*
* $PostgreSQL: pgsql/src/port/qsort_arg.c,v 1.4 2007/03/18 05:36:50 neilc Exp $
*/
/* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <third_party/qsort_arg.h>
#include <stdint.h>
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
#define min(a, b) (a) < (b) ? a : b
static char *med3(char *a, char *b, char *c,
int (*cmp)(const void *a, const void *b, void *arg), void *arg);
static void swapfunc(char *, char *, size_t, int);
/**
 * @brief Reduce the current number of threads in the thread pool to the
 * bare minimum. Doesn't prevent the pool from spawning new threads later
 * if demand mounts.
 */
static void
thread_pool_trim(void)
{
	/*
	 * Trim OpenMP thread pool.
	 * Though we lack the direct control the workaround below works for
	 * GNU OpenMP library. The library stops surplus threads on entering
	 * a parallel region. Can't go below 2 threads due to the
	 * implementation quirk.
	 */
	/* NOTE: `(void)` (not `()`) gives the function a proper prototype in
	 * C; an empty parameter list declares unspecified parameters. */
#pragma omp parallel num_threads(2)
	;
}
/*
* Qsort routine based on J. L. Bentley and M. D. McIlroy,
* "Engineering a sort function",
* Software--Practice and Experience 23 (1993) 1249-1265.
* We have modified their original by adding a check for already-sorted input,
* which seems to be a win per discussions on pgsql-hackers around 2006-03-21.
*/
#define swapcode(TYPE, parmi, parmj, n) \
do { \
size_t i = (n) / sizeof (TYPE); \
TYPE *pi = (TYPE *)(void *)(parmi); \
TYPE *pj = (TYPE *)(void *)(parmj); \
do { \
TYPE t = *pi; \
*pi++ = *pj; \
*pj++ = t; \
} while (--i > 0); \
} while (0)
#define SWAPINIT(a, es) swaptype = ((char *)(a) - (char *)0) % sizeof(long) || \
(es) % sizeof(long) ? 2 : (es) == sizeof(long)? 0 : 1;
/*
 * Swap n bytes between a and b.  swaptype (set by SWAPINIT) selects
 * long-sized copies when both pointers and the element size are
 * long-aligned (<= 1), byte-sized copies otherwise.
 */
static void
swapfunc(char *a, char *b, size_t n, int swaptype)
{
	if (swaptype <= 1)
		swapcode(long, a, b, n);
	else
		swapcode(char, a, b, n);
}
#define swap(a, b) \
if (swaptype == 0) { \
long t = *(long *)(void *)(a); \
*(long *)(void *)(a) = *(long *)(void *)(b); \
*(long *)(void *)(b) = t; \
} else \
swapfunc(a, b, es, swaptype)
#define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype)
/*
 * Return the median of the three elements *a, *b, *c under the ordering
 * defined by cmp (with passthrough argument arg).  Used to pick a pivot.
 */
static char *
med3(char *a, char *b, char *c, int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
	if (cmp(a, b, arg) < 0)
	{
		/* a < b: median is b if b < c, else whichever of a, c is larger */
		if (cmp(b, c, arg) < 0)
			return b;
		return cmp(a, c, arg) < 0 ? c : a;
	}
	/* b <= a: median is b if b > c, else whichever of a, c is smaller */
	if (cmp(b, c, arg) > 0)
		return b;
	return cmp(a, c, arg) < 0 ? a : c;
}
/*
 * Bentley & McIlroy quicksort over n elements of size es, comparing with
 * cmp(a, b, arg).  Parallel variant: after the three-way partition the
 * left part is sorted in an OpenMP task while the right part is handled
 * iteratively in this call (goto loop) to bound stack depth.
 */
static void
qsort_arg_mt_internal(void *a, size_t n, intptr_t es,
		int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
	char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
	intptr_t d, r, swaptype, presorted;

loop:SWAPINIT(a, es);
	/* small arrays: plain insertion sort */
	if (n < 7)
	{
		for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
			for (pl = pm; pl > (char *) a && cmp(pl - es, pl, arg) > 0;
				 pl -= es)
				swap(pl, pl - es);
		return;
	}
	/* cheap check for already-sorted input */
	presorted = 1;
	for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
	{
		if (cmp(pm - es, pm, arg) > 0)
		{
			presorted = 0;
			break;
		}
	}
	if (presorted)
		return;
	/* pivot selection: middle element; median-of-3 for n > 7;
	 * ninther (median of three medians) for n > 40 */
	pm = (char *) a + (n / 2) * es;
	if (n > 7)
	{
		pl = (char *) a;
		pn = (char *) a + (n - 1) * es;
		if (n > 40)
		{
			d = (n / 8) * es;
			pl = med3(pl, pl + d, pl + 2 * d, cmp, arg);
			pm = med3(pm - d, pm, pm + d, cmp, arg);
			pn = med3(pn - 2 * d, pn - d, pn, cmp, arg);
		}
		pm = med3(pl, pm, pn, cmp, arg);
	}
	/* move pivot to front; three-way partition:
	 * [eq | lt | unseen | gt | eq] with pa/pb/pc/pd as the boundaries */
	swap((char*)a, pm);
	pa = pb = (char *) a + es;
	pc = pd = (char *) a + (n - 1) * es;
	for (;;)
	{
		while (pb <= pc && (r = cmp(pb, a, arg)) <= 0)
		{
			if (r == 0)
			{
				swap(pa, pb);
				pa += es;
			}
			pb += es;
		}
		while (pb <= pc && (r = cmp(pc, a, arg)) >= 0)
		{
			if (r == 0)
			{
				swap(pc, pd);
				pd -= es;
			}
			pc -= es;
		}
		if (pb > pc)
			break;
		swap(pb, pc);
		pb += es;
		pc -= es;
	}
	/* move the pivot-equal runs from the ends back to the middle */
	pn = (char *) a + n * es;
	r = min(pa - (char *) a, pb - pa);
	vecswap((char*)a, pb - r, r);
	r = min(pd - pc, pn - pd - es);
	vecswap(pb, pn - r, r);
	/* recurse on the "< pivot" part in a separate OpenMP task */
	if ((r = pb - pa) > es) {
#pragma omp task
		qsort_arg_mt_internal(a, r / es, es, cmp, arg);
	}
	if ((r = pd - pc) > es)
	{
		/* Iterate rather than recurse to save stack space */
		a = pn - r;
		n = r / es;
		goto loop;
	}
}
/*
 * Public entry point: sort n elements of size es in array a using cmp
 * with passthrough argument arg.  A parallel region is opened and one
 * thread starts the recursion; sub-sorts are spawned as OpenMP tasks.
 * The worker pool is trimmed again once sorting completes.
 */
void
qsort_arg(void *a, size_t n, size_t es,
		  int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
#pragma omp parallel
	{
#pragma omp single
		qsort_arg_mt_internal(a, n, es, cmp, arg);
	}
	thread_pool_trim();
}
#if defined(__cplusplus)
}
#endif /* defined(__cplusplus) */
|
datac.h | /***************************************************************************
* datac.h is part of Math Graphic Library
* Copyright (C) 2007-2016 Alexey Balakin <mathgl.abalakin@gmail.ru> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Library General Public License as *
* published by the Free Software Foundation; either version 3 of the *
* License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#ifndef _MGL_DATAC_H_
#define _MGL_DATAC_H_
#include "mgl2/data.h"
#include "mgl2/datac_cf.h"
//-----------------------------------------------------------------------------
#include <vector>
#include <string>
//-----------------------------------------------------------------------------
#ifndef SWIG
dual MGL_EXPORT mglLinearC(const dual *a, long nx, long ny, long nz, mreal x, mreal y, mreal z);
dual MGL_EXPORT mglSpline3C(const dual *a, long nx, long ny, long nz, mreal x, mreal y, mreal z,dual *dx=0, dual *dy=0, dual *dz=0);
dual MGL_EXPORT mglSpline3Cs(const dual *a, long nx, long ny, long nz, mreal x, mreal y, mreal z);
//-----------------------------------------------------------------------------
/// Class for working with complex data array
class MGL_EXPORT mglDataC : public mglDataA
{
public:
using mglDataA::Momentum;
long nx; ///< number of points in 1st dimensions ('x' dimension)
long ny; ///< number of points in 2nd dimensions ('y' dimension)
long nz; ///< number of points in 3d dimensions ('z' dimension)
dual *a; ///< data array
std::string id; ///< column (or slice) names
bool link; ///< use external data (i.e. don't free it)
/// Initiate by other mglDataC variable
mglDataC(const mglDataC &d) { a=0; mgl_datac_set(this,&d); } // NOTE: must be constructor for mglDataC& to exclude copy one
mglDataC(const mglDataA &d) { a=0; mgl_datac_set(this,&d); }
#if MGL_HAVE_RVAL
/// Move construction: steal the buffer and disarm the source
mglDataC(mglDataC &&d):nx(d.nx),ny(d.ny),nz(d.nz),a(d.a),id(d.id),link(d.link)
{ s=d.s; temp=d.temp; func=d.func; o=d.o; d.a=0; d.func=0; }
#endif
mglDataC(const mglDataA &re, const mglDataA &im) { a=0; mgl_datac_set_ri(this,&re,&im); }
mglDataC(HCDT d) { a=0; mgl_datac_set(this, d); }
mglDataC(HCDT re, HCDT im) { a=0; mgl_datac_set_ri(this, re, im); }
/// Adopt the contents of d and delete it (ownership transfer)
mglDataC(bool, mglDataC *d) // NOTE: Variable d will be deleted!!!
{ if(d)
{ nx=d->nx; ny=d->ny; nz=d->nz; a=d->a; d->a=0;
temp=d->temp; func=d->func; o=d->o; s=d->s;
id=d->id; link=d->link; delete d; }
else { a=0; Create(1); } }
/// Initiate by flat array
mglDataC(int size, const dual *d) { a=0; Set(d,size); }
mglDataC(int rows, int cols, const dual *d) { a=0; Set(d,cols,rows); }
mglDataC(int size, const double *d) { a=0; Set(d,size); }
mglDataC(int rows, int cols, const double *d) { a=0; Set(d,cols,rows); }
mglDataC(int size, const float *d) { a=0; Set(d,size); }
mglDataC(int rows, int cols, const float *d) { a=0; Set(d,cols,rows); }
mglDataC(const dual *d, int size) { a=0; Set(d,size); }
mglDataC(const dual *d, int rows, int cols) { a=0; Set(d,cols,rows); }
mglDataC(const double *d, int size) { a=0; Set(d,size); }
mglDataC(const double *d, int rows, int cols) { a=0; Set(d,cols,rows); }
mglDataC(const float *d, int size) { a=0; Set(d,size); }
mglDataC(const float *d, int rows, int cols) { a=0; Set(d,cols,rows); }
/// Allocate memory and copy data from std::vector<T>
mglDataC(const std::vector<int> &d) { a=0; Set(d); }
mglDataC(const std::vector<float> &d) { a=0; Set(d); }
mglDataC(const std::vector<double> &d) { a=0; Set(d); }
mglDataC(const std::vector<std::complex<double> > &d) { a=0; Set(d); }
mglDataC(const std::vector<std::complex<float> > &d) { a=0; Set(d); }
/// Read data from file
mglDataC(const char *fname) { a=0; Read(fname); }
/// Allocate the memory for data array and initialize it zero
mglDataC(long xx=1,long yy=1,long zz=1) { a=0; Create(xx,yy,zz); }
/// Delete the array
virtual ~mglDataC() { if(!link && a) delete []a; }
/// Move all data from variable d, and delete this variable.
inline void Move(mglDataC *d) // NOTE: Variable d will be deleted!!!
{ if(d && d->GetNN()>1)
{ bool l=link; dual *b=a;
nx=d->nx; ny=d->ny; nz=d->nz; a=d->a; d->a=b;
temp=d->temp; func=d->func; o=d->o; s=d->s;
id=d->id; link=d->link; d->link=l; delete d; }
else if(d) { *this = d->a[0]; delete d; }
}
inline dual GetVal(long i, long j=0, long k=0) const
{ return mgl_datac_get_value(this,i,j,k);}
inline void SetVal(dual f, long i, long j=0, long k=0)
{ mgl_datac_set_value(this,f,i,j,k); }
/// Get sizes
long GetNx() const { return nx; }
long GetNy() const { return ny; }
long GetNz() const { return nz; }
/// Link external data array (don't delete it at exit)
inline void Link(dual *A, long NX, long NY=1, long NZ=1)
{ mgl_datac_link(this,A,NX,NY,NZ); }
inline void Link(mglDataC &d) { Link(d.a,d.nx,d.ny,d.nz); }
/// Allocate memory and copy the data from the gsl_vector
inline void Set(gsl_vector *m) { mgl_datac_set_vector(this,m); }
/// Allocate memory and copy the data from the gsl_matrix
inline void Set(gsl_matrix *m) { mgl_datac_set_matrix(this,m); }
/// Allocate memory and copy the data from the (float *) array
inline void Set(const float *A,long NX,long NY=1,long NZ=1)
{ mgl_datac_set_float(this,A,NX,NY,NZ); }
/// Allocate memory and copy the data from the (double *) array
inline void Set(const double *A,long NX,long NY=1,long NZ=1)
{ mgl_datac_set_double(this,A,NX,NY,NZ); }
/// Allocate memory and copy the data from the (complex *) array
inline void Set(const dual *A,long NX,long NY=1,long NZ=1)
{ mgl_datac_set_complex(this,A,NX,NY,NZ); }
/// Allocate memory and scanf the data from the string
inline void Set(const char *str,long NX,long NY=1,long NZ=1)
{ mgl_datac_set_values(this,str,NX,NY,NZ); }
/// Import data from abstract type
inline void Set(HCDT dat) { mgl_datac_set(this, dat); }
inline void Set(const mglDataA &dat) { mgl_datac_set(this, &dat); }
inline void Set(const mglDataA &re, const mglDataA &im) { mgl_datac_set_ri(this, &re, &im); }
inline void Set(HCDT re, HCDT im) { mgl_datac_set_ri(this, re, im); }
inline void SetAmpl(const mglDataA &l, const mglDataA &phase)
{ mgl_datac_set_ap(this, &l, &phase); }
/// Allocate memory and copy data from std::vector<T>
inline void Set(const std::vector<int> &d)
{ if(d.size()>0) { Create(d.size()); for(long i=0;i<nx;i++) a[i] = d[i]; }
else Create(1); }
// FIX: these two overloads previously copied from the member array 'a'
// (which is NULL when called from a constructor) instead of the vector 'd'.
inline void Set(const std::vector<float> &d)
{ if(d.size()>0) Set(&(d[0]),d.size()); else Create(1); }
inline void Set(const std::vector<double> &d)
{ if(d.size()>0) Set(&(d[0]),d.size()); else Create(1); }
inline void Set(const std::vector<std::complex<double> > &d)
{ if(d.size()>0) { Create(d.size()); for(long i=0;i<nx;i++) a[i] = d[i]; }
else Create(1); }
inline void Set(const std::vector<std::complex<float> > &d)
{ if(d.size()>0) { Create(d.size()); for(long i=0;i<nx;i++) a[i] = d[i]; }
else Create(1); }
/// Create or recreate the array with specified size and fill it by zero
inline void Create(long mx,long my=1,long mz=1)
{ mgl_datac_create(this,mx,my,mz); }
/// Rearrange data dimensions
inline void Rearrange(long mx, long my=0, long mz=0)
{ mgl_datac_rearrange(this,mx,my,mz); }
/// Transpose dimensions of the data (generalization of Transpose)
inline void Transpose(const char *dim="yx")
{ mgl_datac_transpose(this,dim); }
/// Extend data dimensions
inline void Extend(long n1, long n2=0)
{ mgl_datac_extend(this,n1,n2); }
/// Reduce size of the data
inline void Squeeze(long rx,long ry=1,long rz=1,bool smooth=false)
{ mgl_datac_squeeze(this,rx,ry,rz,smooth); }
/// Crop the data
inline void Crop(long n1, long n2,char dir='x')
{ mgl_datac_crop(this,n1,n2,dir); }
/// Insert data
inline void Insert(char dir, long at=0, long num=1)
{ mgl_datac_insert(this,dir,at,num); }
/// Delete data
inline void Delete(char dir, long at=0, long num=1)
{ mgl_datac_delete(this,dir,at,num); }
/// Join with another data array
inline void Join(const mglDataA &d)
{ mgl_datac_join(this,&d); }
/// Modify the data by specified formula
inline void Modify(const char *eq,long dim=0)
{ mgl_datac_modify(this, eq, dim); }
/// Modify the data by specified formula
inline void Modify(const char *eq,const mglDataA &vdat, const mglDataA &wdat)
{ mgl_datac_modify_vw(this,eq,&vdat,&wdat); }
/// Modify the data by specified formula
inline void Modify(const char *eq,const mglDataA &vdat)
{ mgl_datac_modify_vw(this,eq,&vdat,0); }
/// Modify the data by specified formula assuming x,y,z in range [r1,r2]
inline void Fill(mglBase *gr, const char *eq, const char *opt="")
{ mgl_datac_fill_eq(gr,this,eq,0,0,opt); }
inline void Fill(mglBase *gr, const char *eq, const mglDataA &vdat, const char *opt="")
{ mgl_datac_fill_eq(gr,this,eq,&vdat,0,opt); }
inline void Fill(mglBase *gr, const char *eq, const mglDataA &vdat, const mglDataA &wdat,const char *opt="")
{ mgl_datac_fill_eq(gr,this,eq,&vdat,&wdat,opt); }
/// Equidistantly fill the data to range [x1,x2] in direction dir
inline void Fill(dual x1,dual x2=mglNaN,char dir='x')
{ mgl_datac_fill(this,x1,x2,dir); }
/// Fill the data by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in range [p1,p2] using global spline
inline void RefillGS(const mglDataA &xdat, const mglDataA &vdat, mreal x1, mreal x2,long sl=-1)
{ mgl_datac_refill_gs(this,&xdat,&vdat,x1,x2,sl); }
/// Fill the data by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in range [p1,p2]
inline void Refill(const mglDataA &xdat, const mglDataA &vdat, mreal x1, mreal x2,long sl=-1)
{ mgl_datac_refill_x(this,&xdat,&vdat,x1,x2,sl); }
inline void Refill(const mglDataA &xdat, const mglDataA &vdat, mglPoint p1, mglPoint p2,long sl=-1)
{ mgl_datac_refill_x(this,&xdat,&vdat,p1.x,p2.x,sl); }
inline void Refill(const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, mglPoint p1, mglPoint p2,long sl=-1)
{ mgl_datac_refill_xy(this,&xdat,&ydat,&vdat,p1.x,p2.x,p1.y,p2.y,sl); }
inline void Refill(const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, mglPoint p1, mglPoint p2)
{ mgl_datac_refill_xyz(this,&xdat,&ydat,&zdat,&vdat,p1.x,p2.x,p1.y,p2.y,p1.z,p2.z); }
/// Fill the data by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range of gr
inline void Refill(HMGL gr, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,this,&xdat,0,0,&vdat,sl,opt); }
inline void Refill(HMGL gr, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,this,&xdat,&ydat,0,&vdat,sl,opt); }
inline void Refill(HMGL gr, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="")
{ mgl_datac_refill_gr(gr,this,&xdat,&ydat,&zdat,&vdat,-1,opt); }
/// Put value to data element(s)
inline void Put(dual val, long i=-1, long j=-1, long k=-1)
{ mgl_datac_put_val(this,val,i,j,k); }
/// Put array to data element(s)
inline void Put(const mglDataA &dat, long i=-1, long j=-1, long k=-1)
{ mgl_datac_put_dat(this,&dat,i,j,k); }
/// Set names for columns (slices)
inline void SetColumnId(const char *ids)
{ mgl_datac_set_id(this,ids); }
/// Make new id
inline void NewId() { id.clear(); }
/// Read data from tab-separated text file with auto determining size
inline bool Read(const char *fname)
{ return mgl_datac_read(this,fname); }
/// Read data from text file with specified size
inline bool Read(const char *fname,long mx,long my=1,long mz=1)
{ return mgl_datac_read_dim(this,fname,mx,my,mz); }
/// Save whole data array (for ns=-1) or only ns-th slice to text file
void Save(const char *fname,long ns=-1) const
{ mgl_datac_save(this,fname,ns); }
/// Get whole data array (for ns=-1) or only ns-th slice to string
std::string Get(long ns=-1) const
{ return mgl_datac_to_string(this,ns); }
/// Read data from tab-separated text files with auto determining size which filenames are result of sprintf(fname,templ,t) where t=from:step:to
inline bool ReadRange(const char *templ, double from, double to, double step=1, bool as_slice=false)
{ return mgl_datac_read_range(this,templ,from,to,step,as_slice); }
/// Read data from tab-separated text files with auto determining size which filenames are satisfied to template (like "t_*.dat")
inline bool ReadAll(const char *templ, bool as_slice=false)
{ return mgl_datac_read_all(this, templ, as_slice); }
/// Read data from text file with size specified at beginning of the file
inline bool ReadMat(const char *fname, long dim=2)
{ return mgl_datac_read_mat(this,fname,dim); }
/// Read data array from HDF file (parse HDF4 and HDF5 files)
inline int ReadHDF(const char *fname,const char *data)
{ return mgl_datac_read_hdf(this,fname,data); }
/// Save data to HDF file
void SaveHDF(const char *fname,const char *data,bool rewrite=false) const
{ mgl_datac_save_hdf(this,fname,data,rewrite); }
/// Get real part of data values
inline mglData Real() const
{ return mglData(true,mgl_datac_real(this)); }
/// Get imaginary part of data values
inline mglData Imag() const
{ return mglData(true,mgl_datac_imag(this)); }
/// Get absolute value of data values, i.e. |u|
inline mglData Abs() const
{ return mglData(true,mgl_datac_abs(this)); }
/// Get square of absolute value of data values, i.e. |u|^2
inline mglData Norm() const
{ return mglData(true,mgl_datac_norm(this)); }
/// Get argument of data values
inline mglData Arg() const
{ return mglData(true,mgl_datac_arg(this)); }
/// Get column (or slice) of the data filled by formulas of named columns
inline mglDataC Column(const char *eq) const
{ return mglDataC(true,mgl_datac_column(this,eq)); }
/// Get momentum (1D-array) of data along direction 'dir'. String looks like "x1" for median in x-direction, "x2" for width in x-dir and so on.
inline mglDataC Momentum(char dir, const char *how) const
{ return mglDataC(true,mgl_datac_momentum(this,dir,how)); }
/// Get sub-array of the data with given fixed indexes
inline mglDataC SubData(long xx,long yy=-1,long zz=-1) const
{ return mglDataC(true,mgl_datac_subdata(this,xx,yy,zz)); }
inline mglDataC SubData(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz) const
{ return mglDataC(true,mgl_datac_subdata_ext(this,&xx,&yy,&zz)); }
inline mglDataC SubData(const mglDataA &xx, const mglDataA &yy) const
{ return mglDataC(true,mgl_datac_subdata_ext(this,&xx,&yy,0)); }
inline mglDataC SubData(const mglDataA &xx) const
{ return mglDataC(true,mgl_datac_subdata_ext(this,&xx,0,0)); }
/// Get trace of the data array
inline mglDataC Trace() const
{ return mglDataC(true,mgl_datac_trace(this)); }
/// Get array which is result of summation in given direction or directions
inline mglDataC Sum(const char *dir) const
{ return mglDataC(true,mgl_datac_sum(this,dir)); }
/// Get the data which is direct multiplication (like, d[i,j] = this[i]*a[j] and so on)
inline mglDataC Combine(const mglDataA &dat) const
{ return mglDataC(true,mgl_datac_combine(this,&dat)); }
/// Resize the data to new size of box [x1,x2]*[y1,y2]*[z1,z2]
inline mglDataC Resize(long mx,long my=1,long mz=1, mreal x1=0,mreal x2=1, mreal y1=0,mreal y2=1, mreal z1=0,mreal z2=1) const
{ return mglDataC(true,mgl_datac_resize_box(this,mx,my,mz,x1,x2,y1,y2,z1,z2)); }
/// Get array which values is result of interpolation this for coordinates from other arrays
inline mglDataC Evaluate(const mglData &idat, bool norm=true) const
{ return mglDataC(true,mgl_datac_evaluate(this,&idat,0,0,norm)); }
inline mglDataC Evaluate(const mglData &idat, const mglData &jdat, bool norm=true) const
{ return mglDataC(true,mgl_datac_evaluate(this,&idat,&jdat,0,norm)); }
inline mglDataC Evaluate(const mglData &idat, const mglData &jdat, const mglData &kdat, bool norm=true) const
{ return mglDataC(true,mgl_datac_evaluate(this,&idat,&jdat,&kdat,norm)); }
/// Find correlation with another data arrays
inline mglDataC Correl(const mglData &dat, const char *dir) const
{ return mglDataC(true,mgl_datac_correl(this,&dat,dir)); }
/// Find auto correlation function
inline mglDataC AutoCorrel(const char *dir) const
{ return mglDataC(true,mgl_datac_correl(this,this,dir)); }
/// Create n-th points distribution of this data values in range [v1, v2]
inline mglData Hist(long n,mreal v1=0,mreal v2=1, long nsub=0) const
{ return mglData(true,mgl_data_hist(this,n,v1,v2,nsub)); }
/// Create n-th points distribution of this data values in range [v1, v2] with weight w
inline mglData Hist(const mglDataA &w, long n,mreal v1=0,mreal v2=1, long nsub=0) const
{ return mglData(true,mgl_data_hist_w(this,&w,n,v1,v2,nsub)); }
/// Get array which is result of maximal values in given direction or directions
inline mglData Max(const char *dir) const
{ return mglData(true,mgl_data_max_dir(this,dir)); }
/// Get array which is result of minimal values in given direction or directions
inline mglData Min(const char *dir) const
{ return mglData(true,mgl_data_min_dir(this,dir)); }
/// Cumulative summation the data in given direction or directions
inline void CumSum(const char *dir) { mgl_datac_cumsum(this,dir); }
/// Integrate (cumulative summation) the data in given direction or directions
inline void Integral(const char *dir) { mgl_datac_integral(this,dir); }
/// Differentiate the data in given direction or directions
inline void Diff(const char *dir) { mgl_datac_diff(this,dir); }
/// Double-differentiate (like laplace operator) the data in given direction
inline void Diff2(const char *dir) { mgl_datac_diff2(this,dir); }
/// Swap left and right part of the data in given direction (useful for fourier spectrums)
inline void Swap(const char *dir) { mgl_datac_swap(this,dir); }
/// Roll data along direction dir by num slices
inline void Roll(char dir, long num) { mgl_datac_roll(this,dir,num); }
/// Mirror the data in given direction (useful for fourier spectrums)
inline void Mirror(const char *dir) { mgl_datac_mirror(this,dir); }
/// Smooth the data on specified direction or directions
/** String \a dir may contain:
* ‘x’, ‘y’, ‘z’ for 1st, 2nd or 3d dimension;
* ‘dN’ for linear averaging over N points;
* ‘3’ for linear averaging over 3 points;
* ‘5’ for linear averaging over 5 points.
* By default quadratic averaging over 5 points is used. */
inline void Smooth(const char *dirs="xyz",mreal delta=0)
{ mgl_datac_smooth(this,dirs,delta); }
/// Limit the data to be inside [-v,v], keeping the original sign
inline void Limit(mreal v)
{ mgl_datac_limit(this, v); }
/// Hankel transform
inline void Hankel(const char *dir) { mgl_datac_hankel(this,dir); }
/// Fourier transform
inline void FFT(const char *dir) { mgl_datac_fft(this,dir); }
/// Calculate one step of diffraction by finite-difference method with parameter q
inline void Diffraction(const char *how, mreal q) { mgl_datac_diffr(this,how,q); }
/// Interpolate by cubic spline the data to given point x=[0...nx-1], y=[0...ny-1], z=[0...nz-1]
inline dual Spline(mreal x,mreal y=0,mreal z=0) const
{ return mgl_datac_spline(this, x,y,z); }
/// Interpolate by cubic spline the data to given point x,\a y,\a z which normalized in range [0, 1]
inline dual Spline1(mreal x,mreal y=0,mreal z=0) const
{ return mgl_datac_spline(this, x*(nx-1),y*(ny-1),z*(nz-1)); }
/// Interpolate by linear function the data to given point x=[0...nx-1], y=[0...ny-1], z=[0...nz-1]
inline dual Linear(mreal x,mreal y=0,mreal z=0) const
{ return mgl_datac_linear_ext(this,x,y,z,0,0,0); }
/// Interpolate by line the data to given point x,\a y,\a z which normalized in range [0, 1]
inline dual Linear1(mreal x,mreal y=0,mreal z=0) const
{ return mgl_datac_linear_ext(this,x*(nx-1),y*(ny-1),z*(nz-1),0,0,0); }
/// Interpolate by linear function the data and return its derivatives at given point x=[0...nx-1], y=[0...ny-1], z=[0...nz-1]
inline dual Linear(mglPoint &dif, mreal x,mreal y=0,mreal z=0) const
{
dual val,dx,dy,dz;
val = mgl_datac_linear_ext(this,x,y,z, &dx, &dy, &dz);
dif.Set(dx.real(),dy.real(),dz.real()); return val;
}
/// Interpolate by line the data and return its derivatives at given point x,\a y,\a z which normalized in range [0, 1]
inline dual Linear1(mglPoint &dif, mreal x,mreal y=0,mreal z=0) const
{
dual val,dx,dy,dz;
val = mgl_datac_linear_ext(this,x,y,z, &dx, &dy, &dz);
dif.Set(dx.real(),dy.real(),dz.real());
dif.x/=nx>1?nx-1:1; dif.y/=ny>1?ny-1:1; dif.z/=nz>1?nz-1:1;
return val;
}
/// Return an approximated x-value (root) when dat(x) = val
inline mreal Solve(mreal val, bool use_spline=true, long i0=0) const
{ return mgl_data_solve_1d(this, val, use_spline, i0); }
/// Return an approximated value (root) when dat(x) = val
inline mglData Solve(mreal val, char dir, bool norm=true) const
{ return mglData(true,mgl_data_solve(this, val, dir, 0, norm)); }
inline mglData Solve(mreal val, char dir, const mglData &i0, bool norm=true) const
{ return mglData(true,mgl_data_solve(this, val, dir, &i0, norm)); }
/// Copy data from other mglDataA variable
inline const mglDataA &operator=(const mglDataA &d)
{ if(this!=&d) Set(&d); return d; }
inline const mglDataC &operator=(const mglDataC &d)
{ if(this!=&d) Set(&d); return d; }
inline dual operator=(dual val)
{
#pragma omp parallel for
for(long i=0;i<nx*ny*nz;i++) a[i]=val; return val; }
inline dual operator=(mreal val)
{
#pragma omp parallel for
for(long i=0;i<nx*ny*nz;i++) a[i]=val; return val; }
/// Multiply the data by other one for each element
inline void operator*=(const mglDataA &d) { mgl_datac_mul_dat(this,&d); }
/// Divide the data by other one for each element
inline void operator/=(const mglDataA &d) { mgl_datac_div_dat(this,&d); }
/// Add the other data
inline void operator+=(const mglDataA &d) { mgl_datac_add_dat(this,&d); }
/// Subtract the other data
inline void operator-=(const mglDataA &d) { mgl_datac_sub_dat(this,&d); }
/// Multiply each element by the number
inline void operator*=(dual d) { mgl_datac_mul_num(this,d); }
/// Divide each element by the number
inline void operator/=(dual d) { mgl_datac_div_num(this,d); }
/// Add the number
inline void operator+=(dual d) { mgl_datac_add_num(this,d); }
/// Subtract the number
inline void operator-=(dual d) { mgl_datac_sub_num(this,d); }
#ifndef SWIG
/// Direct access to the data cell
inline dual &operator[](long i) { return a[i]; }
#endif
#ifndef DEBUG
/// Get the value in given cell of the data
mreal v(long i,long j=0,long k=0) const { return abs(a[i+nx*(j+ny*k)]); }
/// Set the value in given cell of the data
void set_v(mreal val, long i,long j=0,long k=0) { a[i+nx*(j+ny*k)]=val; }
#else
/// Get the value in given cell of the data with border checking
mreal v(long i,long j=0,long k=0) const { return mgl_abs(mgl_datac_get_value(this,i,j,k)); }
/// Set the value in given cell of the data
void set_v(mreal val, long i,long j=0,long k=0) { mgl_datac_set_value(this,val,i,j,k); }
#endif
/// Get the complex value in given cell of the data
dual vc(long i,long j=0,long k=0) const { return a[i+nx*(j+ny*k)]; }
dual vcthr(long i) const { return a[i]; }
/// Get the interpolated value and its derivatives in given data cell without border checking
mreal valueD(mreal x,mreal y=0,mreal z=0,mreal *dx=0,mreal *dy=0,mreal *dz=0) const
{ dual aa,ax,ay,az; mreal res;
aa = mglSpline3C(a,nx,ny,nz,x,y,z,&ax,&ay,&az); res = abs(aa);
if(dx) *dx = res?(real(aa)*real(ax)+imag(aa)*imag(ax))/res:0;
if(dy) *dy = res?(real(aa)*real(ay)+imag(aa)*imag(ay))/res:0;
if(dz) *dz = res?(real(aa)*real(az)+imag(aa)*imag(az))/res:0; return res; }
/// Get the interpolated value in given data cell without border checking
mreal value(mreal x,mreal y=0,mreal z=0) const
{ return abs(mglSpline3Cs(a,nx,ny,nz,x,y,z)); }
mreal vthr(long i) const { return abs(a[i]); }
// add for speeding up !!!
mreal dvx(long i,long j=0,long k=0) const
{ register long i0=i+nx*(j+ny*k);
return i>0? abs(i<nx-1? (a[i0+1]-a[i0-1])/mreal(2):a[i0]-a[i0-1]) : abs(a[i0+1]-a[i0]); }
mreal dvy(long i,long j=0,long k=0) const
{ register long i0=i+nx*(j+ny*k);
return j>0? abs(j<ny-1? (a[i0+nx]-a[i0-nx])/mreal(2):a[i0]-a[i0-nx]) : abs(a[i0+nx]-a[i0]);}
mreal dvz(long i,long j=0,long k=0) const
{ register long i0=i+nx*(j+ny*k), n=nx*ny;
return k>0? abs(k<nz-1? (a[i0+n]-a[i0-n])/mreal(2):a[i0]-a[i0-n]) : abs(a[i0+n]-a[i0]); }
};
//-----------------------------------------------------------------------------
/// Saves result of PDE solving (|u|^2) for "Hamiltonian" ham with initial conditions ini
/// The returned mglDataC adopts (and later frees) the HADT produced by the C API.
inline mglDataC mglPDEc(mglBase *gr, const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, mreal dz=0.1, mreal k0=100,const char *opt="")
{ return mglDataC(true, mgl_pde_solve_c(gr,ham, &ini_re, &ini_im, dz, k0,opt)); }
/// Saves result of PDE solving for "Hamiltonian" ham with initial conditions ini along a curve ray (must have nx>=7 - x,y,z,px,py,pz,tau or nx=5 - x,y,px,py,tau)
inline mglDataC mglQO2dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mreal r=1, mreal k0=100)
{ return mglDataC(true, mgl_qo2d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, 0, 0)); }
// Overload passing xx/yy to the solver; presumably filled with the grid coordinates — TODO confirm against mgl_qo2d_solve_c docs
inline mglDataC mglQO2dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mglData &xx, mglData &yy, mreal r=1, mreal k0=100)
{ return mglDataC(true, mgl_qo2d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, &xx, &yy)); }
/// Saves result of PDE solving for "Hamiltonian" ham with initial conditions ini along a curve ray (must have nx>=7 - x,y,z,px,py,pz,tau or nx=5 - x,y,px,py,tau)
inline mglDataC mglQO3dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mreal r=1, mreal k0=100)
{ return mglDataC(true, mgl_qo3d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, 0, 0, 0)); }
// Overload passing xx/yy/zz to the solver; presumably filled with the grid coordinates — TODO confirm
inline mglDataC mglQO3dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mglData &xx, mglData &yy, mglData &zz, mreal r=1, mreal k0=100)
{ return mglDataC(true, mgl_qo3d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, &xx, &yy, &zz)); }
//-----------------------------------------------------------------------------
/// Get array as solution of tridiagonal system of equations a[i]*x[i-1]+b[i]*x[i]+c[i]*x[i+1]=d[i]
/** String \a how may contain:
* 'x', 'y', 'z' for solving along x-,y-,z-directions, or
* 'h' for solving along hexagonal direction at x-y plain (need nx=ny),
* 'c' for using periodical boundary conditions,
* 'd' for diffraction/diffuse calculation. */
// Result adopts the HADT returned by the C API (freed by the mglDataC destructor).
inline mglDataC mglTridMatC(const mglDataA &A, const mglDataA &B, const mglDataA &C, const mglDataC &D, const char *how)
{ return mglDataC(true, mgl_datac_tridmat(&A, &B, &C, &D, how)); }
//-----------------------------------------------------------------------------
/// Get sub-array of the data with given fixed indexes
/// (an index of -1 keeps the whole corresponding dimension)
inline mglDataC mglSubDataC(const mglDataA &dat, long xx, long yy=-1, long zz=-1)
{ return mglDataC(true,mgl_datac_subdata(&dat,xx,yy,zz)); }
/// Get sub-array using index arrays along each dimension; omitted dimensions pass 0
inline mglDataC mglSubDataC(const mglDataA &dat, const mglDataA &xx, const mglDataA &yy, const mglDataA &zz)
{ return mglDataC(true,mgl_datac_subdata_ext(&dat,&xx,&yy,&zz)); }
inline mglDataC mglSubDataC(const mglDataA &dat, const mglDataA &xx, const mglDataA &yy)
{ return mglDataC(true,mgl_datac_subdata_ext(&dat,&xx,&yy,0)); }
inline mglDataC mglSubDataC(const mglDataA &dat, const mglDataA &xx)
{ return mglDataC(true,mgl_datac_subdata_ext(&dat,&xx,0,0)); }
//-----------------------------------------------------------------------------
/// Prepare coefficients for global spline interpolation
/// (pass the returned array as \a coef to mglGSplineC below)
inline mglDataC mglGSplineCInit(const mglDataA &xdat, const mglDataA &ydat)
{ return mglDataC(true,mgl_gsplinec_init(&xdat, &ydat)); }
/// Evaluate global spline (and its derivatives d1, d2 if not NULL) using prepared coefficients \a coef
inline dual mglGSplineC(const mglDataA &coef, mreal dx, dual *d1=0, dual *d2=0)
{ return mgl_gsplinec(&coef, dx, d1,d2); }
//-----------------------------------------------------------------------------
// Helper casts used by the C/SWIG interface layer: reinterpret an opaque
// handle (pointer-to-pointer) as the underlying mglDataC object.
#define _DN_(a) ((mglDataC *)*(a))
#define _DC_ ((mglDataC *)*d)
//-----------------------------------------------------------------------------
#ifndef SWIG
/// Wrapper class for complex expression evaluating
/// Wrapper class for complex expression evaluating
/// Owns an HAEX handle created in the constructor and released in the destructor.
class MGL_EXPORT mglExprC
{
HAEX ex;	///< opaque expression handle (owned)
// NOTE(review): this private copy ctor leaves 'ex' uninitialized; it is never
// meant to be called (copying is disabled), but invoking it would be UB — confirm.
mglExprC(const mglExprC &){}	// copying is not allowed
const mglExprC &operator=(const mglExprC &t){return t;}	// copying is not allowed
public:
mglExprC(const char *expr) { ex = mgl_create_cexpr(expr); }
~mglExprC() { mgl_delete_cexpr(ex); }
/// Return value of expression for given x,y,z variables
inline dual Eval(dual x, dual y=0, dual z=0)
{ return mgl_cexpr_eval(ex,x,y,z); }
/// Return value of expression for given x,y,z,u,v,w variables
/// (other variables 'a'..'t' are left uninitialized in var[] — only x,y,z,u,v,w are set)
inline dual Eval(dual x, dual y, dual z, dual u, dual v, dual w)
{
dual var[26];
var['x'-'a']=x; var['y'-'a']=y; var['z'-'a']=z;
var['u'-'a']=u; var['v'-'a']=v; var['w'-'a']=w;
return mgl_cexpr_eval_v(ex,var); }
/// Return value of expression for given variables ('a'..'z' mapped to var[0..25])
inline dual Eval(dual var[26])
{ return mgl_cexpr_eval_v(ex,var); }
};
#endif
//-----------------------------------------------------------------------------
#endif
#endif
|
PoW.c | /* Copyright 2016-2018 The Ulord Core Foundation */
#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
// #include <omp.h>
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"
// Cached sizeof(uint8_t) (always 1); used with shifts below as byte counts for memcpy.
const uint8_t sizeof_uint8_t = sizeof(uint8_t);
//#define SSE_VERSION
/*
* Step 1: Initialize working memory.
*/
/* Step 1 of the PoW: fill the WORK_MEMORY_SIZE-byte scratch buffer Maddr,
 * 32 bytes (one slice) per iteration, from a hash chain seeded by `input`.
 * Every 128th slice re-hashes the accumulator `a` and reseeds four rand48
 * streams from it; other slices are rotated pseudo-random bytes XORed into `a`.
 * NOTE(review): parameter K is unused — the stride is hard-coded as `i & 0x7F`
 * (i.e. K==128, see commented-out `i % K`). Do not change: output is consensus-critical. */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) {
uint32_t i, j;
uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN];
funcInfor[0].func(input, inputLen, a);	/* a = H0(input) */
uint64_t randSeed[4] = {0, 0, 0, 0};
#ifndef SSE_VERSION
struct my_rand48_data randBuffer[4];
#else
struct vrand48_data randBuffer[2];
#endif
const uint32_t iterNum = WORK_MEMORY_SIZE >> 5;	/* number of 32-byte slices */
for (i = 0; i < iterNum; ++i) {
// K = 128
//if (i % K) {
/* i==0 takes the else branch first, so randBuffer is seeded before use */
if (i & 0x7F) {
#ifndef SSE_VERSION
uint64_t num = 0;
for (j = 0; j < 4; ++j) {
my_rand64_r(&randBuffer[j], &num);
//memcpy(b + (j << 3), (uint8_t *)&num, sizeof(uint8_t) << 3);
memcpy(b + (j << 3), (uint8_t *)&num, sizeof_uint8_t << 3);	/* 8 bytes per stream */
}
#else
vrand64(b, randBuffer);
#endif
uint8_t shift_num;
uint8_t result[OUTPUT_LEN];
reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);	/* fold slice index into a rotate amount */
rrs(b, OUTPUT_LEN, result, shift_num);
//memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t));
memcpy(Maddr + (i << 5), result, sizeof_uint8_t << 5);	/* store slice i */
for (j = 0; j < 32; ++j) {
a[j] ^= result[j];	/* accumulate into a */
}
} else {
/* every 128th slice: pick hash function t from a, re-hash, reseed PRNGs */
uint8_t t = 0, shift_num = 0;
reduce_bit(a, 32, (uint8_t *)&t, 8);
t = (t & 0x0f) ^ (t >> 4);	/* t in [0,15] selects one of the one-way functions */
reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
/* NOTE(review): a_rrs is sized INPUT_LEN but rrs writes OUTPUT_LEN bytes —
 * assumes INPUT_LEN >= OUTPUT_LEN; verify the header constants */
uint8_t a_rrs[INPUT_LEN];
rrs(a, OUTPUT_LEN, a_rrs, shift_num);
funcInfor[t].func(a_rrs, 32, a);
/* derive four 48-bit seeds from the 32-byte digest */
reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
my_seed48_r(randSeed[0], &randBuffer[0]);
my_seed48_r(randSeed[1], &randBuffer[1]);
my_seed48_r(randSeed[2], &randBuffer[2]);
my_seed48_r(randSeed[3], &randBuffer[3]);
#else
vseed48(randSeed , &randBuffer[0]);
vseed48(randSeed + 2, &randBuffer[1]);
#endif
//memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));
memcpy(Maddr + (i << 5), a, sizeof_uint8_t << 5);	/* store slice i */
}
}
}
/*
* Step 2: Modify the working memory contents.
*/
/* Step 2 of the PoW: perform C rounds of data-dependent random-walk swaps over
 * the working memory Maddr, XOR-folding each round's digest into `result`.
 * L scales the inner iteration count (L<<6 swaps per round).
 * NOTE(review): the 0xFFFFF address masks assume WORK_MEMORY_SIZE == 0x100000
 * (see commented-out modulo forms); confirm if the constant ever changes. */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C,
uint8_t *result) {
uint32_t i, j;
uint8_t a[OUTPUT_LEN], b[64];
funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);	/* a = H0(last slice) */
//memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));
memcpy(result, a, sizeof_uint8_t << 5);
uint64_t r = 0;
reduce_bit(a, 32, (uint8_t *)&r, 64);	/* running mixing state */
const uint32_t iterNum = L << 6;
for (i = 0; i < C; ++i) {
uint64_t randSeed = 0;
reduce_bit(a, 32, (uint8_t *)&randSeed, 48);	/* reseed PRNG from current digest */
struct my_rand48_data randBuffer;
my_seed48_r(randSeed, &randBuffer);
uint8_t t1, t2, s;
uint64_t randNum = 0, base = 0;
for (j = 0; j < iterNum; ++j) {
my_rand48_r(&randBuffer, &randNum);
base = randNum + r;
uint64_t offset = 0;
reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8);
offset = (offset << 8) + 1;	/* odd offset in [1, 65281] */
//#define WORK_MEMORY_SIZE (1024*1024) 0x100000
//uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) & 0xFFFFF;
//uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
uint64_t addr2 = (base + offset) & 0xFFFFF;
/* swap-and-mask the two memory cells, feeding the result back into r */
t1 = Maddr[addr1];
t2 = Maddr[addr2];
s = a[j & 0x1f];
Maddr[addr1] = t2 ^ s;
Maddr[addr2] = t1 ^ s;
b[j & 0x3f] = t1 ^ t2;	/* NOTE(review): b fully written only when iterNum >= 64, i.e. L >= 1 — confirm callers */
r = r + s + t1 + t2;
}
/* pick next hash function from r, rotate, and re-hash the 64-byte trace */
uint8_t t = 0;
reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8);
t = (t & 0x0f) ^ (t >> 4);
reduce_bit(b, 64, a, 256);
uint8_t shift_num = 0;
uint64_t ir = r + i;
reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8);
uint8_t a_rrs[INPUT_LEN];
rrs(a, OUTPUT_LEN, a_rrs, shift_num);
funcInfor[t].func(a_rrs, 32, a);
for (j = 0; j < OUTPUT_LEN; ++j) {
result[j] ^= a[j];	/* fold this round into the output */
}
}
}
/*
* Step 3: Calculate the final result.
*/
/* Step 3 of the PoW: starting from digest c, XOR slices of the working memory
 * into `result` in data-dependent bursts (1..2^D slices per burst, chosen by
 * reducing the current result to D bits), re-hashing between bursts, until the
 * slice cursor reaches num; the final hash uses funcInfor[0].
 * NOTE(review): num = (WORK_MEMORY_SIZE>>5)-1, so the very last 32-byte slice
 * is never XORed in — appears intentional (consensus-defined), confirm. */
void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) {
uint32_t i = 0, j = 0, k = 0;
//memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t));
memcpy(result, c, sizeof_uint8_t << 5);
const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1;
uint32_t it = 0;
uint8_t result_rrs[OUTPUT_LEN];
while(1) {
uint8_t t = 0, shift_num = 0;
uint32_t d = 0;
reduce_bit(result, 32, (uint8_t *)&t, 8);
t = (t & 0x0f) ^ (t >> 4);	/* select next one-way function, in [0,15] */
reduce_bit(result, 32, (uint8_t *)&d, D);
++d;	/* burst length: 1..2^D slices */
for (j = 0; j < d; ++j) {
uint32_t index = i << 5;	/* byte offset of slice i */
for (k = 0; k < 32; ++k) {
result[k] ^= Maddr[index + k];
}
++i;
if (i == num) {
/* memory exhausted: final rotate + hash with function 0, then done */
it = i + t;
reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
rrs(result, OUTPUT_LEN, result_rrs, shift_num);
funcInfor[0].func(result_rrs, 32, result);
return;
}
}
/* between bursts: rotate by an amount derived from t+i, then hash with function t */
it = t + i;
reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
rrs(result, OUTPUT_LEN, result_rrs, shift_num);
funcInfor[t].func(result_rrs, 32, result);
}
}
/*
* Correctness & Performance test for Proof of work
*/
/*
void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) {
int64_t j;
uint32_t inputLen = messLen;
uint8_t input[INPUT_LEN], output[OUTPUT_LEN];
memset(input, 0, INPUT_LEN*sizeof(uint8_t));
memcpy(input, mess, messLen*sizeof(char));
// Init all one-way function
initOneWayFunction();
uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
assert(NULL != Maddr);
memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
printf("****************************** Correctness test (PoW function) ******************************\n");
printf("Test message: %s\n", mess);
powFunction(input, inputLen, Maddr, output);
view_data_u8("PoW", output, OUTPUT_LEN);
printf("*********************************************************************************************\n");
printf("*************************************************** Performance test (PoW function) ***************************************************\n");
uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
assert(NULL != result);
memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));
uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
printf(" %-18s", "Algorithm");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
printf("%12d", threadNumArr[ix]);
printf("\n");
printf("00 %-18s\t", "PoW");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
omp_set_num_threads(threadNumArr[ix]);
double startTime = get_wall_time();
if (threadNumArr[ix] == 1) {
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN);
}
} else {
#pragma omp parallel for firstprivate(input), private(j) shared(result)
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN);
}
}
double endTime = get_wall_time();
double costTime = endTime - startTime;
printf("%5.0f bps ", iterNum / costTime); fflush(stdout);
// Check result
for (j = 0; j < iterNum; j += 1) {
if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) {
printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j);
view_data_u8("output", output, OUTPUT_LEN);
view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
abort();
}
}
}
printf("\n");
printf("***************************************************************************************************************************************\n");
if (NULL != result) {
free(result);
result = NULL;
}
if (NULL != Maddr) {
free(Maddr);
Maddr = NULL;
}
}
*/
#define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL)
#define MAX_TEST_INPUT_LEN 140
#define MAX_OUT_FILE_NAME_LEN 25
const char testInputCase[][MAX_TEST_INPUT_LEN] = {
"",
"HelloWorld",
"0123456789"
};
/*
 * NIST-style randomness test driver for the PoW function.
 *
 * For each seed in testInputCase[]: hash the seed once, then repeatedly feed
 * the previous OUTPUT_LEN-byte digest back in as the next input, collecting
 * iterNum digests contiguously in outputBuffer, and finally dump the whole
 * stream to "<outFileName>-<testCaseIx>.txt".
 *
 * outFileName: prefix for the per-test-case output files (must be short
 * enough that prefix + "-N.txt" fits in MAX_OUT_FILE_NAME_LEN bytes;
 * snprintf below truncates instead of overflowing).
 */
void powNistTest(const char *outFileName) {
    const uint64_t iterNum = 1024UL * 1024UL;
    // const uint64_t iterNum = 1024UL;
    /* iterNum * OUTPUT_LEN must not exceed OUTPUT_BUFFER_SIZE. */
    uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    assert(NULL != outputBuffer);
    memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));
    initOneWayFunction();
    uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
    for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) {
        char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
        /* snprintf, not sprintf: a long prefix must not overflow the
           25-byte name buffer. */
        snprintf(curOutFileName, sizeof(curOutFileName), "%s-%u.txt", outFileName, testCaseIx);
        FILE *fp = NULL;
        if (NULL != (fp = fopen(curOutFileName, "wb"))) {
            const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
            uint8_t input[MAX_TEST_INPUT_LEN];
            memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t));
            memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t));
            double startTime = get_wall_time();
            powFunction(input, testInputCaseLen, Maddr, outputBuffer);
            for (uint64_t i = 1, j = 0; i < iterNum; ++i) {
                /* BUGFIX: was OUTPUT_LEN * sizeof(uint32_t), which copied
                   4*OUTPUT_LEN bytes from a uint8_t stream and read past the
                   end of outputBuffer on the last iterations; one digest is
                   exactly OUTPUT_LEN bytes. */
                memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t));
                j += OUTPUT_LEN;
                powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
            }
            double endTime = get_wall_time();
            double costTime = endTime - startTime;
            /* %u / %llu match the unsigned argument types exactly. */
            fprintf(stdout, "TestCaseIx: %u, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \
            testInputCase[testCaseIx], (unsigned long long)iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout);
            fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
            fclose(fp);
        } else {
            fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
            abort();
        }
    }
    /* free(NULL) is a no-op, so no guards are needed. */
    free(outputBuffer);
    outputBuffer = NULL;
    free(Maddr);
    Maddr = NULL;
}
|
4.collapse.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#define N 5
/* Q1: Which iterations of the loops are executed by each thread */
/* when the collapse clause is used? */
/* Q2: Is the execution correct if we remove the collapse clause? */
/* Add the appropriate clause to make it correct. */
int main()
{
    omp_set_num_threads(8);
    /* Q1: without collapse(2) only the outer i-loop is distributed; each
       thread runs the full inner j-loop for its i values.
       Q2: j must be per-thread for correctness -- here that is achieved by
       block-scoping j inside the loop instead of a private(j) clause. */
    #pragma omp parallel for // collapse(2)
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("(%d) Iter (%d %d)\n", omp_get_thread_num(), i, j);
        }
    }
    return 0;
}
|
omp_kmeans.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "kmeans.h"
/* square of Euclid distance between two multi-dimensional points */
/* Squared Euclidean distance between two points of dimension numdims.
   X1, X2: coordinate arrays of length [numdims]. No square root is taken
   (callers only compare distances). */
float distance(int numdims, float *X1, float *X2)
{
    float sum = 0.0f;
    for (int k = 0; k < numdims; k++) {
        float diff = X1[k] - X2[k];
        sum += diff * diff;
    }
    return sum;
}
/* Return the index of the cluster center nearest to object.
   numClusters: number of centers; numvariables: coordinates per point;
   object: [numvariables]; clusters: [numClusters][numvariables].
   Squared distances are compared, so no square root is needed; ties keep
   the lowest index. */
int clusterdecision(int numClusters, int numvariables, float *object, float **clusters)
{
    int best = 0;
    float best_dist = distance(numvariables, object, clusters[0]);
    for (int c = 1; c < numClusters; c++) {
        float d = distance(numvariables, object, clusters[c]);
        if (d < best_dist) {
            best_dist = d;
            best = c;
        }
    }
    return best;
}
/* K-means clustering (OpenMP).
 *
 * dat:             in, [numObs][numvariables] observations
 * numvariables:    number of features per observation
 * numObs:          number of observations
 * numClusters:     number of clusters
 * threshold:       stop when the fraction of points changing cluster drops
 *                  to or below this value (or after 500 iterations)
 * prediction:      out, [numObs] cluster index assigned to each observation
 * loop_iterations: out, number of assignment passes performed
 *
 * Returns a freshly allocated [numClusters][numvariables] array of cluster
 * centers; the caller owns it (free(clusters[0]); free(clusters);).
 */
float** omp_kmeans(float **dat, int numvariables, int numObs, int numClusters,float threshold, int *prediction,int *loop_iterations)
{
    int i, j, index, loop=0;
    int *newClusterSize;  /* [numClusters]: points assigned to each new cluster */
    float delta;          /* fraction of points that changed cluster this pass */
    float **clusters;     /* out: [numClusters][numvariables] */
    float **newClusters;  /* [numClusters][numvariables] running sums */

    /* clusters[] is one contiguous slab with per-row pointers into it */
    clusters = (float**) malloc(numClusters * sizeof(float*));
    clusters[0] = (float*) malloc(numClusters * numvariables * sizeof(float));
    for (i=1; i<numClusters; i++)
        clusters[i] = clusters[i-1] + numvariables;

    /* pick first numClusters elements of dat[] as initial cluster centers */
    for (i=0; i<numClusters; i++)
        for (j=0; j<numvariables; j++)
            clusters[i][j] = dat[i][j];

    /* initialize prediction[] so every point counts as "changed" in pass 1 */
    for (i=0; i<numObs; i++) prediction[i] = -1;

    /* calloc zero-initializes the per-cluster size counters and sums */
    newClusterSize = (int*) calloc(numClusters, sizeof(int));
    newClusters = (float**) malloc(numClusters * sizeof(float*));
    newClusters[0] = (float*) calloc(numClusters * numvariables, sizeof(float));
    for (i=1; i<numClusters; i++)
        newClusters[i] = newClusters[i-1] + numvariables;

    do {
        delta = 0.0;
        /* BUGFIX: delta, newClusterSize[] and newClusters[][] are shared
           accumulators; the original loop updated them with no
           synchronization, a data race that silently corrupts the centers
           whenever more than one thread runs. delta is now an OpenMP
           reduction and the per-cluster accumulators are updated
           atomically. prediction[i] needs no protection: each i is touched
           by exactly one thread. */
        #pragma omp parallel for private(i,j,index) reduction(+:delta)
        for (i=0; i<numObs; i++)
        {
            /* nearest cluster center for observation i */
            index = clusterdecision(numClusters, numvariables, dat[i], clusters);
            if (prediction[i] != index) delta += 1.0;
            prediction[i] = index;
            #pragma omp atomic
            newClusterSize[index]++;
            for (j=0; j<numvariables; j++) {
                #pragma omp atomic
                newClusters[index][j] += dat[i][j];
            }
        }
        /* average the sums and replace old cluster centers */
        for (i=0; i<numClusters; i++)
        {
            for (j=0; j<numvariables; j++)
            {
                if (newClusterSize[i] > 0)
                    clusters[i][j] = newClusters[i][j] / newClusterSize[i];
                newClusters[i][j] = 0.0;  /* reset for the next pass */
            }
            newClusterSize[i] = 0;        /* reset for the next pass */
        }
        delta /= numObs;
    } while (delta > threshold && loop++ < 500); /* stop once converged */

    *loop_iterations = loop + 1;

    free(newClusters[0]);
    free(newClusters);
    free(newClusterSize);
    return clusters;
}
|
lensing.c | /** @file lensing.c Documented lensing module
*
* Simon Prunet and Julien Lesgourgues, 6.12.2010
*
* This module computes the lensed temperature and polarization
* anisotropy power spectra \f$ C_l^{X}, P(k), ... \f$'s given the
* unlensed temperature, polarization and lensing potential spectra.
*
* Follows Challinor and Lewis full-sky method, astro-ph/0502425
*
* The following functions can be called from other modules:
*
* -# lensing_init() at the beginning (but after spectra_init())
* -# lensing_cl_at_l() at any time for computing Cl_lensed at any l
* -# lensing_free() at the end
*/
#include "lensing.h"
#include <time.h>
/**
* Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions.
* SO FAR: ONLY SCALAR
*
* This routine evaluates all the lensed \f$ C_l\f$'s at a given value of l by
* picking it in the pre-computed table. When relevant, it also
* sums over all initial conditions for each mode, and over all modes.
*
* This function can be called from whatever module at whatever time,
* provided that lensing_init() has been called before, and
* lensing_free() has not been called yet.
*
* @param ple Input: pointer to lensing structure
* @param l Input: multipole number
* @param cl_lensed Output: lensed \f$ C_l\f$'s for all types (TT, TE, EE, etc..)
* @return the error status
*/
int lensing_cl_at_l(
struct lensing * ple,
int l,
double * cl_lensed /* array with argument cl_lensed[index_ct] (must be already allocated) */
) {
int last_index;
int index_lt;
/* Guard: the lensed table only extends to l_lensed_max. */
class_test(l > ple->l_lensed_max,
ple->error_message,
"you asked for lensed Cls at l=%d, they were computed only up to l=%d, you should increase l_max_scalars or decrease the precision parameter delta_l_max",l,ple->l_lensed_max);
/* Cubic-spline interpolation of all lt_size spectra at multipole l,
   using the second derivatives precomputed in lensing_init(). */
class_call(array_interpolate_spline(ple->l,
ple->l_size,
ple->cl_lens,
ple->ddcl_lens,
ple->lt_size,
l,
&last_index,
cl_lensed,
ple->lt_size,
ple->error_message),
ple->error_message,
ple->error_message);
/* set to zero for the types such that l>l_max for that type */
for (index_lt=0; index_lt<ple->lt_size; index_lt++)
if ((int)l > ple->l_max_lt[index_lt])
cl_lensed[index_lt]=0.;
return _SUCCESS_;
}
/**
* This routine initializes the lensing structure (in particular,
* computes table of lensed anisotropy spectra \f$ C_l^{X} \f$)
*
* @param ppr Input: pointer to precision structure
* @param ppt Input: pointer to perturbation structure (just in case, not used in current version...)
* @param psp Input: pointer to spectra structure
* @param pnl Input: pointer to nonlinear structure
* @param ple Output: pointer to initialized lensing structure
* @return the error status
*/
int lensing_init(
struct precision * ppr,
struct perturbs * ppt,
struct spectra * psp,
struct nonlinear * pnl,
struct lensing * ple
) {
/** Summary: */
/** - Define local variables */
double * mu; /* mu[index_mu]: discretized values of mu
between -1 and 1, roots of Legendre polynomial */
double * w8; /* Corresponding Gauss-Legendre quadrature weights */
double theta,delta_theta;
double ** d00; /* dmn[index_mu][index_l] */
double ** d11;
double ** d2m2;
double ** d22 = NULL;
double ** d20 = NULL;
double ** d1m1;
double ** d31 = NULL;
double ** d40 = NULL;
double ** d3m1 = NULL;
double ** d3m3 = NULL;
double ** d4m2 = NULL;
double ** d4m4 = NULL;
double * buf_dxx; /* buffer */
double * Cgl; /* Cgl[index_mu] */
double * Cgl2; /* Cgl2[index_mu] */
double * sigma2; /* sigma[index_mu] */
double * ksi = NULL; /* ksi[index_mu] */
double * ksiX = NULL; /* ksiX[index_mu] */
double * ksip = NULL; /* ksip[index_mu] */
double * ksim = NULL; /* ksim[index_mu] */
double fac,fac1;
double X_000;
double X_p000;
double X_220;
double X_022;
double X_p022;
double X_121;
double X_132;
double X_242;
int num_mu,index_mu,icount;
int l;
double ll;
double * cl_unlensed; /* cl_unlensed[index_ct] */
double * cl_tt; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_te = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_ee = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_bb = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_pp; /* potential cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double res,resX,lens;
double resp, resm, lensp, lensm;
double * sqrt1;
double * sqrt2;
double * sqrt3;
double * sqrt4;
double * sqrt5;
double ** cl_md_ic; /* array with argument
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
double ** cl_md; /* array with argument
cl_md[index_md][index_ct] */
int index_md;
/* Timing */
//double debut, fin;
//double cpu_time;
/** - check that we really want to compute at least one spectrum */
if (ple->has_lensed_cls == _FALSE_) {
if (ple->lensing_verbose > 0)
printf("No lensing requested. Lensing module skipped.\n");
return _SUCCESS_;
}
else {
if (ple->lensing_verbose > 0) {
printf("Computing lensed spectra ");
if (ppr->accurate_lensing==_TRUE_)
printf("(accurate mode)\n");
else
printf("(fast mode)\n");
}
}
/** - initialize indices and allocate some of the arrays in the
lensing structure */
class_call(lensing_indices(ppr,psp,ple),
ple->error_message,
ple->error_message);
/** - put all precision variables here; will be stored later in precision structure */
/** - Last element in \f$ \mu \f$ will be for \f$ \mu=1 \f$, needed for sigma2.
The rest will be chosen as roots of a Gauss-Legendre quadrature **/
if (ppr->accurate_lensing == _TRUE_) {
num_mu=(ple->l_unlensed_max+ppr->num_mu_minus_lmax); /* Must be even ?? CHECK */
num_mu += num_mu%2; /* Force it to be even */
} else {
/* Integrate correlation function difference on [0,pi/16] */
num_mu = (ple->l_unlensed_max * 2 )/16;
}
/** - allocate array of \f$ \mu \f$ values, as well as quadrature weights */
class_alloc(mu,
num_mu*sizeof(double),
ple->error_message);
/* Reserve last element of mu for mu=1, needed for sigma2 */
mu[num_mu-1] = 1.0;
class_alloc(w8,
(num_mu-1)*sizeof(double),
ple->error_message);
if (ppr->accurate_lensing == _TRUE_) {
//debut = omp_get_wtime();
class_call(quadrature_gauss_legendre(mu,
w8,
num_mu-1,
ppr->tol_gauss_legendre,
ple->error_message),
ple->error_message,
ple->error_message);
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in quadrature_gauss_legendre=%4.3f s\n",cpu_time);
} else { /* Crude integration on [0,pi/16]: Riemann sum on theta */
delta_theta = _PI_/16. / (double)(num_mu-1);
for (index_mu=0;index_mu<num_mu-1;index_mu++) {
theta = (index_mu+1)*delta_theta;
mu[index_mu] = cos(theta);
w8[index_mu] = sin(theta)*delta_theta; /* We integrate on mu */
}
}
/** - Compute \f$ d^l_{mm'} (\mu) \f$*/
/* icount accumulates the total number of doubles needed for the single
   contiguous buffer buf_dxx (all the dmn tables plus the five sqrt
   lookup tables); the row pointers below are then aimed into it. */
icount = 0;
class_alloc(d00,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d11,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d1m1,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d2m2,
num_mu*sizeof(double*),
ple->error_message);
icount += 4*num_mu*(ple->l_unlensed_max+1);
if(ple->has_te==_TRUE_) {
class_alloc(d20,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d3m1,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d4m2,
num_mu*sizeof(double*),
ple->error_message);
icount += 3*num_mu*(ple->l_unlensed_max+1);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_alloc(d22,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d31,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d3m3,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d40,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d4m4,
num_mu*sizeof(double*),
ple->error_message);
icount += 5*num_mu*(ple->l_unlensed_max+1);
}
icount += 5*(ple->l_unlensed_max+1); /* for arrays sqrt1[l] to sqrt5[l] */
/** - Allocate main contiguous buffer **/
class_alloc(buf_dxx,
icount * sizeof(double),
ple->error_message);
icount = 0;
for (index_mu=0; index_mu<num_mu; index_mu++) {
d00[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d11[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d1m1[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
d2m2[index_mu]= &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 4*num_mu*(ple->l_unlensed_max+1);
if (ple->has_te==_TRUE_) {
for (index_mu=0; index_mu<num_mu; index_mu++) {
d20[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d3m1[index_mu]= &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d4m2[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 3*num_mu*(ple->l_unlensed_max+1);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
for (index_mu=0; index_mu<num_mu; index_mu++) {
d22[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d31[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d3m3[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
d40[index_mu] = &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
d4m4[index_mu]= &(buf_dxx[icount+(index_mu+4*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 5*num_mu*(ple->l_unlensed_max+1);
}
/* The five sqrt lookup tables live at the tail of buf_dxx. */
sqrt1 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt2 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt3 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt4 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt5 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
//debut = omp_get_wtime();
class_call(lensing_d00(mu,num_mu,ple->l_unlensed_max,d00),
ple->error_message,
ple->error_message);
class_call(lensing_d11(mu,num_mu,ple->l_unlensed_max,d11),
ple->error_message,
ple->error_message);
class_call(lensing_d1m1(mu,num_mu,ple->l_unlensed_max,d1m1),
ple->error_message,
ple->error_message);
class_call(lensing_d2m2(mu,num_mu,ple->l_unlensed_max,d2m2),
ple->error_message,
ple->error_message);
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in lensing_dxx=%4.3f s\n",cpu_time);
if (ple->has_te==_TRUE_) {
class_call(lensing_d20(mu,num_mu,ple->l_unlensed_max,d20),
ple->error_message,
ple->error_message);
class_call(lensing_d3m1(mu,num_mu,ple->l_unlensed_max,d3m1),
ple->error_message,
ple->error_message);
class_call(lensing_d4m2(mu,num_mu,ple->l_unlensed_max,d4m2),
ple->error_message,
ple->error_message);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_call(lensing_d22(mu,num_mu,ple->l_unlensed_max,d22),
ple->error_message,
ple->error_message);
class_call(lensing_d31(mu,num_mu,ple->l_unlensed_max,d31),
ple->error_message,
ple->error_message);
class_call(lensing_d3m3(mu,num_mu,ple->l_unlensed_max,d3m3),
ple->error_message,
ple->error_message);
class_call(lensing_d40(mu,num_mu,ple->l_unlensed_max,d40),
ple->error_message,
ple->error_message);
class_call(lensing_d4m4(mu,num_mu,ple->l_unlensed_max,d4m4),
ple->error_message,
ple->error_message);
}
/** - compute \f$ Cgl(\mu)\f$, \f$ Cgl2(\mu) \f$ and sigma2(\f$\mu\f$) */
class_alloc(Cgl,
num_mu*sizeof(double),
ple->error_message);
class_alloc(Cgl2,
num_mu*sizeof(double),
ple->error_message);
class_alloc(sigma2,
(num_mu-1)*sizeof(double), /* Zero separation is omitted */
ple->error_message);
class_alloc(cl_unlensed,
psp->ct_size*sizeof(double),
ple->error_message);
/** - Locally store unlensed temperature \f$ cl_{tt}\f$ and potential \f$ cl_{pp}\f$ spectra **/
class_alloc(cl_tt,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
if (ple->has_te==_TRUE_) {
class_alloc(cl_te,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_alloc(cl_ee,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
class_alloc(cl_bb,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
}
class_alloc(cl_pp,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
class_alloc(cl_md_ic,
psp->md_size*sizeof(double *),
ple->error_message);
class_alloc(cl_md,
psp->md_size*sizeof(double *),
ple->error_message);
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
class_alloc(cl_md[index_md],
psp->ct_size*sizeof(double),
ple->error_message);
if (psp->ic_size[index_md] > 1)
class_alloc(cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
ple->error_message);
}
/* Tabulate the unlensed spectra once, so the hot loops below never call
   spectra_cl_at_l again. */
for (l=2; l<=ple->l_unlensed_max; l++) {
class_call(spectra_cl_at_l(psp,l,cl_unlensed,cl_md,cl_md_ic),
psp->error_message,
ple->error_message);
cl_tt[l] = cl_unlensed[ple->index_lt_tt];
cl_pp[l] = cl_unlensed[ple->index_lt_pp];
if (ple->has_te==_TRUE_) {
cl_te[l] = cl_unlensed[ple->index_lt_te];
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
cl_ee[l] = cl_unlensed[ple->index_lt_ee];
cl_bb[l] = cl_unlensed[ple->index_lt_bb];
}
}
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
free(cl_md[index_md]);
if (psp->ic_size[index_md] > 1)
free(cl_md_ic[index_md]);
}
free(cl_md_ic);
free(cl_md);
/** - Compute sigma2\f$(\mu)\f$ and Cgl2(\f$\mu\f$) **/
//debut = omp_get_wtime();
#pragma omp parallel for \
private (index_mu,l) \
schedule (static)
for (index_mu=0; index_mu<num_mu; index_mu++) {
Cgl[index_mu]=0;
Cgl2[index_mu]=0;
for (l=2; l<=ple->l_unlensed_max; l++) {
Cgl[index_mu] += (2.*l+1.)*l*(l+1.)*
cl_pp[l]*d11[index_mu][l];
Cgl2[index_mu] += (2.*l+1.)*l*(l+1.)*
cl_pp[l]*d1m1[index_mu][l];
}
Cgl[index_mu] /= 4.*_PI_;
Cgl2[index_mu] /= 4.*_PI_;
}
/* sigma2(mu) = Cgl(1.0) - Cgl(mu); Cgl[num_mu-1] holds the mu=1 value. */
for (index_mu=0; index_mu<num_mu-1; index_mu++) {
/* Cgl(1.0) - Cgl(mu) */
sigma2[index_mu] = Cgl[num_mu-1] - Cgl[index_mu];
}
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in Cgl,Cgl2,sigma2=%4.3f s\n",cpu_time);
/** - compute ksi, ksi+, ksi-, ksiX */
/** - --> ksi is for TT **/
if (ple->has_tt==_TRUE_) {
class_calloc(ksi,
(num_mu-1),
sizeof(double),
ple->error_message);
}
/** - --> ksiX is for TE **/
if (ple->has_te==_TRUE_) {
class_calloc(ksiX,
(num_mu-1),
sizeof(double),
ple->error_message);
}
/** - --> ksip, ksim for EE, BB **/
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_calloc(ksip,
(num_mu-1),
sizeof(double),
ple->error_message);
class_calloc(ksim,
(num_mu-1),
sizeof(double),
ple->error_message);
}
/* Precompute the sqrt factors entering the X_lmn coefficients below. */
for (l=2;l<=ple->l_unlensed_max;l++) {
ll = (double)l;
sqrt1[l]=sqrt((ll+2)*(ll+1)*ll*(ll-1));
sqrt2[l]=sqrt((ll+2)*(ll-1));
sqrt3[l]=sqrt((ll+3)*(ll-2));
sqrt4[l]=sqrt((ll+4)*(ll+3)*(ll-2.)*(ll-3));
sqrt5[l]=sqrt(ll*(ll+1));
}
//debut = omp_get_wtime();
/* Main loop: accumulate the lensed correlation functions ksi (TT),
   ksiX (TE) and ksip/ksim (EE+BB, EE-BB) as sums over l at each mu. */
#pragma omp parallel for \
private (index_mu,l,ll,res,resX,resp,resm,lens,lensp,lensm, \
fac,fac1,X_000,X_p000,X_220,X_022,X_p022,X_121,X_132,X_242) \
schedule (static)
for (index_mu=0;index_mu<num_mu-1;index_mu++) {
for (l=2;l<=ple->l_unlensed_max;l++) {
ll = (double)l;
fac = ll*(ll+1)/4.;
fac1 = (2*ll+1)/(4.*_PI_);
/* In the following we will keep terms of the form (sigma2)^k*(Cgl2)^m
with k+m <= 2 */
X_000 = exp(-fac*sigma2[index_mu]);
X_p000 = -fac*X_000;
/* X_220 = 0.25*sqrt1[l] * exp(-(fac-0.5)*sigma2[index_mu]); */
X_220 = 0.25*sqrt1[l] * X_000; /* Order 0 */
/* next 5 lines useless, but avoid compiler warning 'may be used uninitialized' */
X_242=0.;
X_132=0.;
X_121=0.;
X_p022=0.;
X_022=0.;
if (ple->has_te==_TRUE_ || ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
/* X_022 = exp(-(fac-1.)*sigma2[index_mu]); */
X_022 = X_000 * (1+sigma2[index_mu]*(1+0.5*sigma2[index_mu])); /* Order 2 */
X_p022 = (fac-1.)*X_022;
/* X_242 = 0.25*sqrt4[l] * exp(-(fac-5./2.)*sigma2[index_mu]); */
X_242 = 0.25*sqrt4[l] * X_000; /* Order 0 */
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
/* X_121 = - 0.5*sqrt2[l] * exp(-(fac-2./3.)*sigma2[index_mu]);
X_132 = - 0.5*sqrt3[l] * exp(-(fac-5./3.)*sigma2[index_mu]); */
X_121 = -0.5*sqrt2[l] * X_000 * (1+2./3.*sigma2[index_mu]); /* Order 1 */
X_132 = -0.5*sqrt3[l] * X_000 * (1+5./3.*sigma2[index_mu]); /* Order 1 */
}
}
if (ple->has_tt==_TRUE_) {
res = fac1*cl_tt[l];
lens = (X_000*X_000*d00[index_mu][l] +
X_p000*X_p000*d1m1[index_mu][l]
*Cgl2[index_mu]*8./(ll*(ll+1)) +
(X_p000*X_p000*d00[index_mu][l] +
X_220*X_220*d2m2[index_mu][l])
*Cgl2[index_mu]*Cgl2[index_mu]);
if (ppr->accurate_lensing == _FALSE_) {
/* Remove unlensed correlation function */
lens -= d00[index_mu][l];
}
res *= lens;
ksi[index_mu] += res;
}
if (ple->has_te==_TRUE_) {
resX = fac1*cl_te[l];
lens = ( X_022*X_000*d20[index_mu][l] +
Cgl2[index_mu]*2.*X_p000/sqrt5[l] *
(X_121*d11[index_mu][l] + X_132*d3m1[index_mu][l]) +
0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
( ( 2.*X_p022*X_p000+X_220*X_220 ) *
d20[index_mu][l] + X_220*X_242*d4m2[index_mu][l] ) );
if (ppr->accurate_lensing == _FALSE_) {
lens -= d20[index_mu][l];
}
resX *= lens;
ksiX[index_mu] += resX;
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
resp = fac1*(cl_ee[l]+cl_bb[l]);
resm = fac1*(cl_ee[l]-cl_bb[l]);
lensp = ( X_022*X_022*d22[index_mu][l] +
2.*Cgl2[index_mu]*X_132*X_121*d31[index_mu][l] +
Cgl2[index_mu]*Cgl2[index_mu] *
( X_p022*X_p022*d22[index_mu][l] +
X_242*X_220*d40[index_mu][l] ) );
lensm = ( X_022*X_022*d2m2[index_mu][l] +
Cgl2[index_mu] *
( X_121*X_121*d1m1[index_mu][l] +
X_132*X_132*d3m3[index_mu][l] ) +
0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
( 2.*X_p022*X_p022*d2m2[index_mu][l] +
X_220*X_220*d00[index_mu][l] +
X_242*X_242*d4m4[index_mu][l] ) );
if (ppr->accurate_lensing == _FALSE_) {
lensp -= d22[index_mu][l];
lensm -= d2m2[index_mu][l];
}
resp *= lensp;
resm *= lensm;
ksip[index_mu] += resp;
ksim[index_mu] += resm;
}
}
}
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in ksi=%4.3f s\n",cpu_time);
/** - compute lensed \f$ C_l\f$'s by integration */
//debut = omp_get_wtime();
if (ple->has_tt==_TRUE_) {
class_call(lensing_lensed_cl_tt(ksi,d00,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_tt(ple,cl_tt),
ple->error_message,
ple->error_message);
}
}
if (ple->has_te==_TRUE_) {
class_call(lensing_lensed_cl_te(ksiX,d20,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_te(ple,cl_te),
ple->error_message,
ple->error_message);
}
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_call(lensing_lensed_cl_ee_bb(ksip,ksim,d22,d2m2,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_ee_bb(ple,cl_ee,cl_bb),
ple->error_message,
ple->error_message);
}
}
//fin=omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in final lensing computation=%4.3f s\n",cpu_time);
/** - spline computed \f$ C_l\f$'s in view of interpolation */
class_call(array_spline_table_lines(ple->l,
ple->l_size,
ple->cl_lens,
ple->lt_size,
ple->ddcl_lens,
_SPLINE_EST_DERIV_,
ple->error_message),
ple->error_message,
ple->error_message);
/** - Free lots of stuff **/
free(buf_dxx);
free(d00);
free(d11);
free(d1m1);
free(d2m2);
if (ple->has_te==_TRUE_) {
free(d20);
free(d3m1);
free(d4m2);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(d22);
free(d31);
free(d3m3);
free(d40);
free(d4m4);
}
if (ple->has_tt==_TRUE_)
free(ksi);
if (ple->has_te==_TRUE_)
free(ksiX);
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(ksip);
free(ksim);
}
free(Cgl);
free(Cgl2);
free(sigma2);
free(mu);
free(w8);
free(cl_unlensed);
free(cl_tt);
if (ple->has_te==_TRUE_)
free(cl_te);
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(cl_ee);
free(cl_bb);
}
free(cl_pp);
/** - Exit **/
return _SUCCESS_;
}
/**
* This routine frees all the memory space allocated by lensing_init().
*
* To be called at the end of each run, only when no further calls to
* lensing_cl_at_l() are needed.
*
* @param ple Input: pointer to lensing structure (which fields must be freed)
* @return the error status
*/
int lensing_free(
struct lensing * ple
) {
/* Nothing to release unless lensing_init() actually filled the tables. */
if (ple->has_lensed_cls != _TRUE_)
return _SUCCESS_;
free(ple->l);
free(ple->cl_lens);
free(ple->ddcl_lens);
free(ple->l_max_lt);
return _SUCCESS_;
}
/**
* This routine defines indices and allocates tables in the lensing structure
*
* @param ppr Input: pointer to precision structure
* @param psp Input: pointer to spectra structure
* @param ple Input/output: pointer to lensing structure
* @return the error status
*/
int lensing_indices(
struct precision * ppr,
struct spectra * psp,
struct lensing * ple
){
int index_l;
double ** cl_md_ic; /* array with argument
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
double ** cl_md; /* array with argument
cl_md[index_md][index_ct] */
int index_md;
int index_lt;
/* indices of all Cl types (lensed and unlensed): mirror each has_xx flag
   and index from the spectra structure into the lensing structure */
if (psp->has_tt == _TRUE_) {
ple->has_tt = _TRUE_;
ple->index_lt_tt=psp->index_ct_tt;
}
else {
ple->has_tt = _FALSE_;
}
if (psp->has_ee == _TRUE_) {
ple->has_ee = _TRUE_;
ple->index_lt_ee=psp->index_ct_ee;
}
else {
ple->has_ee = _FALSE_;
}
if (psp->has_te == _TRUE_) {
ple->has_te = _TRUE_;
ple->index_lt_te=psp->index_ct_te;
}
else {
ple->has_te = _FALSE_;
}
if (psp->has_bb == _TRUE_) {
ple->has_bb = _TRUE_;
ple->index_lt_bb=psp->index_ct_bb;
}
else {
ple->has_bb = _FALSE_;
}
if (psp->has_pp == _TRUE_) {
ple->has_pp = _TRUE_;
ple->index_lt_pp=psp->index_ct_pp;
}
else {
ple->has_pp = _FALSE_;
}
if (psp->has_tp == _TRUE_) {
ple->has_tp = _TRUE_;
ple->index_lt_tp=psp->index_ct_tp;
}
else {
ple->has_tp = _FALSE_;
}
if (psp->has_dd == _TRUE_) {
ple->has_dd = _TRUE_;
ple->index_lt_dd=psp->index_ct_dd;
}
else {
ple->has_dd = _FALSE_;
}
if (psp->has_td == _TRUE_) {
ple->has_td = _TRUE_;
ple->index_lt_td=psp->index_ct_td;
}
else {
ple->has_td = _FALSE_;
}
if (psp->has_ll == _TRUE_) {
ple->has_ll = _TRUE_;
ple->index_lt_ll=psp->index_ct_ll;
}
else {
ple->has_ll = _FALSE_;
}
if (psp->has_tl == _TRUE_) {
ple->has_tl = _TRUE_;
ple->index_lt_tl=psp->index_ct_tl;
}
else {
ple->has_tl = _FALSE_;
}
ple->lt_size = psp->ct_size;
/* number of multipoles: lensed spectra are computed up to delta_l_max
   below the unlensed l_max, to absorb the convolution's edge effects */
ple->l_unlensed_max = psp->l_max_tot;
ple->l_lensed_max = ple->l_unlensed_max - ppr->delta_l_max;
for (index_l=0; (index_l < psp->l_size_max) && (psp->l[index_l] <= ple->l_lensed_max); index_l++);
if (index_l < psp->l_size_max) index_l++; /* one more point in order to be able to interpolate till ple->l_lensed_max */
ple->l_size = index_l+1;
class_alloc(ple->l,ple->l_size*sizeof(double),ple->error_message);
for (index_l=0; index_l < ple->l_size; index_l++) {
ple->l[index_l] = psp->l[index_l];
}
/* allocate table where results will be stored */
class_alloc(ple->cl_lens,
ple->l_size*ple->lt_size*sizeof(double),
ple->error_message);
class_alloc(ple->ddcl_lens,
ple->l_size*ple->lt_size*sizeof(double),
ple->error_message);
/* fill with unlensed cls (lensing_init() later overwrites the lensed types) */
class_alloc(cl_md_ic,
psp->md_size*sizeof(double *),
ple->error_message);
class_alloc(cl_md,
psp->md_size*sizeof(double *),
ple->error_message);
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
class_alloc(cl_md[index_md],
psp->ct_size*sizeof(double),
ple->error_message);
if (psp->ic_size[index_md] > 1)
class_alloc(cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
ple->error_message);
}
for (index_l=0; index_l<ple->l_size; index_l++) {
class_call(spectra_cl_at_l(psp,ple->l[index_l],&(ple->cl_lens[index_l*ple->lt_size]),cl_md,cl_md_ic),
psp->error_message,
ple->error_message);
}
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
free(cl_md[index_md]);
if (psp->ic_size[index_md] > 1)
free(cl_md_ic[index_md]);
}
free(cl_md_ic);
free(cl_md);
/* we want to output Cl_lensed up to the same l_max as Cl_unlensed
(even if a number delta_l_max of extra values of l have been used
internally for more accurate results). Notable exception to the
above rule: ClBB_lensed(scalars) must be output at least up to the same l_max as
ClEE_unlensed(scalars) (since ClBB_unlensed is null for scalars)
*/
class_alloc(ple->l_max_lt,ple->lt_size*sizeof(double),ple->error_message);
for (index_lt = 0; index_lt < ple->lt_size; index_lt++) {
ple->l_max_lt[index_lt]=0.;
for (index_md = 0; index_md < psp->md_size; index_md++) {
ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][index_lt]);
if ((ple->has_bb == _TRUE_) && (ple->has_ee == _TRUE_) && (index_lt == ple->index_lt_bb)) {
ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][ple->index_lt_ee]);
}
}
}
return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksi Input: Lensed correlation function (ksi[index_mu])
* @param d00 Input: Legendre polynomials (\f$ d^l_{00}\f$[l][index_mu])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<=nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_tt(
    double *ksi,
    double **d00,
    double *w8,
    int nmu,
    struct lensing * ple
    ) {

  int index_l;

  /** Gauss-Legendre quadrature:
      C_l^TT(lensed) = 2*pi * sum_mu ksi(mu) * d^l_00(mu) * w8(mu).
      Each multipole is independent, so the outer loop is parallelized. */
#pragma omp parallel for \
  schedule (static)
  for (index_l=0; index_l<ple->l_size; index_l++) {
    int l = (int)ple->l[index_l];
    double accum = 0.;
    int imu;
    /* same operand order as the textbook formula to keep rounding identical */
    for (imu=0; imu<nmu; imu++)
      accum += ksi[imu]*d00[imu][l]*w8[imu]; /* loop could be optimized */
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt] = accum*2.0*_PI_;
  }

  return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{tt}\f$ power spectrum
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_tt Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_tt(
    struct lensing * ple,
    double *cl_tt) {

  int index_l;

  /* Add the unlensed C_l^TT back onto the lensed spectrum at every sampled l. */
  for (index_l=0; index_l<ple->l_size; index_l++)
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt] += cl_tt[(int)ple->l[index_l]];

  return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksiX Input: Lensed correlation function (ksiX[index_mu])
* @param d20 Input: Wigner d-function (\f$ d^l_{20}\f$[l][index_mu])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<=nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_te(
    double *ksiX,
    double **d20,
    double *w8,
    int nmu,
    struct lensing * ple
    ) {

  int index_l;

  /** Gauss-Legendre quadrature:
      C_l^TE(lensed) = 2*pi * sum_mu ksiX(mu) * d^l_20(mu) * w8(mu). */
#pragma omp parallel for \
  schedule (static)
  for (index_l=0; index_l<ple->l_size; index_l++) {
    int l = (int)ple->l[index_l];
    double accum = 0.;
    int imu;
    for (imu=0; imu<nmu; imu++)
      accum += ksiX[imu]*d20[imu][l]*w8[imu]; /* loop could be optimized */
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te] = accum*2.0*_PI_;
  }

  return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{te}\f$ power spectrum
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_te Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_te(
    struct lensing * ple,
    double *cl_te) {

  int index_l;

  /* Add the unlensed C_l^TE back onto the lensed spectrum at every sampled l. */
  for (index_l=0; index_l<ple->l_size; index_l++)
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te] += cl_te[(int)ple->l[index_l]];

  return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksip Input: Lensed correlation function (ksi+[index_mu])
* @param ksim Input: Lensed correlation function (ksi-[index_mu])
* @param d22 Input: Wigner d-function (\f$ d^l_{22}\f$[l][index_mu])
* @param d2m2 Input: Wigner d-function (\f$ d^l_{2-2}\f$[l][index_mu])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<=nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_ee_bb(
    double *ksip,
    double *ksim,
    double **d22,
    double **d2m2,
    double *w8,
    int nmu,
    struct lensing * ple
    ) {

  int index_l;

  /** Gauss-Legendre quadrature of the two polarization correlation functions:
      C_l^EE = pi * (clp + clm),  C_l^BB = pi * (clp - clm), where
      clp = sum_mu ksi+(mu) d^l_22(mu) w8(mu) and
      clm = sum_mu ksi-(mu) d^l_2-2(mu) w8(mu). */
#pragma omp parallel for \
  schedule (static)
  for (index_l=0; index_l<ple->l_size; index_l++) {
    int l = (int)ple->l[index_l];
    double sum_plus = 0., sum_minus = 0.;
    int imu;
    for (imu=0; imu<nmu; imu++) {
      sum_plus  += ksip[imu]*d22[imu][l]*w8[imu];  /* loop could be optimized */
      sum_minus += ksim[imu]*d2m2[imu][l]*w8[imu]; /* loop could be optimized */
    }
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee] = (sum_plus+sum_minus)*_PI_;
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb] = (sum_plus-sum_minus)*_PI_;
  }

  return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{ee}\f$, \f$ cl_{bb}\f$ power spectra
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_ee Input: Array of unlensed power spectrum
* @param cl_bb Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_ee_bb(
    struct lensing * ple,
    double * cl_ee,
    double * cl_bb) {

  int index_l;

  /* Add the unlensed C_l^EE and C_l^BB back onto the lensed spectra. */
  for (index_l=0; index_l<ple->l_size; index_l++) {
    int l = (int)ple->l[index_l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee] += cl_ee[l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb] += cl_bb[l];
  }

  return _SUCCESS_;
}
/**
* This routine computes the d00 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d00 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d00[index_mu][0..lmax]: note that each row receives lmax+1 entries. */
int lensing_d00(
double * mu,
int num_mu,
int lmax,
double ** d00
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
/* precompute the l-dependent coefficients of the three-term recurrence once,
   outside the parallel loop over mu; indices 0 are never used */
for (l=1; l<lmax; l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(2*ll+1)/(ll+1);
fac2[l] = sqrt((2*ll+3)/(2*ll-1))*ll/(ll+1);
fac3[l] = sqrt(2./(2*ll+3)); /* undoes the sqrt((2l+3)/2) normalization on store */
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seed the upward recurrence with the closed forms at l=0 and l=1 */
dlm1=1.0/sqrt(2.); /* l=0 */
d00[index_mu][0]=dlm1*sqrt(2.);
dl=mu[index_mu] * sqrt(3./2.); /*l=1*/
d00[index_mu][1]=dl*sqrt(2./3.);
for(l=1;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d00 recurrence, supposed to be more stable */
dlp1 = fac1[l]*mu[index_mu]*dl - fac2[l]*dlm1;
d00[index_mu][l+1] = dlp1 * fac3[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3);
return _SUCCESS_;
}
/**
* This routine computes the d11 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d11 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d11[index_mu][0..lmax] (lmax+1 entries per row); d^0_11 is set to 0. */
int lensing_d11(
double * mu,
int num_mu,
int lmax,
double ** d11
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=2 stay unused */
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
fac2[l] = 1.0/(ll*(ll+1.));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* closed-form seeds at l=1 and l=2 start the upward recurrence */
d11[index_mu][0]=0;
dlm1=(1.0+mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
d11[index_mu][1]=dlm1 * sqrt(2./3.);
dl=(1.0+mu[index_mu])/2.*(2.0*mu[index_mu]-1.0) * sqrt(5./2.); /*l=2*/
d11[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d11 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
d11[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d1m1 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d1m1 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d1m1[index_mu][0..lmax]; mirror of lensing_d11 with mu -> -mu signs. */
int lensing_d1m1(
double * mu,
int num_mu,
int lmax,
double ** d1m1
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=2 stay unused */
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
fac2[l] = 1.0/(ll*(ll+1.));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* closed-form seeds at l=1 and l=2 start the upward recurrence */
d1m1[index_mu][0]=0;
dlm1=(1.0-mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
d1m1[index_mu][1]=dlm1 * sqrt(2./3.);
dl=(1.0-mu[index_mu])/2.*(2.0*mu[index_mu]+1.0) * sqrt(5./2.); /*l=2*/
d1m1[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d1m1 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d1m1[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d2m2 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d2m2 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d2m2[index_mu][0..lmax]; entries below l=2 vanish identically. */
int lensing_d2m2(
double * mu,
int num_mu,
int lmax,
double ** d2m2
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=2 stay unused */
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
fac2[l] = 4.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_2-2 = 0 for l<2, closed form at l=2 */
d2m2[index_mu][0]=0;
dlm1=0.; /*l=1*/
d2m2[index_mu][1]=0;
dl=(1.0-mu[index_mu])*(1.0-mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
d2m2[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d2m2 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d2m2[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d22 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d22 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d22[index_mu][0..lmax]; entries below l=2 vanish identically. */
int lensing_d22(
double * mu,
int num_mu,
int lmax,
double ** d22
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=2 stay unused */
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
fac2[l] = 4.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_22 = 0 for l<2, closed form at l=2 */
d22[index_mu][0]=0;
dlm1=0.; /*l=1*/
d22[index_mu][1]=0;
dl=(1.0+mu[index_mu])*(1.0+mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
d22[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d22 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
d22[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d20 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d20 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d20[index_mu][0..lmax]; the m'=0 recurrence has no fac2 offset term. */
int lensing_d20(
double * mu,
int num_mu,
int lmax,
double ** d20
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=2 stay unused */
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-1)*(ll+3)));
fac3[l] = sqrt((2*ll+3)*(ll-2)*(ll+2)/((2*ll-1)*(ll-1)*(ll+3)));
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_20 = 0 for l<2, closed form at l=2 */
d20[index_mu][0]=0;
dlm1=0.; /*l=1*/
d20[index_mu][1]=0;
dl=sqrt(15.)/4.*(1-mu[index_mu]*mu[index_mu]); /*l=2*/
d20[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d20 recurrence, supposed to be more stable */
dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
d20[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d31 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d31 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d31[index_mu][0..lmax]; entries below l=3 vanish identically. */
int lensing_d31(
double * mu,
int num_mu,
int lmax,
double ** d31
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=3 stay unused */
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
fac2[l] = 3.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_31 = 0 for l<3, closed form at l=3 */
d31[index_mu][0]=0;
d31[index_mu][1]=0;
dlm1=0.; /*l=2*/
d31[index_mu][2]=0;
dl=sqrt(105./2.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d31[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d31 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
d31[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d3m1 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d3m1 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d3m1[index_mu][0..lmax]; mirror of lensing_d31 with mu -> -mu signs. */
int lensing_d3m1(
double * mu,
int num_mu,
int lmax,
double ** d3m1
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=3 stay unused */
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
fac2[l] = 3.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_3-1 = 0 for l<3, closed form at l=3 */
d3m1[index_mu][0]=0;
d3m1[index_mu][1]=0;
dlm1=0.; /*l=2*/
d3m1[index_mu][2]=0;
dl=sqrt(105./2.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d3m1[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d3m1 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d3m1[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d3m3 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d3m3 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d3m3[index_mu][0..lmax]; entries below l=3 vanish identically. */
int lensing_d3m3(
double * mu,
int num_mu,
int lmax,
double ** d3m3
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=3 stay unused */
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-2)*(ll+4));
fac2[l] = 9.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-3)*(ll+3)*(l+1)/((ll-2)*(ll+4)*ll); /* NOTE(review): (l+1) uses the int counter; numerically identical to (ll+1) */
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_3-3 = 0 for l<3, closed form at l=3 */
d3m3[index_mu][0]=0;
d3m3[index_mu][1]=0;
dlm1=0.; /*l=2*/
d3m3[index_mu][2]=0;
dl=sqrt(7./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d3m3[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d3m3 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d3m3[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d40 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d40 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d40[index_mu][0..lmax]; the m'=0 recurrence has no fac2 offset term. */
int lensing_d40(
double * mu,
int num_mu,
int lmax,
double ** d40
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=4 stay unused */
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)));
fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)/((2*ll-1)*(ll-3)*(ll+5)));
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_40 = 0 for l<4, closed form at l=4 */
d40[index_mu][0]=0;
d40[index_mu][1]=0;
d40[index_mu][2]=0;
dlm1=0.; /*l=3*/
d40[index_mu][3]=0;
dl=sqrt(315.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d40[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d40 recurrence, supposed to be more stable */
dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
d40[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d4m2 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d4m2 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d4m2[index_mu][0..lmax]; entries below l=4 vanish identically. */
int lensing_d4m2(
double * mu,
int num_mu,
int lmax,
double ** d4m2
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=4 stay unused */
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)*(ll-1)*(ll+3))) * (ll+1.);
fac2[l] = 8./(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)*(ll-2)*(ll+2)/((2*ll-1)*(ll-3)*(ll+5)*(ll-1)*(ll+3)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_4-2 = 0 for l<4, closed form at l=4 */
d4m2[index_mu][0]=0;
d4m2[index_mu][1]=0;
d4m2[index_mu][2]=0;
dlm1=0.; /*l=3*/
d4m2[index_mu][3]=0;
dl=sqrt(126.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d4m2[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d4m2 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d4m2[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d4m4 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d4m4 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
/* Writes d4m4[index_mu][0..lmax]; entries below l=4 vanish identically. */
int lensing_d4m4(
double * mu,
int num_mu,
int lmax,
double ** d4m4
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur; /* scratch error-message buffer for class_alloc */
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* precompute recurrence coefficients once; entries below l=4 stay unused */
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-3)*(ll+5));
fac2[l] = 16./(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-4)*(ll+4)*(ll+1)/((ll-3)*(ll+5)*ll);
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* seeds: d^l_4-4 = 0 for l<4, closed form at l=4 */
d4m4[index_mu][0]=0;
d4m4[index_mu][1]=0;
d4m4[index_mu][2]=0;
dlm1=0.; /*l=3*/
d4m4[index_mu][3]=0;
dl=sqrt(9./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d4m4[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d4m4 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d4m4[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
|
scheduled-clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * OpenMP demo: sums an array under schedule(dynamic,chunk) with
 * firstprivate/lastprivate, then prints several scheduling ICVs.
 * Fixes: validate chunk (schedule(dynamic,0) is invalid), drop the
 * accidentally duplicated thread_limit_var printf, correct the
 * "get_schedulre" typo in the output label.
 */
int main(int argc, char **argv) {
  int i, n = 16, chunk, a[n], suma = 0;
  int modifier;         /* chunk size reported by omp_get_schedule() */
  omp_sched_t kind;     /* schedule kind reported by omp_get_schedule() */

  if (argc < 2) {
    fprintf(stderr, "\nFalta chunk \n");
    exit(EXIT_FAILURE);
  }
  chunk = atoi(argv[1]);
  if (chunk < 1) {
    /* the chunk_size of schedule(dynamic,chunk) must be >= 1 */
    fprintf(stderr, "\nChunk invalido: %s\n", argv[1]);
    exit(EXIT_FAILURE);
  }

  for (i = 0; i < n; i++) a[i] = i;

#pragma omp parallel
  {
    /* firstprivate: each thread starts its private suma at 0;
       lastprivate: after the loop suma holds the value of the thread that
       ran the last iteration, i.e. only that thread's partial sum. */
#pragma omp for firstprivate(suma) \
  lastprivate(suma) schedule(dynamic,chunk)
    for (i = 0; i < n; i++)
    {
      suma = suma + a[i];
      printf(" thread %d suma a[%d] suma=%d \n",
             omp_get_thread_num(), i, suma);
    }

    /* one thread reports the scheduling ICVs seen inside the region */
#pragma omp single
    {
      printf("dyn-var: %d \n", omp_get_dynamic());
      printf("nthreads_var: %d \n", omp_get_max_threads());
      printf("thread_limit_var: %d \n", omp_get_thread_limit());
      omp_get_schedule(&kind, &modifier);
      printf("get_schedule: kind %d,modifier %d \n", kind, modifier);
    }
  }

  printf("Fuera de 'parallel for' suma=%d\n", suma);
  printf("Fuera de la región paralela: \n");
  printf("dyn-var: %d \n", omp_get_dynamic());
  printf("nthreads_var: %d \n", omp_get_max_threads());
  printf("thread_limit_var: %d \n", omp_get_thread_limit());
  omp_get_schedule(&kind, &modifier);
  printf("get_schedule: kind %d,modifier %d \n", kind, modifier);

  return 0;
}
|
ourParallelKmeans2.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
typedef struct Point
{
double x;
double y;
} point;
/*
 * OpenMP k-means demo: reads k (clusters), n (points) and an execution count
 * from stdin, scatters n random points in [0,10)x[0,10), then repeats
 * assignment/update steps until no centroid moves by more than ~1e-5
 * in either coordinate. Each timed execution restarts from the same
 * initial centroids.
 */
int main(int argc, char **argv)
{
int max_threads = omp_get_max_threads();
printf("Maximum number of Threads = %d\n", max_threads);
/* fixed seed: point/centroid generation is reproducible between runs */
srand(69420);
int stdin_input;
int k = 2;
stdin_input = 0;
/* NOTE(review): scanf return values are never checked; on malformed input
   stdin_input keeps its previous value and the default is used */
printf("Number of Clusters (as an integer bigger than 1):\n");
scanf("%d", &stdin_input);
if (stdin_input < 2)
{
printf("Invalid number of Clusters, defaulting to 2\n");
}
else
{
k = stdin_input;
}
int n = 10;
stdin_input = 0;
printf("Number of Points (as an integer bigger than 9):\n");
scanf("%d", &stdin_input);
if (stdin_input < 10)
{
printf("Invalid number of Points, defaulting to 10\n");
}
else
{
n = stdin_input;
}
int max_executions = 1;
stdin_input = 0;
printf("Number of Executions (as an integer bigger than 0):\n");
scanf("%d", &stdin_input);
if (stdin_input < 1)
{
printf("Invalid number of Executions, defaulting to 1\n");
}
else
{
max_executions = stdin_input;
}
point *points;
points = (point *)malloc(sizeof(struct Point) * n);
for (int i = 0; i < n; i++)
{
points[i].x = (double)rand() / (double)(RAND_MAX / 10);
points[i].y = (double)rand() / (double)(RAND_MAX / 10);
}
/* VLAs sized by user input: very large k risks stack overflow (demo code) */
point centroids[k], original_centroids[k];
for (int i = 0; i < k; i++)
{
centroids[i].x = original_centroids[i].x = (double)rand() / (double)(RAND_MAX / 10);
centroids[i].y = original_centroids[i].y = (double)rand() / (double)(RAND_MAX / 10);
}
/* clusters[cluster][thread] is a private bucket of capacity n per thread,
   so the parallel assignment loop appends without any synchronization */
point ***clusters;
clusters = (point ***)malloc(sizeof(point **) * k);
for (int i = 0; i < k; i++)
{
clusters[i] = (point **)malloc(sizeof(point *) * max_threads);
for (int j = 0; j < max_threads; j++)
{
clusters[i][j] = (point *)malloc(sizeof(struct Point) * n);
}
}
double meanExecTime = 0;
int execution = 0;
point previous_centroids[k];
int iterations = 0, changed = 1;
while (execution < max_executions)
{
/* restart every timed execution from the same initial centroids */
for (int i = 0; i < k; i++)
{
centroids[i].x = original_centroids[i].x;
centroids[i].y = original_centroids[i].y;
}
changed = 1;
iterations = 0;
double b4 = omp_get_wtime();
for (; changed; iterations++)
{
int clusters_size[k][max_threads];
for (int i = 0; i < k; i++)
{
for (int j = 0; j < max_threads; j++)
{
clusters_size[i][j] = 0;
}
}
/* assignment step: each point goes into its nearest centroid's bucket */
#pragma omp parallel for schedule(static)
for (int i = 0; i < n; i++)
{
int cluster_closest_to = 0;
double distance, smallest_distance;
for (int j = 0; j < k; j++)
{
/* NOTE(review): powf computes in single precision although the operands
   are double; pow (or dx*dx + dy*dy, skipping sqrt) would be exact-er.
   sqrt is also unnecessary for comparing distances. */
distance = sqrt(powf((centroids[j].x - points[i].x), 2) + powf((centroids[j].y - points[i].y), 2));
if (j == 0)
{
smallest_distance = distance;
}
else
{
if (distance < smallest_distance)
{
smallest_distance = distance;
cluster_closest_to = j;
}
}
}
clusters[cluster_closest_to][omp_get_thread_num()][clusters_size[cluster_closest_to][omp_get_thread_num()]++] = points[i];
}
/* despite the name, has_changed==1 means "no centroid moved" (converged);
   changed = !has_changed below keeps iterating while centroids still move */
int has_changed = 1;
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < k; i++)
{
double x = 0, y = 0;
int cluster_size = 0;
for (int j = 0; j < max_threads; j++)
{
for (int w = 0; w < clusters_size[i][j]; w++)
{
x += clusters[i][j][w].x;
y += clusters[i][j][w].y;
cluster_size++;
}
}
/* empty clusters (x==y==0) keep their previous centroid */
if (!(x == 0 && y == 0))
{
previous_centroids[i] = centroids[i];
centroids[i].x = (double)x / cluster_size;
centroids[i].y = (double)y / cluster_size;
if (!(((previous_centroids[i].x - 0.00001f) < centroids[i].x && (previous_centroids[i].x + 0.00001f) > centroids[i].x) &&
((previous_centroids[i].y - 0.00001f) < centroids[i].y && (previous_centroids[i].y + 0.00001f) > centroids[i].y)))
{
/* NOTE(review): unsynchronized shared write from multiple threads -- a
   data race under the OpenMP memory model even though every writer
   stores the same value; an atomic write (or reduction) would make it
   well-defined */
has_changed = 0;
}
}
}
changed = !has_changed;
}
double time_delta = (omp_get_wtime() - b4);
printf("Time = %f seconds | execution = %d\n", time_delta, execution + 1);
meanExecTime += time_delta;
execution++;
}
printf("Average time of the %d executions: %f\n", execution, meanExecTime / execution);
stdin_input = 0;
printf("If you want to see the results please insert '1'\n");
scanf("%d", &stdin_input);
/* points/clusters are deliberately not freed: the process exits right after */
if (stdin_input == 1)
{
for (int i = 0; i < k; i++)
{
printf("Centroid %d -> (%f,%f)\n", i, centroids[i].x, centroids[i].y);
}
printf("The algorithm converged in %d iterations\n", iterations);
}
return 0;
} |
IPB2_fmt_plug.c | /*
* IPB2_fmt.c (version 4)
*
* Invision Power Board 2.x salted MD5 module for Solar Designer's JtR
* Uses Solar Designer's MD5 implementation.
* regenrecht at o2.pl, Jan 2006
*
* Hashes list should have form of username:$IPB2$salt$hash
* Values to be taken from IPB database, where:
* salt = bin2hex(ibf_members_converge.converge_pass_salt)
* hash = ibf_members_converge.converge_pass_hash
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_IPB2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_IPB2);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "simd-intrinsics.h"
#if defined(_OPENMP)
#include <omp.h>
static unsigned int omp_t = 1;
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 512 // Tuned K8-dual HT
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
#else
#define omp_t 1
#endif
#include "memdbg.h"
#define FORMAT_LABEL "ipb2"
#define FORMAT_NAME "Invision Power Board 2.x"
#define FORMAT_TAG "$IPB2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define BINARY_ALIGN 4
#define BINARY_SIZE 16
#define MD5_HEX_SIZE (BINARY_SIZE * 2)
#define SALT_SIZE MD5_HEX_SIZE
#define SALT_ALIGN 4
#define SALT_LENGTH 5
#define PLAINTEXT_LENGTH 31
#define CIPHERTEXT_LENGTH (1 + 4 + 1 + SALT_LENGTH * 2 + 1 + MD5_HEX_SIZE)
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&12)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 )
#else
#define NBKEYS 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Self-test vectors: "$IPB2$" + 10 hex salt chars + '$' + 32 hex MD5 chars,
   each paired with its known plaintext; NULL-terminated. */
static struct fmt_tests tests[] = {
{"$IPB2$2e75504633$d891f03a7327639bc632d62a7f302604", "welcome"},
{"$IPB2$735a213a4e$4f23de7bb115139660db5e953153f28a", "enter"},
{"$IPB2$5d75343455$de98ba8ca7bb16f43af05e9e4fb8afee", "matrix"},
{"$IPB2$556c576c39$16d4f29c71b05bd75e61d0254800bfa3", "123456"},
{NULL}
};
/* Byte-indexed hex tables: itoa16_shr_04[b] is the hex digit of b's high
   nibble, itoa16_and_0f[b] that of its low nibble -- saves a shift/mask
   per byte when hex-encoding digests. */
static const char itoa16_shr_04[] =
"0000000000000000"
"1111111111111111"
"2222222222222222"
"3333333333333333"
"4444444444444444"
"5555555555555555"
"6666666666666666"
"7777777777777777"
"8888888888888888"
"9999999999999999"
"aaaaaaaaaaaaaaaa"
"bbbbbbbbbbbbbbbb"
"cccccccccccccccc"
"dddddddddddddddd"
"eeeeeeeeeeeeeeee"
"ffffffffffffffff";
static const char itoa16_and_0f[] =
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef";
/* Per-candidate plaintext copies (allocated in init) */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
#if SIMD_COEF_32
/* SIMD path: interleaved key/digest buffers, allocated in init() */
static unsigned char *saved_key;
static unsigned char *key_buf;
static unsigned char *empty_key;
static unsigned char *crypt_key;
static uint32_t *cur_salt;
static int new_salt;
static int new_key;
#else
/* Scalar path: per-candidate hex workspace and raw MD5 output words */
static char (*saved_key)[2*MD5_HEX_SIZE];
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
#endif
/*
 * Allocate per-candidate buffers.  Under OpenMP the key counts are first
 * scaled by the thread count (and the self-tuning multiplier), so every
 * buffer is sized from the final max_keys_per_crypt.
 */
static void init(struct fmt_main *self)
{
#if SIMD_COEF_32
	unsigned int i;
#endif
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	// these 2 lines of change, allows the format to work with
	// [Options] FormatBlockScaleTuneMultiplier= without other format change
	omp_t *= self->params.max_keys_per_crypt;
	omp_t /= NBKEYS;
	self->params.max_keys_per_crypt = (omp_t*NBKEYS);
#endif
#if SIMD_COEF_32
	key_buf = mem_calloc_align(self->params.max_keys_per_crypt,
	                           64, MEM_ALIGN_SIMD);
	/* FIX: element size is one byte.  The original passed
	 * sizeof(empty_key) -- the size of the pointer itself -- which
	 * silently over-allocated this buffer 4-8x. */
	empty_key = mem_calloc_align(64 * NBKEYS,
	                             sizeof(*empty_key), MEM_ALIGN_SIMD);
	for (i = 0; i < NBKEYS; ++i) {
		/* Constant final MD5 block: padding bit plus the bit length
		 * of the 64 hex chars hashed by the second MD5. */
		empty_key[GETPOS(0, i)] = 0x80;
		((unsigned int*)empty_key)[14*SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32*16*SIMD_COEF_32] = (2 * MD5_HEX_SIZE)<<3;
	}
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             64, MEM_ALIGN_SIMD);
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
}
/* Release every buffer allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_plain);
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
#if SIMD_COEF_32
	MEM_FREE(empty_key);
	MEM_FREE(key_buf);
#endif
}
/*
 * Accept only strings of the exact shape
 *   $IPB2$<10 lowercase hex><$><32 lowercase hex>
 * The length is verified before any fixed-offset access.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;
	return ciphertext[16] == '$' &&
	       strspn(ciphertext + 6, HEXCHARS_lc) == SALT_LENGTH * 2 &&
	       strspn(ciphertext + 17, HEXCHARS_lc) == MD5_HEX_SIZE;
}
/*
 * Decode the 32 hex digits after the second '$' into the 16 raw digest
 * bytes.  Returns a pointer to a static buffer (overwritten per call).
 */
static void *get_binary(char *ciphertext)
{
	static uint32_t out[BINARY_SIZE/4];
	unsigned char *dst = (unsigned char*)out;
	const char *hex = ciphertext + 17;
	int i;

	for (i = 0; i < BINARY_SIZE; i++) {
		dst[i] = (unsigned char)((atoi16[ARCH_INDEX(hex[0])] << 4) |
		                         atoi16[ARCH_INDEX(hex[1])]);
		hex += 2;
	}
	return (void*)out;
}
/*
 * Extract the salt.  IPB2 actually salts with hex(md5(raw_salt)), so that
 * is what we store: MD5_HEX_SIZE hex characters (not NUL-terminated) in a
 * static buffer, overwritten on each call.
 */
static void *get_salt(char *ciphertext)
{
	static uint32_t hex_salt[MD5_HEX_SIZE/4];
	unsigned char binary_salt[SALT_LENGTH];
	unsigned char salt_hash[BINARY_SIZE];
	/* FIX: was needlessly 'static', keeping hash state across calls and
	 * making the function non-reentrant; an automatic context is enough. */
	MD5_CTX ctx;
	int i;

	ciphertext += FORMAT_TAG_LEN;
	/* hex -> raw 5-byte salt */
	for (i = 0; i < SALT_LENGTH; ++i)
		binary_salt[i] =
			(atoi16[ARCH_INDEX(ciphertext[i*2])] << 4)
			+ atoi16[ARCH_INDEX(ciphertext[i*2+1])];

	MD5_Init(&ctx);
	MD5_Update(&ctx, binary_salt, SALT_LENGTH);
	MD5_Final(salt_hash, &ctx);

	/* hex-encode the digest; this is the effective salt */
	for (i = 0; i < BINARY_SIZE; ++i) {
		((char*)hex_salt)[i*2] = itoa16[ARCH_INDEX(salt_hash[i] >> 4)];
		((char*)hex_salt)[i*2+1] = itoa16[ARCH_INDEX(salt_hash[i] & 0x0f)];
	}
	return (void*)hex_salt;
}
/*
 * Install the current salt (already hex(md5(raw_salt)), MD5_HEX_SIZE bytes).
 * SIMD: just remember the pointer and mark it dirty; crypt_all() loads it.
 * Scalar: copy it into the front of every candidate's hash buffer now.
 */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	cur_salt = salt;
	new_salt = 1;
#else
	int index;

	/* omp_t is declared elsewhere in this file; presumably 1 in
	 * non-OpenMP builds -- TODO confirm */
	for (index = 0; index < omp_t * MAX_KEYS_PER_CRYPT; index++)
		memcpy(saved_key[index], salt, MD5_HEX_SIZE);
#endif
}
#ifndef SIMD_COEF_32
/*
 * Copy at most 'size' bytes from src to dst, stopping after a NUL byte if
 * one is encountered.  Returns the number of characters copied, excluding
 * the NUL.  If no NUL occurs within 'size' bytes, 'size' bytes are copied,
 * dst is left unterminated, and 'size' is returned.
 */
inline static int strnfcpy_count(char *dst, char *src, int size)
{
	int n;

	for (n = 0; n < size; n++) {
		dst[n] = src[n];
		if (!src[n])
			return n;	/* NUL copied; n chars precede it */
	}
	return size;		/* source filled (or overflowed) the window */
}
#endif
/*
 * Queue one candidate password.
 * SIMD: store the plaintext and defer all hashing to crypt_all().
 * Scalar: hash the key now and append hex(md5(key)) right after the hex
 * salt digest that set_salt() placed in saved_key[index].
 */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
	/* NOTE(review): unbounded strcpy into a PLAINTEXT_LENGTH+1 buffer --
	 * relies on the caller never passing a longer key; verify against
	 * the cracking engine's length enforcement. */
	strcpy(saved_plain[index], key);
	new_key = 1;
#else
	unsigned char key_hash[BINARY_SIZE];
	unsigned char *kh = key_hash;
	unsigned char *key_ptr = (unsigned char*)saved_key[index] + MD5_HEX_SIZE;
	unsigned char v;
	int i, len;
	MD5_CTX ctx;

	len = strnfcpy_count(saved_plain[index], key, PLAINTEXT_LENGTH);

	MD5_Init(&ctx);
	MD5_Update(&ctx, key, len);
	MD5_Final(key_hash, &ctx);

	/* hex-encode the digest in place after the salt hash */
	for (i = 0; i < BINARY_SIZE; ++i) {
		v = *kh++;
		*key_ptr++ = itoa16_shr_04[ARCH_INDEX(v)];
		*key_ptr++ = itoa16_and_0f[ARCH_INDEX(v)];
	}
#endif
}
/* Return the plaintext previously queued with set_key(). */
static char *get_key(int index)
{
	return saved_plain[index];
}
/*
 * Compute md5(hex(md5(salt)) . hex(md5(key))) for all queued candidates.
 * SIMD path: lazily refreshes the salt half and the packed plaintexts
 * (guarded by new_salt/new_key), runs one SIMD MD5 over the keys, hex
 * encodes those digests into the second message, then runs the second MD5
 * as two blocks (data block + constant pre-built padding block).
 * Scalar path: the per-key MD5 already happened in set_key(), so only the
 * outer MD5 over the 64 hex characters is done here.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
#ifdef SIMD_COEF_32
#if defined(_OPENMP)
	int t;
#pragma omp parallel for
	for (t = 0; t < omp_t; t++)
#define ti (t*NBKEYS+index)
#else
#define t 0
#define ti index
#endif
	{
		unsigned int index, i;

		/* (re)load hex(md5(salt)) into bytes 0..31 of every lane */
		if (new_salt)
		for (index = 0; index < NBKEYS; index++) {
			const uint32_t *sp = cur_salt;
			uint32_t *kb = (uint32_t*)&saved_key[GETPOS(0, ti)];
			for (i = 0; i < MD5_HEX_SIZE / 4; i++, kb += SIMD_COEF_32)
				*kb = *sp++;
		}

		/* pack changed plaintexts, word at a time, into the interleaved
		 * SIMD buffer; the 0x80 MD5 padding byte is placed in the same
		 * word as (or right after) the terminating NUL */
		if (new_key)
		for (index = 0; index < NBKEYS; index++) {
			/* NOTE(review): reads saved_plain through uint32_t*,
			 * deliberately scanning whole words up to the NUL */
			const uint32_t *key = (uint32_t*)saved_plain[ti];
			uint32_t *kb = (uint32_t*)&key_buf[GETPOS(0, ti)];
			uint32_t *keybuffer = kb;
			int len, temp;
			len = 0;
			while((unsigned char)(temp = *key++)) {
				if (!(temp & 0xff00)) {
					*kb = (unsigned char)temp | (0x80 << 8);
					len++;
					goto key_cleaning;
				}
				if (!(temp & 0xff0000)) {
					*kb = (unsigned short)temp | (0x80 << 16);
					len+=2;
					goto key_cleaning;
				}
				if (!(temp & 0xff000000)) {
					*kb = temp | (0x80U << 24);
					len+=3;
					goto key_cleaning;
				}
				*kb = temp;
				len += 4;
				kb += SIMD_COEF_32;
			}
			/* key length was a multiple of 4: padding in its own word */
			*kb = 0x00000080;
key_cleaning:
			/* zero the stale tail left by a previous, longer key */
			kb += SIMD_COEF_32;
			while(*kb) {
				*kb = 0;
				kb += SIMD_COEF_32;
			}
			/* message bit length in the MD5 length word */
			keybuffer[14*SIMD_COEF_32] = len << 3;
		}

		/* inner MD5 over the plaintexts */
		SIMDmd5body(&key_buf[t*NBKEYS*64], (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);

		/* hex-encode md5(key) into bytes 32..95 of the outer message */
		for (index = 0; index < NBKEYS; index++) {
			// Somehow when I optimised this it got faster in Valgrind but slower IRL
			for (i = 0; i < BINARY_SIZE; i++) {
				unsigned char v = crypt_key[GETOUTPOS(i, ti)];
				saved_key[GETPOS(MD5_HEX_SIZE + 2 * i, ti)] = itoa16_shr_04[ARCH_INDEX(v)];
				saved_key[GETPOS(MD5_HEX_SIZE + 2 * i + 1, ti)] = itoa16_and_0f[ARCH_INDEX(v)];
			}
		}

		/* outer MD5: 64 data bytes, then the constant padding block */
		SIMDmd5body(&saved_key[t*NBKEYS*64], (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);
		SIMDmd5body(empty_key, (unsigned int*)&crypt_key[t*NBKEYS*16], (unsigned int*)&crypt_key[t*NBKEYS*16], SSEi_RELOAD|SSEi_MIXED_IN);
	}
	//dump_stuff_mmx_msg("\nfinal ", saved_key, 64, count-1);
	//dump_out_mmx_msg("result", crypt_key, 16, count-1);
	new_salt = new_key = 0;
#else
#ifdef _OPENMP
	int index;
#pragma omp parallel for
	for (index = 0; index < count; index++)
#else
#define index 0
#endif
	{
		MD5_CTX ctx;
		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], MD5_HEX_SIZE * 2);
		MD5_Final((unsigned char*)crypt_key[index], &ctx);
	}
#undef index
#endif
	return count;
}
/*
 * Quick screen: return 1 if any computed digest could match 'binary'.
 * SIMD path compares only the first 32-bit word of each interleaved
 * digest; cmp_one() verifies the full 128 bits afterwards.
 */
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
	unsigned int x,y=0;
#ifdef _OPENMP
	for (;y<SIMD_PARA_MD5*omp_t;y++)
#else
	for (;y<SIMD_PARA_MD5;y++)
#endif
		for (x = 0; x < SIMD_COEF_32; x++)
		{
			if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x] )
				return 1;
		}
	return 0;
#else
	int index;
	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
			return 1;
	return 0;
#endif
}
/* All 128 digest bits are already compared in cmp_one(); nothing left. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Full 128-bit comparison of candidate 'index' against 'binary'.
 * SIMD path walks the four interleaved 32-bit words of lane x in
 * digest group y.
 */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int i,x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	for (i=0;i<(BINARY_SIZE/4);i++)
		if ( ((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] )
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
#ifdef SIMD_COEF_32
/* First 32-bit word of candidate 'index''s digest in the interleaved
 * SIMD layout; masked to the requested hash-table width below. */
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_6; }
#else
/* Scalar layout: digests are stored contiguously per candidate. */
static int get_hash_0(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_6; }
#endif
/* Bucket a salt by its first 32 bits (salt is ASCII hex, so well mixed). */
static int salt_hash(void *salt)
{
	return *(uint32_t*)salt & (SALT_HASH_SIZE - 1);
}
/* Format descriptor: parameters and method table registered with the
 * John the Ripper core. */
struct fmt_main fmt_IPB2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	},
	{
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
/* Number of timed repetitions; the fastest run is reported. */
#define TESTS 2
/* FIX: fully parenthesized result operands so expression arguments cannot
 * rebind against surrounding operators (arguments are still evaluated
 * twice -- do not pass expressions with side effects). */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute RESULT = X - Y for two struct timeval values.
 * Returns 1 if the difference is negative, otherwise 0.
 * Note: *y is normalized in place as a side effect.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	long carry;

	/* Normalize y so that 0 <= x->tv_usec - y->tv_usec < 1000000. */
	if (x->tv_usec < y->tv_usec) {
		carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	if (x->tv_usec - y->tv_usec > 1000000) {
		carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* tv_usec is now certainly non-negative in the difference. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
acado_solver.c | /*
* This file was auto-generated using the ACADO Toolkit.
*
* While ACADO Toolkit is free software released under the terms of
* the GNU Lesser General Public License (LGPL), the generated code
* as such remains the property of the user who used ACADO Toolkit
* to generate this code. In particular, user dependent data of the code
* do not inherit the GNU LGPL license. On the other hand, parts of the
* generated code that are a direct copy of source code from the
* ACADO Toolkit or the software tools it is based on, remain, as derived
* work, automatically covered by the LGPL license.
*
* ACADO Toolkit is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include "acado_common.h"
/******************************************************************************/
/* */
/* ACADO code generation */
/* */
/******************************************************************************/
/** Row vector of size: 129 */
/* Integrator I/O staging buffer.  Layout as used by acado_modelSimulation:
 * [0..8] states x, [9..89] state sensitivities (9x9, row-major) written to
 * evGx, [90..116] control sensitivities (9x3) written to evGu, [117..119]
 * controls u, [120..128] online data od.  Listed in private(...) of the
 * simulation loop, so each OpenMP thread works on its own copy. */
real_t state[ 129 ];
/*
 * Simulate the model over all 20 shooting intervals: load states, controls
 * and online data into the integrator buffer, integrate one interval, and
 * scatter the results into the workspace (defect d, sensitivities evGx/evGu).
 * Returns the integrator status; with OpenMP this is the status of whichever
 * iteration wrote last (same as the generated original).
 */
int acado_modelSimulation(  )
{
	int ret = 0;
	int lRun1;
#pragma omp parallel for private(lRun1, state) shared(acadoWorkspace, acadoVariables)
	for (lRun1 = 0; lRun1 < 20; ++lRun1)
	{
		int i;

		/* gather integrator inputs: x, u, od */
		for (i = 0; i < 9; ++i)
			state[i] = acadoVariables.x[lRun1 * 9 + i];
		for (i = 0; i < 3; ++i)
			state[117 + i] = acadoVariables.u[lRun1 * 3 + i];
		for (i = 0; i < 9; ++i)
			state[120 + i] = acadoVariables.od[lRun1 * 9 + i];

		ret = acado_integrate(state, 1);

		/* defect: simulated end state minus next shooting node */
		for (i = 0; i < 9; ++i)
			acadoWorkspace.d[lRun1 * 9 + i] = state[i] - acadoVariables.x[lRun1 * 9 + 9 + i];
		/* state sensitivities (9x9) */
		for (i = 0; i < 81; ++i)
			acadoWorkspace.evGx[lRun1 * 81 + i] = state[9 + i];
		/* control sensitivities (9x3) */
		for (i = 0; i < 27; ++i)
			acadoWorkspace.evGu[lRun1 * 27 + i] = state[90 + i];
	}
	return ret;
}
/*
 * Stage least-squares objective: out[0..10] are the 11 residuals,
 * out[11..109] the 11x9 residual Jacobian w.r.t. the states, and
 * out[110..142] the 11x3 Jacobian w.r.t. the controls.  Both Jacobians
 * are sparse, so they are zero-filled first and only the nonzero entries
 * are written afterwards (same values as the fully unrolled original).
 */
void acado_evaluateLSQ(const real_t* in, real_t* out)
{
	const real_t* xd = in;
	const real_t* u = in + 9;
	/* Vector of auxiliary variables; number of elements: 4. */
	real_t* a = acadoWorkspace.objAuxVar;
	int i;

	/* Compute intermediate quantities: */
	a[0] = (cos(xd[4]));
	a[1] = (cos(xd[3]));
	a[2] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3])));
	a[3] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4])));

	/* Residuals: velocities, positions, roll/pitch, controls, thrust. */
	out[0] = xd[6];
	out[1] = xd[7];
	out[2] = xd[8];
	out[3] = xd[0];
	out[4] = xd[1];
	out[5] = xd[2];
	out[6] = xd[3];
	out[7] = xd[4];
	out[8] = u[0];
	out[9] = u[1];
	out[10] = ((real_t)(-9.8065999999999995e+00)+((a[0]*a[1])*u[2]));

	/* Jacobians: clear everything, then set the sparse nonzeros. */
	for (i = 11; i < 143; ++i)
		out[i] = (real_t)(0.0000000000000000e+00);

	/* d(residual)/d(x) nonzeros */
	out[17] = (real_t)(1.0000000000000000e+00);
	out[27] = (real_t)(1.0000000000000000e+00);
	out[37] = (real_t)(1.0000000000000000e+00);
	out[38] = (real_t)(1.0000000000000000e+00);
	out[48] = (real_t)(1.0000000000000000e+00);
	out[58] = (real_t)(1.0000000000000000e+00);
	out[68] = (real_t)(1.0000000000000000e+00);
	out[78] = (real_t)(1.0000000000000000e+00);
	out[104] = ((a[0]*a[2])*u[2]);
	out[105] = ((a[3]*a[1])*u[2]);

	/* d(residual)/d(u) nonzeros */
	out[134] = (real_t)(1.0000000000000000e+00);
	out[138] = (real_t)(1.0000000000000000e+00);
	out[142] = (a[0]*a[1]);
}
/*
 * Terminal-cost residuals: the three velocities (xd[6..8]) followed by
 * the three positions (xd[0..2]).
 */
void acado_evaluateLSQEndTerm(const real_t* in, real_t* out)
{
	const real_t* xd = in;
	int i;

	for (i = 0; i < 3; ++i) {
		out[i] = xd[6 + i];
		out[3 + i] = xd[i];
	}
}
void acado_setObjQ1Q2( real_t* const tmpFx, real_t* const tmpObjS, real_t* const tmpQ1, real_t* const tmpQ2 )
{
tmpQ2[0] = + tmpFx[0]*tmpObjS[0] + tmpFx[9]*tmpObjS[11] + tmpFx[18]*tmpObjS[22] + tmpFx[27]*tmpObjS[33] + tmpFx[36]*tmpObjS[44] + tmpFx[45]*tmpObjS[55] + tmpFx[54]*tmpObjS[66] + tmpFx[63]*tmpObjS[77] + tmpFx[72]*tmpObjS[88] + tmpFx[81]*tmpObjS[99] + tmpFx[90]*tmpObjS[110];
tmpQ2[1] = + tmpFx[0]*tmpObjS[1] + tmpFx[9]*tmpObjS[12] + tmpFx[18]*tmpObjS[23] + tmpFx[27]*tmpObjS[34] + tmpFx[36]*tmpObjS[45] + tmpFx[45]*tmpObjS[56] + tmpFx[54]*tmpObjS[67] + tmpFx[63]*tmpObjS[78] + tmpFx[72]*tmpObjS[89] + tmpFx[81]*tmpObjS[100] + tmpFx[90]*tmpObjS[111];
tmpQ2[2] = + tmpFx[0]*tmpObjS[2] + tmpFx[9]*tmpObjS[13] + tmpFx[18]*tmpObjS[24] + tmpFx[27]*tmpObjS[35] + tmpFx[36]*tmpObjS[46] + tmpFx[45]*tmpObjS[57] + tmpFx[54]*tmpObjS[68] + tmpFx[63]*tmpObjS[79] + tmpFx[72]*tmpObjS[90] + tmpFx[81]*tmpObjS[101] + tmpFx[90]*tmpObjS[112];
tmpQ2[3] = + tmpFx[0]*tmpObjS[3] + tmpFx[9]*tmpObjS[14] + tmpFx[18]*tmpObjS[25] + tmpFx[27]*tmpObjS[36] + tmpFx[36]*tmpObjS[47] + tmpFx[45]*tmpObjS[58] + tmpFx[54]*tmpObjS[69] + tmpFx[63]*tmpObjS[80] + tmpFx[72]*tmpObjS[91] + tmpFx[81]*tmpObjS[102] + tmpFx[90]*tmpObjS[113];
tmpQ2[4] = + tmpFx[0]*tmpObjS[4] + tmpFx[9]*tmpObjS[15] + tmpFx[18]*tmpObjS[26] + tmpFx[27]*tmpObjS[37] + tmpFx[36]*tmpObjS[48] + tmpFx[45]*tmpObjS[59] + tmpFx[54]*tmpObjS[70] + tmpFx[63]*tmpObjS[81] + tmpFx[72]*tmpObjS[92] + tmpFx[81]*tmpObjS[103] + tmpFx[90]*tmpObjS[114];
tmpQ2[5] = + tmpFx[0]*tmpObjS[5] + tmpFx[9]*tmpObjS[16] + tmpFx[18]*tmpObjS[27] + tmpFx[27]*tmpObjS[38] + tmpFx[36]*tmpObjS[49] + tmpFx[45]*tmpObjS[60] + tmpFx[54]*tmpObjS[71] + tmpFx[63]*tmpObjS[82] + tmpFx[72]*tmpObjS[93] + tmpFx[81]*tmpObjS[104] + tmpFx[90]*tmpObjS[115];
tmpQ2[6] = + tmpFx[0]*tmpObjS[6] + tmpFx[9]*tmpObjS[17] + tmpFx[18]*tmpObjS[28] + tmpFx[27]*tmpObjS[39] + tmpFx[36]*tmpObjS[50] + tmpFx[45]*tmpObjS[61] + tmpFx[54]*tmpObjS[72] + tmpFx[63]*tmpObjS[83] + tmpFx[72]*tmpObjS[94] + tmpFx[81]*tmpObjS[105] + tmpFx[90]*tmpObjS[116];
tmpQ2[7] = + tmpFx[0]*tmpObjS[7] + tmpFx[9]*tmpObjS[18] + tmpFx[18]*tmpObjS[29] + tmpFx[27]*tmpObjS[40] + tmpFx[36]*tmpObjS[51] + tmpFx[45]*tmpObjS[62] + tmpFx[54]*tmpObjS[73] + tmpFx[63]*tmpObjS[84] + tmpFx[72]*tmpObjS[95] + tmpFx[81]*tmpObjS[106] + tmpFx[90]*tmpObjS[117];
tmpQ2[8] = + tmpFx[0]*tmpObjS[8] + tmpFx[9]*tmpObjS[19] + tmpFx[18]*tmpObjS[30] + tmpFx[27]*tmpObjS[41] + tmpFx[36]*tmpObjS[52] + tmpFx[45]*tmpObjS[63] + tmpFx[54]*tmpObjS[74] + tmpFx[63]*tmpObjS[85] + tmpFx[72]*tmpObjS[96] + tmpFx[81]*tmpObjS[107] + tmpFx[90]*tmpObjS[118];
tmpQ2[9] = + tmpFx[0]*tmpObjS[9] + tmpFx[9]*tmpObjS[20] + tmpFx[18]*tmpObjS[31] + tmpFx[27]*tmpObjS[42] + tmpFx[36]*tmpObjS[53] + tmpFx[45]*tmpObjS[64] + tmpFx[54]*tmpObjS[75] + tmpFx[63]*tmpObjS[86] + tmpFx[72]*tmpObjS[97] + tmpFx[81]*tmpObjS[108] + tmpFx[90]*tmpObjS[119];
tmpQ2[10] = + tmpFx[0]*tmpObjS[10] + tmpFx[9]*tmpObjS[21] + tmpFx[18]*tmpObjS[32] + tmpFx[27]*tmpObjS[43] + tmpFx[36]*tmpObjS[54] + tmpFx[45]*tmpObjS[65] + tmpFx[54]*tmpObjS[76] + tmpFx[63]*tmpObjS[87] + tmpFx[72]*tmpObjS[98] + tmpFx[81]*tmpObjS[109] + tmpFx[90]*tmpObjS[120];
tmpQ2[11] = + tmpFx[1]*tmpObjS[0] + tmpFx[10]*tmpObjS[11] + tmpFx[19]*tmpObjS[22] + tmpFx[28]*tmpObjS[33] + tmpFx[37]*tmpObjS[44] + tmpFx[46]*tmpObjS[55] + tmpFx[55]*tmpObjS[66] + tmpFx[64]*tmpObjS[77] + tmpFx[73]*tmpObjS[88] + tmpFx[82]*tmpObjS[99] + tmpFx[91]*tmpObjS[110];
tmpQ2[12] = + tmpFx[1]*tmpObjS[1] + tmpFx[10]*tmpObjS[12] + tmpFx[19]*tmpObjS[23] + tmpFx[28]*tmpObjS[34] + tmpFx[37]*tmpObjS[45] + tmpFx[46]*tmpObjS[56] + tmpFx[55]*tmpObjS[67] + tmpFx[64]*tmpObjS[78] + tmpFx[73]*tmpObjS[89] + tmpFx[82]*tmpObjS[100] + tmpFx[91]*tmpObjS[111];
tmpQ2[13] = + tmpFx[1]*tmpObjS[2] + tmpFx[10]*tmpObjS[13] + tmpFx[19]*tmpObjS[24] + tmpFx[28]*tmpObjS[35] + tmpFx[37]*tmpObjS[46] + tmpFx[46]*tmpObjS[57] + tmpFx[55]*tmpObjS[68] + tmpFx[64]*tmpObjS[79] + tmpFx[73]*tmpObjS[90] + tmpFx[82]*tmpObjS[101] + tmpFx[91]*tmpObjS[112];
tmpQ2[14] = + tmpFx[1]*tmpObjS[3] + tmpFx[10]*tmpObjS[14] + tmpFx[19]*tmpObjS[25] + tmpFx[28]*tmpObjS[36] + tmpFx[37]*tmpObjS[47] + tmpFx[46]*tmpObjS[58] + tmpFx[55]*tmpObjS[69] + tmpFx[64]*tmpObjS[80] + tmpFx[73]*tmpObjS[91] + tmpFx[82]*tmpObjS[102] + tmpFx[91]*tmpObjS[113];
tmpQ2[15] = + tmpFx[1]*tmpObjS[4] + tmpFx[10]*tmpObjS[15] + tmpFx[19]*tmpObjS[26] + tmpFx[28]*tmpObjS[37] + tmpFx[37]*tmpObjS[48] + tmpFx[46]*tmpObjS[59] + tmpFx[55]*tmpObjS[70] + tmpFx[64]*tmpObjS[81] + tmpFx[73]*tmpObjS[92] + tmpFx[82]*tmpObjS[103] + tmpFx[91]*tmpObjS[114];
tmpQ2[16] = + tmpFx[1]*tmpObjS[5] + tmpFx[10]*tmpObjS[16] + tmpFx[19]*tmpObjS[27] + tmpFx[28]*tmpObjS[38] + tmpFx[37]*tmpObjS[49] + tmpFx[46]*tmpObjS[60] + tmpFx[55]*tmpObjS[71] + tmpFx[64]*tmpObjS[82] + tmpFx[73]*tmpObjS[93] + tmpFx[82]*tmpObjS[104] + tmpFx[91]*tmpObjS[115];
tmpQ2[17] = + tmpFx[1]*tmpObjS[6] + tmpFx[10]*tmpObjS[17] + tmpFx[19]*tmpObjS[28] + tmpFx[28]*tmpObjS[39] + tmpFx[37]*tmpObjS[50] + tmpFx[46]*tmpObjS[61] + tmpFx[55]*tmpObjS[72] + tmpFx[64]*tmpObjS[83] + tmpFx[73]*tmpObjS[94] + tmpFx[82]*tmpObjS[105] + tmpFx[91]*tmpObjS[116];
tmpQ2[18] = + tmpFx[1]*tmpObjS[7] + tmpFx[10]*tmpObjS[18] + tmpFx[19]*tmpObjS[29] + tmpFx[28]*tmpObjS[40] + tmpFx[37]*tmpObjS[51] + tmpFx[46]*tmpObjS[62] + tmpFx[55]*tmpObjS[73] + tmpFx[64]*tmpObjS[84] + tmpFx[73]*tmpObjS[95] + tmpFx[82]*tmpObjS[106] + tmpFx[91]*tmpObjS[117];
tmpQ2[19] = + tmpFx[1]*tmpObjS[8] + tmpFx[10]*tmpObjS[19] + tmpFx[19]*tmpObjS[30] + tmpFx[28]*tmpObjS[41] + tmpFx[37]*tmpObjS[52] + tmpFx[46]*tmpObjS[63] + tmpFx[55]*tmpObjS[74] + tmpFx[64]*tmpObjS[85] + tmpFx[73]*tmpObjS[96] + tmpFx[82]*tmpObjS[107] + tmpFx[91]*tmpObjS[118];
tmpQ2[20] = + tmpFx[1]*tmpObjS[9] + tmpFx[10]*tmpObjS[20] + tmpFx[19]*tmpObjS[31] + tmpFx[28]*tmpObjS[42] + tmpFx[37]*tmpObjS[53] + tmpFx[46]*tmpObjS[64] + tmpFx[55]*tmpObjS[75] + tmpFx[64]*tmpObjS[86] + tmpFx[73]*tmpObjS[97] + tmpFx[82]*tmpObjS[108] + tmpFx[91]*tmpObjS[119];
tmpQ2[21] = + tmpFx[1]*tmpObjS[10] + tmpFx[10]*tmpObjS[21] + tmpFx[19]*tmpObjS[32] + tmpFx[28]*tmpObjS[43] + tmpFx[37]*tmpObjS[54] + tmpFx[46]*tmpObjS[65] + tmpFx[55]*tmpObjS[76] + tmpFx[64]*tmpObjS[87] + tmpFx[73]*tmpObjS[98] + tmpFx[82]*tmpObjS[109] + tmpFx[91]*tmpObjS[120];
tmpQ2[22] = + tmpFx[2]*tmpObjS[0] + tmpFx[11]*tmpObjS[11] + tmpFx[20]*tmpObjS[22] + tmpFx[29]*tmpObjS[33] + tmpFx[38]*tmpObjS[44] + tmpFx[47]*tmpObjS[55] + tmpFx[56]*tmpObjS[66] + tmpFx[65]*tmpObjS[77] + tmpFx[74]*tmpObjS[88] + tmpFx[83]*tmpObjS[99] + tmpFx[92]*tmpObjS[110];
tmpQ2[23] = + tmpFx[2]*tmpObjS[1] + tmpFx[11]*tmpObjS[12] + tmpFx[20]*tmpObjS[23] + tmpFx[29]*tmpObjS[34] + tmpFx[38]*tmpObjS[45] + tmpFx[47]*tmpObjS[56] + tmpFx[56]*tmpObjS[67] + tmpFx[65]*tmpObjS[78] + tmpFx[74]*tmpObjS[89] + tmpFx[83]*tmpObjS[100] + tmpFx[92]*tmpObjS[111];
tmpQ2[24] = + tmpFx[2]*tmpObjS[2] + tmpFx[11]*tmpObjS[13] + tmpFx[20]*tmpObjS[24] + tmpFx[29]*tmpObjS[35] + tmpFx[38]*tmpObjS[46] + tmpFx[47]*tmpObjS[57] + tmpFx[56]*tmpObjS[68] + tmpFx[65]*tmpObjS[79] + tmpFx[74]*tmpObjS[90] + tmpFx[83]*tmpObjS[101] + tmpFx[92]*tmpObjS[112];
tmpQ2[25] = + tmpFx[2]*tmpObjS[3] + tmpFx[11]*tmpObjS[14] + tmpFx[20]*tmpObjS[25] + tmpFx[29]*tmpObjS[36] + tmpFx[38]*tmpObjS[47] + tmpFx[47]*tmpObjS[58] + tmpFx[56]*tmpObjS[69] + tmpFx[65]*tmpObjS[80] + tmpFx[74]*tmpObjS[91] + tmpFx[83]*tmpObjS[102] + tmpFx[92]*tmpObjS[113];
tmpQ2[26] = + tmpFx[2]*tmpObjS[4] + tmpFx[11]*tmpObjS[15] + tmpFx[20]*tmpObjS[26] + tmpFx[29]*tmpObjS[37] + tmpFx[38]*tmpObjS[48] + tmpFx[47]*tmpObjS[59] + tmpFx[56]*tmpObjS[70] + tmpFx[65]*tmpObjS[81] + tmpFx[74]*tmpObjS[92] + tmpFx[83]*tmpObjS[103] + tmpFx[92]*tmpObjS[114];
tmpQ2[27] = + tmpFx[2]*tmpObjS[5] + tmpFx[11]*tmpObjS[16] + tmpFx[20]*tmpObjS[27] + tmpFx[29]*tmpObjS[38] + tmpFx[38]*tmpObjS[49] + tmpFx[47]*tmpObjS[60] + tmpFx[56]*tmpObjS[71] + tmpFx[65]*tmpObjS[82] + tmpFx[74]*tmpObjS[93] + tmpFx[83]*tmpObjS[104] + tmpFx[92]*tmpObjS[115];
tmpQ2[28] = + tmpFx[2]*tmpObjS[6] + tmpFx[11]*tmpObjS[17] + tmpFx[20]*tmpObjS[28] + tmpFx[29]*tmpObjS[39] + tmpFx[38]*tmpObjS[50] + tmpFx[47]*tmpObjS[61] + tmpFx[56]*tmpObjS[72] + tmpFx[65]*tmpObjS[83] + tmpFx[74]*tmpObjS[94] + tmpFx[83]*tmpObjS[105] + tmpFx[92]*tmpObjS[116];
tmpQ2[29] = + tmpFx[2]*tmpObjS[7] + tmpFx[11]*tmpObjS[18] + tmpFx[20]*tmpObjS[29] + tmpFx[29]*tmpObjS[40] + tmpFx[38]*tmpObjS[51] + tmpFx[47]*tmpObjS[62] + tmpFx[56]*tmpObjS[73] + tmpFx[65]*tmpObjS[84] + tmpFx[74]*tmpObjS[95] + tmpFx[83]*tmpObjS[106] + tmpFx[92]*tmpObjS[117];
tmpQ2[30] = + tmpFx[2]*tmpObjS[8] + tmpFx[11]*tmpObjS[19] + tmpFx[20]*tmpObjS[30] + tmpFx[29]*tmpObjS[41] + tmpFx[38]*tmpObjS[52] + tmpFx[47]*tmpObjS[63] + tmpFx[56]*tmpObjS[74] + tmpFx[65]*tmpObjS[85] + tmpFx[74]*tmpObjS[96] + tmpFx[83]*tmpObjS[107] + tmpFx[92]*tmpObjS[118];
tmpQ2[31] = + tmpFx[2]*tmpObjS[9] + tmpFx[11]*tmpObjS[20] + tmpFx[20]*tmpObjS[31] + tmpFx[29]*tmpObjS[42] + tmpFx[38]*tmpObjS[53] + tmpFx[47]*tmpObjS[64] + tmpFx[56]*tmpObjS[75] + tmpFx[65]*tmpObjS[86] + tmpFx[74]*tmpObjS[97] + tmpFx[83]*tmpObjS[108] + tmpFx[92]*tmpObjS[119];
tmpQ2[32] = + tmpFx[2]*tmpObjS[10] + tmpFx[11]*tmpObjS[21] + tmpFx[20]*tmpObjS[32] + tmpFx[29]*tmpObjS[43] + tmpFx[38]*tmpObjS[54] + tmpFx[47]*tmpObjS[65] + tmpFx[56]*tmpObjS[76] + tmpFx[65]*tmpObjS[87] + tmpFx[74]*tmpObjS[98] + tmpFx[83]*tmpObjS[109] + tmpFx[92]*tmpObjS[120];
tmpQ2[33] = + tmpFx[3]*tmpObjS[0] + tmpFx[12]*tmpObjS[11] + tmpFx[21]*tmpObjS[22] + tmpFx[30]*tmpObjS[33] + tmpFx[39]*tmpObjS[44] + tmpFx[48]*tmpObjS[55] + tmpFx[57]*tmpObjS[66] + tmpFx[66]*tmpObjS[77] + tmpFx[75]*tmpObjS[88] + tmpFx[84]*tmpObjS[99] + tmpFx[93]*tmpObjS[110];
tmpQ2[34] = + tmpFx[3]*tmpObjS[1] + tmpFx[12]*tmpObjS[12] + tmpFx[21]*tmpObjS[23] + tmpFx[30]*tmpObjS[34] + tmpFx[39]*tmpObjS[45] + tmpFx[48]*tmpObjS[56] + tmpFx[57]*tmpObjS[67] + tmpFx[66]*tmpObjS[78] + tmpFx[75]*tmpObjS[89] + tmpFx[84]*tmpObjS[100] + tmpFx[93]*tmpObjS[111];
tmpQ2[35] = + tmpFx[3]*tmpObjS[2] + tmpFx[12]*tmpObjS[13] + tmpFx[21]*tmpObjS[24] + tmpFx[30]*tmpObjS[35] + tmpFx[39]*tmpObjS[46] + tmpFx[48]*tmpObjS[57] + tmpFx[57]*tmpObjS[68] + tmpFx[66]*tmpObjS[79] + tmpFx[75]*tmpObjS[90] + tmpFx[84]*tmpObjS[101] + tmpFx[93]*tmpObjS[112];
tmpQ2[36] = + tmpFx[3]*tmpObjS[3] + tmpFx[12]*tmpObjS[14] + tmpFx[21]*tmpObjS[25] + tmpFx[30]*tmpObjS[36] + tmpFx[39]*tmpObjS[47] + tmpFx[48]*tmpObjS[58] + tmpFx[57]*tmpObjS[69] + tmpFx[66]*tmpObjS[80] + tmpFx[75]*tmpObjS[91] + tmpFx[84]*tmpObjS[102] + tmpFx[93]*tmpObjS[113];
tmpQ2[37] = + tmpFx[3]*tmpObjS[4] + tmpFx[12]*tmpObjS[15] + tmpFx[21]*tmpObjS[26] + tmpFx[30]*tmpObjS[37] + tmpFx[39]*tmpObjS[48] + tmpFx[48]*tmpObjS[59] + tmpFx[57]*tmpObjS[70] + tmpFx[66]*tmpObjS[81] + tmpFx[75]*tmpObjS[92] + tmpFx[84]*tmpObjS[103] + tmpFx[93]*tmpObjS[114];
tmpQ2[38] = + tmpFx[3]*tmpObjS[5] + tmpFx[12]*tmpObjS[16] + tmpFx[21]*tmpObjS[27] + tmpFx[30]*tmpObjS[38] + tmpFx[39]*tmpObjS[49] + tmpFx[48]*tmpObjS[60] + tmpFx[57]*tmpObjS[71] + tmpFx[66]*tmpObjS[82] + tmpFx[75]*tmpObjS[93] + tmpFx[84]*tmpObjS[104] + tmpFx[93]*tmpObjS[115];
tmpQ2[39] = + tmpFx[3]*tmpObjS[6] + tmpFx[12]*tmpObjS[17] + tmpFx[21]*tmpObjS[28] + tmpFx[30]*tmpObjS[39] + tmpFx[39]*tmpObjS[50] + tmpFx[48]*tmpObjS[61] + tmpFx[57]*tmpObjS[72] + tmpFx[66]*tmpObjS[83] + tmpFx[75]*tmpObjS[94] + tmpFx[84]*tmpObjS[105] + tmpFx[93]*tmpObjS[116];
tmpQ2[40] = + tmpFx[3]*tmpObjS[7] + tmpFx[12]*tmpObjS[18] + tmpFx[21]*tmpObjS[29] + tmpFx[30]*tmpObjS[40] + tmpFx[39]*tmpObjS[51] + tmpFx[48]*tmpObjS[62] + tmpFx[57]*tmpObjS[73] + tmpFx[66]*tmpObjS[84] + tmpFx[75]*tmpObjS[95] + tmpFx[84]*tmpObjS[106] + tmpFx[93]*tmpObjS[117];
tmpQ2[41] = + tmpFx[3]*tmpObjS[8] + tmpFx[12]*tmpObjS[19] + tmpFx[21]*tmpObjS[30] + tmpFx[30]*tmpObjS[41] + tmpFx[39]*tmpObjS[52] + tmpFx[48]*tmpObjS[63] + tmpFx[57]*tmpObjS[74] + tmpFx[66]*tmpObjS[85] + tmpFx[75]*tmpObjS[96] + tmpFx[84]*tmpObjS[107] + tmpFx[93]*tmpObjS[118];
tmpQ2[42] = + tmpFx[3]*tmpObjS[9] + tmpFx[12]*tmpObjS[20] + tmpFx[21]*tmpObjS[31] + tmpFx[30]*tmpObjS[42] + tmpFx[39]*tmpObjS[53] + tmpFx[48]*tmpObjS[64] + tmpFx[57]*tmpObjS[75] + tmpFx[66]*tmpObjS[86] + tmpFx[75]*tmpObjS[97] + tmpFx[84]*tmpObjS[108] + tmpFx[93]*tmpObjS[119];
tmpQ2[43] = + tmpFx[3]*tmpObjS[10] + tmpFx[12]*tmpObjS[21] + tmpFx[21]*tmpObjS[32] + tmpFx[30]*tmpObjS[43] + tmpFx[39]*tmpObjS[54] + tmpFx[48]*tmpObjS[65] + tmpFx[57]*tmpObjS[76] + tmpFx[66]*tmpObjS[87] + tmpFx[75]*tmpObjS[98] + tmpFx[84]*tmpObjS[109] + tmpFx[93]*tmpObjS[120];
tmpQ2[44] = + tmpFx[4]*tmpObjS[0] + tmpFx[13]*tmpObjS[11] + tmpFx[22]*tmpObjS[22] + tmpFx[31]*tmpObjS[33] + tmpFx[40]*tmpObjS[44] + tmpFx[49]*tmpObjS[55] + tmpFx[58]*tmpObjS[66] + tmpFx[67]*tmpObjS[77] + tmpFx[76]*tmpObjS[88] + tmpFx[85]*tmpObjS[99] + tmpFx[94]*tmpObjS[110];
tmpQ2[45] = + tmpFx[4]*tmpObjS[1] + tmpFx[13]*tmpObjS[12] + tmpFx[22]*tmpObjS[23] + tmpFx[31]*tmpObjS[34] + tmpFx[40]*tmpObjS[45] + tmpFx[49]*tmpObjS[56] + tmpFx[58]*tmpObjS[67] + tmpFx[67]*tmpObjS[78] + tmpFx[76]*tmpObjS[89] + tmpFx[85]*tmpObjS[100] + tmpFx[94]*tmpObjS[111];
tmpQ2[46] = + tmpFx[4]*tmpObjS[2] + tmpFx[13]*tmpObjS[13] + tmpFx[22]*tmpObjS[24] + tmpFx[31]*tmpObjS[35] + tmpFx[40]*tmpObjS[46] + tmpFx[49]*tmpObjS[57] + tmpFx[58]*tmpObjS[68] + tmpFx[67]*tmpObjS[79] + tmpFx[76]*tmpObjS[90] + tmpFx[85]*tmpObjS[101] + tmpFx[94]*tmpObjS[112];
tmpQ2[47] = + tmpFx[4]*tmpObjS[3] + tmpFx[13]*tmpObjS[14] + tmpFx[22]*tmpObjS[25] + tmpFx[31]*tmpObjS[36] + tmpFx[40]*tmpObjS[47] + tmpFx[49]*tmpObjS[58] + tmpFx[58]*tmpObjS[69] + tmpFx[67]*tmpObjS[80] + tmpFx[76]*tmpObjS[91] + tmpFx[85]*tmpObjS[102] + tmpFx[94]*tmpObjS[113];
tmpQ2[48] = + tmpFx[4]*tmpObjS[4] + tmpFx[13]*tmpObjS[15] + tmpFx[22]*tmpObjS[26] + tmpFx[31]*tmpObjS[37] + tmpFx[40]*tmpObjS[48] + tmpFx[49]*tmpObjS[59] + tmpFx[58]*tmpObjS[70] + tmpFx[67]*tmpObjS[81] + tmpFx[76]*tmpObjS[92] + tmpFx[85]*tmpObjS[103] + tmpFx[94]*tmpObjS[114];
tmpQ2[49] = + tmpFx[4]*tmpObjS[5] + tmpFx[13]*tmpObjS[16] + tmpFx[22]*tmpObjS[27] + tmpFx[31]*tmpObjS[38] + tmpFx[40]*tmpObjS[49] + tmpFx[49]*tmpObjS[60] + tmpFx[58]*tmpObjS[71] + tmpFx[67]*tmpObjS[82] + tmpFx[76]*tmpObjS[93] + tmpFx[85]*tmpObjS[104] + tmpFx[94]*tmpObjS[115];
tmpQ2[50] = + tmpFx[4]*tmpObjS[6] + tmpFx[13]*tmpObjS[17] + tmpFx[22]*tmpObjS[28] + tmpFx[31]*tmpObjS[39] + tmpFx[40]*tmpObjS[50] + tmpFx[49]*tmpObjS[61] + tmpFx[58]*tmpObjS[72] + tmpFx[67]*tmpObjS[83] + tmpFx[76]*tmpObjS[94] + tmpFx[85]*tmpObjS[105] + tmpFx[94]*tmpObjS[116];
tmpQ2[51] = + tmpFx[4]*tmpObjS[7] + tmpFx[13]*tmpObjS[18] + tmpFx[22]*tmpObjS[29] + tmpFx[31]*tmpObjS[40] + tmpFx[40]*tmpObjS[51] + tmpFx[49]*tmpObjS[62] + tmpFx[58]*tmpObjS[73] + tmpFx[67]*tmpObjS[84] + tmpFx[76]*tmpObjS[95] + tmpFx[85]*tmpObjS[106] + tmpFx[94]*tmpObjS[117];
tmpQ2[52] = + tmpFx[4]*tmpObjS[8] + tmpFx[13]*tmpObjS[19] + tmpFx[22]*tmpObjS[30] + tmpFx[31]*tmpObjS[41] + tmpFx[40]*tmpObjS[52] + tmpFx[49]*tmpObjS[63] + tmpFx[58]*tmpObjS[74] + tmpFx[67]*tmpObjS[85] + tmpFx[76]*tmpObjS[96] + tmpFx[85]*tmpObjS[107] + tmpFx[94]*tmpObjS[118];
tmpQ2[53] = + tmpFx[4]*tmpObjS[9] + tmpFx[13]*tmpObjS[20] + tmpFx[22]*tmpObjS[31] + tmpFx[31]*tmpObjS[42] + tmpFx[40]*tmpObjS[53] + tmpFx[49]*tmpObjS[64] + tmpFx[58]*tmpObjS[75] + tmpFx[67]*tmpObjS[86] + tmpFx[76]*tmpObjS[97] + tmpFx[85]*tmpObjS[108] + tmpFx[94]*tmpObjS[119];
tmpQ2[54] = + tmpFx[4]*tmpObjS[10] + tmpFx[13]*tmpObjS[21] + tmpFx[22]*tmpObjS[32] + tmpFx[31]*tmpObjS[43] + tmpFx[40]*tmpObjS[54] + tmpFx[49]*tmpObjS[65] + tmpFx[58]*tmpObjS[76] + tmpFx[67]*tmpObjS[87] + tmpFx[76]*tmpObjS[98] + tmpFx[85]*tmpObjS[109] + tmpFx[94]*tmpObjS[120];
tmpQ2[55] = + tmpFx[5]*tmpObjS[0] + tmpFx[14]*tmpObjS[11] + tmpFx[23]*tmpObjS[22] + tmpFx[32]*tmpObjS[33] + tmpFx[41]*tmpObjS[44] + tmpFx[50]*tmpObjS[55] + tmpFx[59]*tmpObjS[66] + tmpFx[68]*tmpObjS[77] + tmpFx[77]*tmpObjS[88] + tmpFx[86]*tmpObjS[99] + tmpFx[95]*tmpObjS[110];
tmpQ2[56] = + tmpFx[5]*tmpObjS[1] + tmpFx[14]*tmpObjS[12] + tmpFx[23]*tmpObjS[23] + tmpFx[32]*tmpObjS[34] + tmpFx[41]*tmpObjS[45] + tmpFx[50]*tmpObjS[56] + tmpFx[59]*tmpObjS[67] + tmpFx[68]*tmpObjS[78] + tmpFx[77]*tmpObjS[89] + tmpFx[86]*tmpObjS[100] + tmpFx[95]*tmpObjS[111];
tmpQ2[57] = + tmpFx[5]*tmpObjS[2] + tmpFx[14]*tmpObjS[13] + tmpFx[23]*tmpObjS[24] + tmpFx[32]*tmpObjS[35] + tmpFx[41]*tmpObjS[46] + tmpFx[50]*tmpObjS[57] + tmpFx[59]*tmpObjS[68] + tmpFx[68]*tmpObjS[79] + tmpFx[77]*tmpObjS[90] + tmpFx[86]*tmpObjS[101] + tmpFx[95]*tmpObjS[112];
tmpQ2[58] = + tmpFx[5]*tmpObjS[3] + tmpFx[14]*tmpObjS[14] + tmpFx[23]*tmpObjS[25] + tmpFx[32]*tmpObjS[36] + tmpFx[41]*tmpObjS[47] + tmpFx[50]*tmpObjS[58] + tmpFx[59]*tmpObjS[69] + tmpFx[68]*tmpObjS[80] + tmpFx[77]*tmpObjS[91] + tmpFx[86]*tmpObjS[102] + tmpFx[95]*tmpObjS[113];
tmpQ2[59] = + tmpFx[5]*tmpObjS[4] + tmpFx[14]*tmpObjS[15] + tmpFx[23]*tmpObjS[26] + tmpFx[32]*tmpObjS[37] + tmpFx[41]*tmpObjS[48] + tmpFx[50]*tmpObjS[59] + tmpFx[59]*tmpObjS[70] + tmpFx[68]*tmpObjS[81] + tmpFx[77]*tmpObjS[92] + tmpFx[86]*tmpObjS[103] + tmpFx[95]*tmpObjS[114];
tmpQ2[60] = + tmpFx[5]*tmpObjS[5] + tmpFx[14]*tmpObjS[16] + tmpFx[23]*tmpObjS[27] + tmpFx[32]*tmpObjS[38] + tmpFx[41]*tmpObjS[49] + tmpFx[50]*tmpObjS[60] + tmpFx[59]*tmpObjS[71] + tmpFx[68]*tmpObjS[82] + tmpFx[77]*tmpObjS[93] + tmpFx[86]*tmpObjS[104] + tmpFx[95]*tmpObjS[115];
tmpQ2[61] = + tmpFx[5]*tmpObjS[6] + tmpFx[14]*tmpObjS[17] + tmpFx[23]*tmpObjS[28] + tmpFx[32]*tmpObjS[39] + tmpFx[41]*tmpObjS[50] + tmpFx[50]*tmpObjS[61] + tmpFx[59]*tmpObjS[72] + tmpFx[68]*tmpObjS[83] + tmpFx[77]*tmpObjS[94] + tmpFx[86]*tmpObjS[105] + tmpFx[95]*tmpObjS[116];
tmpQ2[62] = + tmpFx[5]*tmpObjS[7] + tmpFx[14]*tmpObjS[18] + tmpFx[23]*tmpObjS[29] + tmpFx[32]*tmpObjS[40] + tmpFx[41]*tmpObjS[51] + tmpFx[50]*tmpObjS[62] + tmpFx[59]*tmpObjS[73] + tmpFx[68]*tmpObjS[84] + tmpFx[77]*tmpObjS[95] + tmpFx[86]*tmpObjS[106] + tmpFx[95]*tmpObjS[117];
tmpQ2[63] = + tmpFx[5]*tmpObjS[8] + tmpFx[14]*tmpObjS[19] + tmpFx[23]*tmpObjS[30] + tmpFx[32]*tmpObjS[41] + tmpFx[41]*tmpObjS[52] + tmpFx[50]*tmpObjS[63] + tmpFx[59]*tmpObjS[74] + tmpFx[68]*tmpObjS[85] + tmpFx[77]*tmpObjS[96] + tmpFx[86]*tmpObjS[107] + tmpFx[95]*tmpObjS[118];
tmpQ2[64] = + tmpFx[5]*tmpObjS[9] + tmpFx[14]*tmpObjS[20] + tmpFx[23]*tmpObjS[31] + tmpFx[32]*tmpObjS[42] + tmpFx[41]*tmpObjS[53] + tmpFx[50]*tmpObjS[64] + tmpFx[59]*tmpObjS[75] + tmpFx[68]*tmpObjS[86] + tmpFx[77]*tmpObjS[97] + tmpFx[86]*tmpObjS[108] + tmpFx[95]*tmpObjS[119];
tmpQ2[65] = + tmpFx[5]*tmpObjS[10] + tmpFx[14]*tmpObjS[21] + tmpFx[23]*tmpObjS[32] + tmpFx[32]*tmpObjS[43] + tmpFx[41]*tmpObjS[54] + tmpFx[50]*tmpObjS[65] + tmpFx[59]*tmpObjS[76] + tmpFx[68]*tmpObjS[87] + tmpFx[77]*tmpObjS[98] + tmpFx[86]*tmpObjS[109] + tmpFx[95]*tmpObjS[120];
tmpQ2[66] = + tmpFx[6]*tmpObjS[0] + tmpFx[15]*tmpObjS[11] + tmpFx[24]*tmpObjS[22] + tmpFx[33]*tmpObjS[33] + tmpFx[42]*tmpObjS[44] + tmpFx[51]*tmpObjS[55] + tmpFx[60]*tmpObjS[66] + tmpFx[69]*tmpObjS[77] + tmpFx[78]*tmpObjS[88] + tmpFx[87]*tmpObjS[99] + tmpFx[96]*tmpObjS[110];
tmpQ2[67] = + tmpFx[6]*tmpObjS[1] + tmpFx[15]*tmpObjS[12] + tmpFx[24]*tmpObjS[23] + tmpFx[33]*tmpObjS[34] + tmpFx[42]*tmpObjS[45] + tmpFx[51]*tmpObjS[56] + tmpFx[60]*tmpObjS[67] + tmpFx[69]*tmpObjS[78] + tmpFx[78]*tmpObjS[89] + tmpFx[87]*tmpObjS[100] + tmpFx[96]*tmpObjS[111];
tmpQ2[68] = + tmpFx[6]*tmpObjS[2] + tmpFx[15]*tmpObjS[13] + tmpFx[24]*tmpObjS[24] + tmpFx[33]*tmpObjS[35] + tmpFx[42]*tmpObjS[46] + tmpFx[51]*tmpObjS[57] + tmpFx[60]*tmpObjS[68] + tmpFx[69]*tmpObjS[79] + tmpFx[78]*tmpObjS[90] + tmpFx[87]*tmpObjS[101] + tmpFx[96]*tmpObjS[112];
tmpQ2[69] = + tmpFx[6]*tmpObjS[3] + tmpFx[15]*tmpObjS[14] + tmpFx[24]*tmpObjS[25] + tmpFx[33]*tmpObjS[36] + tmpFx[42]*tmpObjS[47] + tmpFx[51]*tmpObjS[58] + tmpFx[60]*tmpObjS[69] + tmpFx[69]*tmpObjS[80] + tmpFx[78]*tmpObjS[91] + tmpFx[87]*tmpObjS[102] + tmpFx[96]*tmpObjS[113];
tmpQ2[70] = + tmpFx[6]*tmpObjS[4] + tmpFx[15]*tmpObjS[15] + tmpFx[24]*tmpObjS[26] + tmpFx[33]*tmpObjS[37] + tmpFx[42]*tmpObjS[48] + tmpFx[51]*tmpObjS[59] + tmpFx[60]*tmpObjS[70] + tmpFx[69]*tmpObjS[81] + tmpFx[78]*tmpObjS[92] + tmpFx[87]*tmpObjS[103] + tmpFx[96]*tmpObjS[114];
tmpQ2[71] = + tmpFx[6]*tmpObjS[5] + tmpFx[15]*tmpObjS[16] + tmpFx[24]*tmpObjS[27] + tmpFx[33]*tmpObjS[38] + tmpFx[42]*tmpObjS[49] + tmpFx[51]*tmpObjS[60] + tmpFx[60]*tmpObjS[71] + tmpFx[69]*tmpObjS[82] + tmpFx[78]*tmpObjS[93] + tmpFx[87]*tmpObjS[104] + tmpFx[96]*tmpObjS[115];
tmpQ2[72] = + tmpFx[6]*tmpObjS[6] + tmpFx[15]*tmpObjS[17] + tmpFx[24]*tmpObjS[28] + tmpFx[33]*tmpObjS[39] + tmpFx[42]*tmpObjS[50] + tmpFx[51]*tmpObjS[61] + tmpFx[60]*tmpObjS[72] + tmpFx[69]*tmpObjS[83] + tmpFx[78]*tmpObjS[94] + tmpFx[87]*tmpObjS[105] + tmpFx[96]*tmpObjS[116];
tmpQ2[73] = + tmpFx[6]*tmpObjS[7] + tmpFx[15]*tmpObjS[18] + tmpFx[24]*tmpObjS[29] + tmpFx[33]*tmpObjS[40] + tmpFx[42]*tmpObjS[51] + tmpFx[51]*tmpObjS[62] + tmpFx[60]*tmpObjS[73] + tmpFx[69]*tmpObjS[84] + tmpFx[78]*tmpObjS[95] + tmpFx[87]*tmpObjS[106] + tmpFx[96]*tmpObjS[117];
tmpQ2[74] = + tmpFx[6]*tmpObjS[8] + tmpFx[15]*tmpObjS[19] + tmpFx[24]*tmpObjS[30] + tmpFx[33]*tmpObjS[41] + tmpFx[42]*tmpObjS[52] + tmpFx[51]*tmpObjS[63] + tmpFx[60]*tmpObjS[74] + tmpFx[69]*tmpObjS[85] + tmpFx[78]*tmpObjS[96] + tmpFx[87]*tmpObjS[107] + tmpFx[96]*tmpObjS[118];
tmpQ2[75] = + tmpFx[6]*tmpObjS[9] + tmpFx[15]*tmpObjS[20] + tmpFx[24]*tmpObjS[31] + tmpFx[33]*tmpObjS[42] + tmpFx[42]*tmpObjS[53] + tmpFx[51]*tmpObjS[64] + tmpFx[60]*tmpObjS[75] + tmpFx[69]*tmpObjS[86] + tmpFx[78]*tmpObjS[97] + tmpFx[87]*tmpObjS[108] + tmpFx[96]*tmpObjS[119];
tmpQ2[76] = + tmpFx[6]*tmpObjS[10] + tmpFx[15]*tmpObjS[21] + tmpFx[24]*tmpObjS[32] + tmpFx[33]*tmpObjS[43] + tmpFx[42]*tmpObjS[54] + tmpFx[51]*tmpObjS[65] + tmpFx[60]*tmpObjS[76] + tmpFx[69]*tmpObjS[87] + tmpFx[78]*tmpObjS[98] + tmpFx[87]*tmpObjS[109] + tmpFx[96]*tmpObjS[120];
tmpQ2[77] = + tmpFx[7]*tmpObjS[0] + tmpFx[16]*tmpObjS[11] + tmpFx[25]*tmpObjS[22] + tmpFx[34]*tmpObjS[33] + tmpFx[43]*tmpObjS[44] + tmpFx[52]*tmpObjS[55] + tmpFx[61]*tmpObjS[66] + tmpFx[70]*tmpObjS[77] + tmpFx[79]*tmpObjS[88] + tmpFx[88]*tmpObjS[99] + tmpFx[97]*tmpObjS[110];
tmpQ2[78] = + tmpFx[7]*tmpObjS[1] + tmpFx[16]*tmpObjS[12] + tmpFx[25]*tmpObjS[23] + tmpFx[34]*tmpObjS[34] + tmpFx[43]*tmpObjS[45] + tmpFx[52]*tmpObjS[56] + tmpFx[61]*tmpObjS[67] + tmpFx[70]*tmpObjS[78] + tmpFx[79]*tmpObjS[89] + tmpFx[88]*tmpObjS[100] + tmpFx[97]*tmpObjS[111];
tmpQ2[79] = + tmpFx[7]*tmpObjS[2] + tmpFx[16]*tmpObjS[13] + tmpFx[25]*tmpObjS[24] + tmpFx[34]*tmpObjS[35] + tmpFx[43]*tmpObjS[46] + tmpFx[52]*tmpObjS[57] + tmpFx[61]*tmpObjS[68] + tmpFx[70]*tmpObjS[79] + tmpFx[79]*tmpObjS[90] + tmpFx[88]*tmpObjS[101] + tmpFx[97]*tmpObjS[112];
tmpQ2[80] = + tmpFx[7]*tmpObjS[3] + tmpFx[16]*tmpObjS[14] + tmpFx[25]*tmpObjS[25] + tmpFx[34]*tmpObjS[36] + tmpFx[43]*tmpObjS[47] + tmpFx[52]*tmpObjS[58] + tmpFx[61]*tmpObjS[69] + tmpFx[70]*tmpObjS[80] + tmpFx[79]*tmpObjS[91] + tmpFx[88]*tmpObjS[102] + tmpFx[97]*tmpObjS[113];
tmpQ2[81] = + tmpFx[7]*tmpObjS[4] + tmpFx[16]*tmpObjS[15] + tmpFx[25]*tmpObjS[26] + tmpFx[34]*tmpObjS[37] + tmpFx[43]*tmpObjS[48] + tmpFx[52]*tmpObjS[59] + tmpFx[61]*tmpObjS[70] + tmpFx[70]*tmpObjS[81] + tmpFx[79]*tmpObjS[92] + tmpFx[88]*tmpObjS[103] + tmpFx[97]*tmpObjS[114];
tmpQ2[82] = + tmpFx[7]*tmpObjS[5] + tmpFx[16]*tmpObjS[16] + tmpFx[25]*tmpObjS[27] + tmpFx[34]*tmpObjS[38] + tmpFx[43]*tmpObjS[49] + tmpFx[52]*tmpObjS[60] + tmpFx[61]*tmpObjS[71] + tmpFx[70]*tmpObjS[82] + tmpFx[79]*tmpObjS[93] + tmpFx[88]*tmpObjS[104] + tmpFx[97]*tmpObjS[115];
tmpQ2[83] = + tmpFx[7]*tmpObjS[6] + tmpFx[16]*tmpObjS[17] + tmpFx[25]*tmpObjS[28] + tmpFx[34]*tmpObjS[39] + tmpFx[43]*tmpObjS[50] + tmpFx[52]*tmpObjS[61] + tmpFx[61]*tmpObjS[72] + tmpFx[70]*tmpObjS[83] + tmpFx[79]*tmpObjS[94] + tmpFx[88]*tmpObjS[105] + tmpFx[97]*tmpObjS[116];
tmpQ2[84] = + tmpFx[7]*tmpObjS[7] + tmpFx[16]*tmpObjS[18] + tmpFx[25]*tmpObjS[29] + tmpFx[34]*tmpObjS[40] + tmpFx[43]*tmpObjS[51] + tmpFx[52]*tmpObjS[62] + tmpFx[61]*tmpObjS[73] + tmpFx[70]*tmpObjS[84] + tmpFx[79]*tmpObjS[95] + tmpFx[88]*tmpObjS[106] + tmpFx[97]*tmpObjS[117];
tmpQ2[85] = + tmpFx[7]*tmpObjS[8] + tmpFx[16]*tmpObjS[19] + tmpFx[25]*tmpObjS[30] + tmpFx[34]*tmpObjS[41] + tmpFx[43]*tmpObjS[52] + tmpFx[52]*tmpObjS[63] + tmpFx[61]*tmpObjS[74] + tmpFx[70]*tmpObjS[85] + tmpFx[79]*tmpObjS[96] + tmpFx[88]*tmpObjS[107] + tmpFx[97]*tmpObjS[118];
tmpQ2[86] = + tmpFx[7]*tmpObjS[9] + tmpFx[16]*tmpObjS[20] + tmpFx[25]*tmpObjS[31] + tmpFx[34]*tmpObjS[42] + tmpFx[43]*tmpObjS[53] + tmpFx[52]*tmpObjS[64] + tmpFx[61]*tmpObjS[75] + tmpFx[70]*tmpObjS[86] + tmpFx[79]*tmpObjS[97] + tmpFx[88]*tmpObjS[108] + tmpFx[97]*tmpObjS[119];
tmpQ2[87] = + tmpFx[7]*tmpObjS[10] + tmpFx[16]*tmpObjS[21] + tmpFx[25]*tmpObjS[32] + tmpFx[34]*tmpObjS[43] + tmpFx[43]*tmpObjS[54] + tmpFx[52]*tmpObjS[65] + tmpFx[61]*tmpObjS[76] + tmpFx[70]*tmpObjS[87] + tmpFx[79]*tmpObjS[98] + tmpFx[88]*tmpObjS[109] + tmpFx[97]*tmpObjS[120];
tmpQ2[88] = + tmpFx[8]*tmpObjS[0] + tmpFx[17]*tmpObjS[11] + tmpFx[26]*tmpObjS[22] + tmpFx[35]*tmpObjS[33] + tmpFx[44]*tmpObjS[44] + tmpFx[53]*tmpObjS[55] + tmpFx[62]*tmpObjS[66] + tmpFx[71]*tmpObjS[77] + tmpFx[80]*tmpObjS[88] + tmpFx[89]*tmpObjS[99] + tmpFx[98]*tmpObjS[110];
tmpQ2[89] = + tmpFx[8]*tmpObjS[1] + tmpFx[17]*tmpObjS[12] + tmpFx[26]*tmpObjS[23] + tmpFx[35]*tmpObjS[34] + tmpFx[44]*tmpObjS[45] + tmpFx[53]*tmpObjS[56] + tmpFx[62]*tmpObjS[67] + tmpFx[71]*tmpObjS[78] + tmpFx[80]*tmpObjS[89] + tmpFx[89]*tmpObjS[100] + tmpFx[98]*tmpObjS[111];
tmpQ2[90] = + tmpFx[8]*tmpObjS[2] + tmpFx[17]*tmpObjS[13] + tmpFx[26]*tmpObjS[24] + tmpFx[35]*tmpObjS[35] + tmpFx[44]*tmpObjS[46] + tmpFx[53]*tmpObjS[57] + tmpFx[62]*tmpObjS[68] + tmpFx[71]*tmpObjS[79] + tmpFx[80]*tmpObjS[90] + tmpFx[89]*tmpObjS[101] + tmpFx[98]*tmpObjS[112];
tmpQ2[91] = + tmpFx[8]*tmpObjS[3] + tmpFx[17]*tmpObjS[14] + tmpFx[26]*tmpObjS[25] + tmpFx[35]*tmpObjS[36] + tmpFx[44]*tmpObjS[47] + tmpFx[53]*tmpObjS[58] + tmpFx[62]*tmpObjS[69] + tmpFx[71]*tmpObjS[80] + tmpFx[80]*tmpObjS[91] + tmpFx[89]*tmpObjS[102] + tmpFx[98]*tmpObjS[113];
tmpQ2[92] = + tmpFx[8]*tmpObjS[4] + tmpFx[17]*tmpObjS[15] + tmpFx[26]*tmpObjS[26] + tmpFx[35]*tmpObjS[37] + tmpFx[44]*tmpObjS[48] + tmpFx[53]*tmpObjS[59] + tmpFx[62]*tmpObjS[70] + tmpFx[71]*tmpObjS[81] + tmpFx[80]*tmpObjS[92] + tmpFx[89]*tmpObjS[103] + tmpFx[98]*tmpObjS[114];
tmpQ2[93] = + tmpFx[8]*tmpObjS[5] + tmpFx[17]*tmpObjS[16] + tmpFx[26]*tmpObjS[27] + tmpFx[35]*tmpObjS[38] + tmpFx[44]*tmpObjS[49] + tmpFx[53]*tmpObjS[60] + tmpFx[62]*tmpObjS[71] + tmpFx[71]*tmpObjS[82] + tmpFx[80]*tmpObjS[93] + tmpFx[89]*tmpObjS[104] + tmpFx[98]*tmpObjS[115];
tmpQ2[94] = + tmpFx[8]*tmpObjS[6] + tmpFx[17]*tmpObjS[17] + tmpFx[26]*tmpObjS[28] + tmpFx[35]*tmpObjS[39] + tmpFx[44]*tmpObjS[50] + tmpFx[53]*tmpObjS[61] + tmpFx[62]*tmpObjS[72] + tmpFx[71]*tmpObjS[83] + tmpFx[80]*tmpObjS[94] + tmpFx[89]*tmpObjS[105] + tmpFx[98]*tmpObjS[116];
tmpQ2[95] = + tmpFx[8]*tmpObjS[7] + tmpFx[17]*tmpObjS[18] + tmpFx[26]*tmpObjS[29] + tmpFx[35]*tmpObjS[40] + tmpFx[44]*tmpObjS[51] + tmpFx[53]*tmpObjS[62] + tmpFx[62]*tmpObjS[73] + tmpFx[71]*tmpObjS[84] + tmpFx[80]*tmpObjS[95] + tmpFx[89]*tmpObjS[106] + tmpFx[98]*tmpObjS[117];
tmpQ2[96] = + tmpFx[8]*tmpObjS[8] + tmpFx[17]*tmpObjS[19] + tmpFx[26]*tmpObjS[30] + tmpFx[35]*tmpObjS[41] + tmpFx[44]*tmpObjS[52] + tmpFx[53]*tmpObjS[63] + tmpFx[62]*tmpObjS[74] + tmpFx[71]*tmpObjS[85] + tmpFx[80]*tmpObjS[96] + tmpFx[89]*tmpObjS[107] + tmpFx[98]*tmpObjS[118];
tmpQ2[97] = + tmpFx[8]*tmpObjS[9] + tmpFx[17]*tmpObjS[20] + tmpFx[26]*tmpObjS[31] + tmpFx[35]*tmpObjS[42] + tmpFx[44]*tmpObjS[53] + tmpFx[53]*tmpObjS[64] + tmpFx[62]*tmpObjS[75] + tmpFx[71]*tmpObjS[86] + tmpFx[80]*tmpObjS[97] + tmpFx[89]*tmpObjS[108] + tmpFx[98]*tmpObjS[119];
tmpQ2[98] = + tmpFx[8]*tmpObjS[10] + tmpFx[17]*tmpObjS[21] + tmpFx[26]*tmpObjS[32] + tmpFx[35]*tmpObjS[43] + tmpFx[44]*tmpObjS[54] + tmpFx[53]*tmpObjS[65] + tmpFx[62]*tmpObjS[76] + tmpFx[71]*tmpObjS[87] + tmpFx[80]*tmpObjS[98] + tmpFx[89]*tmpObjS[109] + tmpFx[98]*tmpObjS[120];
tmpQ1[0] = + tmpQ2[0]*tmpFx[0] + tmpQ2[1]*tmpFx[9] + tmpQ2[2]*tmpFx[18] + tmpQ2[3]*tmpFx[27] + tmpQ2[4]*tmpFx[36] + tmpQ2[5]*tmpFx[45] + tmpQ2[6]*tmpFx[54] + tmpQ2[7]*tmpFx[63] + tmpQ2[8]*tmpFx[72] + tmpQ2[9]*tmpFx[81] + tmpQ2[10]*tmpFx[90];
tmpQ1[1] = + tmpQ2[0]*tmpFx[1] + tmpQ2[1]*tmpFx[10] + tmpQ2[2]*tmpFx[19] + tmpQ2[3]*tmpFx[28] + tmpQ2[4]*tmpFx[37] + tmpQ2[5]*tmpFx[46] + tmpQ2[6]*tmpFx[55] + tmpQ2[7]*tmpFx[64] + tmpQ2[8]*tmpFx[73] + tmpQ2[9]*tmpFx[82] + tmpQ2[10]*tmpFx[91];
tmpQ1[2] = + tmpQ2[0]*tmpFx[2] + tmpQ2[1]*tmpFx[11] + tmpQ2[2]*tmpFx[20] + tmpQ2[3]*tmpFx[29] + tmpQ2[4]*tmpFx[38] + tmpQ2[5]*tmpFx[47] + tmpQ2[6]*tmpFx[56] + tmpQ2[7]*tmpFx[65] + tmpQ2[8]*tmpFx[74] + tmpQ2[9]*tmpFx[83] + tmpQ2[10]*tmpFx[92];
tmpQ1[3] = + tmpQ2[0]*tmpFx[3] + tmpQ2[1]*tmpFx[12] + tmpQ2[2]*tmpFx[21] + tmpQ2[3]*tmpFx[30] + tmpQ2[4]*tmpFx[39] + tmpQ2[5]*tmpFx[48] + tmpQ2[6]*tmpFx[57] + tmpQ2[7]*tmpFx[66] + tmpQ2[8]*tmpFx[75] + tmpQ2[9]*tmpFx[84] + tmpQ2[10]*tmpFx[93];
tmpQ1[4] = + tmpQ2[0]*tmpFx[4] + tmpQ2[1]*tmpFx[13] + tmpQ2[2]*tmpFx[22] + tmpQ2[3]*tmpFx[31] + tmpQ2[4]*tmpFx[40] + tmpQ2[5]*tmpFx[49] + tmpQ2[6]*tmpFx[58] + tmpQ2[7]*tmpFx[67] + tmpQ2[8]*tmpFx[76] + tmpQ2[9]*tmpFx[85] + tmpQ2[10]*tmpFx[94];
tmpQ1[5] = + tmpQ2[0]*tmpFx[5] + tmpQ2[1]*tmpFx[14] + tmpQ2[2]*tmpFx[23] + tmpQ2[3]*tmpFx[32] + tmpQ2[4]*tmpFx[41] + tmpQ2[5]*tmpFx[50] + tmpQ2[6]*tmpFx[59] + tmpQ2[7]*tmpFx[68] + tmpQ2[8]*tmpFx[77] + tmpQ2[9]*tmpFx[86] + tmpQ2[10]*tmpFx[95];
tmpQ1[6] = + tmpQ2[0]*tmpFx[6] + tmpQ2[1]*tmpFx[15] + tmpQ2[2]*tmpFx[24] + tmpQ2[3]*tmpFx[33] + tmpQ2[4]*tmpFx[42] + tmpQ2[5]*tmpFx[51] + tmpQ2[6]*tmpFx[60] + tmpQ2[7]*tmpFx[69] + tmpQ2[8]*tmpFx[78] + tmpQ2[9]*tmpFx[87] + tmpQ2[10]*tmpFx[96];
tmpQ1[7] = + tmpQ2[0]*tmpFx[7] + tmpQ2[1]*tmpFx[16] + tmpQ2[2]*tmpFx[25] + tmpQ2[3]*tmpFx[34] + tmpQ2[4]*tmpFx[43] + tmpQ2[5]*tmpFx[52] + tmpQ2[6]*tmpFx[61] + tmpQ2[7]*tmpFx[70] + tmpQ2[8]*tmpFx[79] + tmpQ2[9]*tmpFx[88] + tmpQ2[10]*tmpFx[97];
tmpQ1[8] = + tmpQ2[0]*tmpFx[8] + tmpQ2[1]*tmpFx[17] + tmpQ2[2]*tmpFx[26] + tmpQ2[3]*tmpFx[35] + tmpQ2[4]*tmpFx[44] + tmpQ2[5]*tmpFx[53] + tmpQ2[6]*tmpFx[62] + tmpQ2[7]*tmpFx[71] + tmpQ2[8]*tmpFx[80] + tmpQ2[9]*tmpFx[89] + tmpQ2[10]*tmpFx[98];
tmpQ1[9] = + tmpQ2[11]*tmpFx[0] + tmpQ2[12]*tmpFx[9] + tmpQ2[13]*tmpFx[18] + tmpQ2[14]*tmpFx[27] + tmpQ2[15]*tmpFx[36] + tmpQ2[16]*tmpFx[45] + tmpQ2[17]*tmpFx[54] + tmpQ2[18]*tmpFx[63] + tmpQ2[19]*tmpFx[72] + tmpQ2[20]*tmpFx[81] + tmpQ2[21]*tmpFx[90];
tmpQ1[10] = + tmpQ2[11]*tmpFx[1] + tmpQ2[12]*tmpFx[10] + tmpQ2[13]*tmpFx[19] + tmpQ2[14]*tmpFx[28] + tmpQ2[15]*tmpFx[37] + tmpQ2[16]*tmpFx[46] + tmpQ2[17]*tmpFx[55] + tmpQ2[18]*tmpFx[64] + tmpQ2[19]*tmpFx[73] + tmpQ2[20]*tmpFx[82] + tmpQ2[21]*tmpFx[91];
tmpQ1[11] = + tmpQ2[11]*tmpFx[2] + tmpQ2[12]*tmpFx[11] + tmpQ2[13]*tmpFx[20] + tmpQ2[14]*tmpFx[29] + tmpQ2[15]*tmpFx[38] + tmpQ2[16]*tmpFx[47] + tmpQ2[17]*tmpFx[56] + tmpQ2[18]*tmpFx[65] + tmpQ2[19]*tmpFx[74] + tmpQ2[20]*tmpFx[83] + tmpQ2[21]*tmpFx[92];
tmpQ1[12] = + tmpQ2[11]*tmpFx[3] + tmpQ2[12]*tmpFx[12] + tmpQ2[13]*tmpFx[21] + tmpQ2[14]*tmpFx[30] + tmpQ2[15]*tmpFx[39] + tmpQ2[16]*tmpFx[48] + tmpQ2[17]*tmpFx[57] + tmpQ2[18]*tmpFx[66] + tmpQ2[19]*tmpFx[75] + tmpQ2[20]*tmpFx[84] + tmpQ2[21]*tmpFx[93];
tmpQ1[13] = + tmpQ2[11]*tmpFx[4] + tmpQ2[12]*tmpFx[13] + tmpQ2[13]*tmpFx[22] + tmpQ2[14]*tmpFx[31] + tmpQ2[15]*tmpFx[40] + tmpQ2[16]*tmpFx[49] + tmpQ2[17]*tmpFx[58] + tmpQ2[18]*tmpFx[67] + tmpQ2[19]*tmpFx[76] + tmpQ2[20]*tmpFx[85] + tmpQ2[21]*tmpFx[94];
tmpQ1[14] = + tmpQ2[11]*tmpFx[5] + tmpQ2[12]*tmpFx[14] + tmpQ2[13]*tmpFx[23] + tmpQ2[14]*tmpFx[32] + tmpQ2[15]*tmpFx[41] + tmpQ2[16]*tmpFx[50] + tmpQ2[17]*tmpFx[59] + tmpQ2[18]*tmpFx[68] + tmpQ2[19]*tmpFx[77] + tmpQ2[20]*tmpFx[86] + tmpQ2[21]*tmpFx[95];
tmpQ1[15] = + tmpQ2[11]*tmpFx[6] + tmpQ2[12]*tmpFx[15] + tmpQ2[13]*tmpFx[24] + tmpQ2[14]*tmpFx[33] + tmpQ2[15]*tmpFx[42] + tmpQ2[16]*tmpFx[51] + tmpQ2[17]*tmpFx[60] + tmpQ2[18]*tmpFx[69] + tmpQ2[19]*tmpFx[78] + tmpQ2[20]*tmpFx[87] + tmpQ2[21]*tmpFx[96];
tmpQ1[16] = + tmpQ2[11]*tmpFx[7] + tmpQ2[12]*tmpFx[16] + tmpQ2[13]*tmpFx[25] + tmpQ2[14]*tmpFx[34] + tmpQ2[15]*tmpFx[43] + tmpQ2[16]*tmpFx[52] + tmpQ2[17]*tmpFx[61] + tmpQ2[18]*tmpFx[70] + tmpQ2[19]*tmpFx[79] + tmpQ2[20]*tmpFx[88] + tmpQ2[21]*tmpFx[97];
tmpQ1[17] = + tmpQ2[11]*tmpFx[8] + tmpQ2[12]*tmpFx[17] + tmpQ2[13]*tmpFx[26] + tmpQ2[14]*tmpFx[35] + tmpQ2[15]*tmpFx[44] + tmpQ2[16]*tmpFx[53] + tmpQ2[17]*tmpFx[62] + tmpQ2[18]*tmpFx[71] + tmpQ2[19]*tmpFx[80] + tmpQ2[20]*tmpFx[89] + tmpQ2[21]*tmpFx[98];
tmpQ1[18] = + tmpQ2[22]*tmpFx[0] + tmpQ2[23]*tmpFx[9] + tmpQ2[24]*tmpFx[18] + tmpQ2[25]*tmpFx[27] + tmpQ2[26]*tmpFx[36] + tmpQ2[27]*tmpFx[45] + tmpQ2[28]*tmpFx[54] + tmpQ2[29]*tmpFx[63] + tmpQ2[30]*tmpFx[72] + tmpQ2[31]*tmpFx[81] + tmpQ2[32]*tmpFx[90];
tmpQ1[19] = + tmpQ2[22]*tmpFx[1] + tmpQ2[23]*tmpFx[10] + tmpQ2[24]*tmpFx[19] + tmpQ2[25]*tmpFx[28] + tmpQ2[26]*tmpFx[37] + tmpQ2[27]*tmpFx[46] + tmpQ2[28]*tmpFx[55] + tmpQ2[29]*tmpFx[64] + tmpQ2[30]*tmpFx[73] + tmpQ2[31]*tmpFx[82] + tmpQ2[32]*tmpFx[91];
tmpQ1[20] = + tmpQ2[22]*tmpFx[2] + tmpQ2[23]*tmpFx[11] + tmpQ2[24]*tmpFx[20] + tmpQ2[25]*tmpFx[29] + tmpQ2[26]*tmpFx[38] + tmpQ2[27]*tmpFx[47] + tmpQ2[28]*tmpFx[56] + tmpQ2[29]*tmpFx[65] + tmpQ2[30]*tmpFx[74] + tmpQ2[31]*tmpFx[83] + tmpQ2[32]*tmpFx[92];
tmpQ1[21] = + tmpQ2[22]*tmpFx[3] + tmpQ2[23]*tmpFx[12] + tmpQ2[24]*tmpFx[21] + tmpQ2[25]*tmpFx[30] + tmpQ2[26]*tmpFx[39] + tmpQ2[27]*tmpFx[48] + tmpQ2[28]*tmpFx[57] + tmpQ2[29]*tmpFx[66] + tmpQ2[30]*tmpFx[75] + tmpQ2[31]*tmpFx[84] + tmpQ2[32]*tmpFx[93];
tmpQ1[22] = + tmpQ2[22]*tmpFx[4] + tmpQ2[23]*tmpFx[13] + tmpQ2[24]*tmpFx[22] + tmpQ2[25]*tmpFx[31] + tmpQ2[26]*tmpFx[40] + tmpQ2[27]*tmpFx[49] + tmpQ2[28]*tmpFx[58] + tmpQ2[29]*tmpFx[67] + tmpQ2[30]*tmpFx[76] + tmpQ2[31]*tmpFx[85] + tmpQ2[32]*tmpFx[94];
tmpQ1[23] = + tmpQ2[22]*tmpFx[5] + tmpQ2[23]*tmpFx[14] + tmpQ2[24]*tmpFx[23] + tmpQ2[25]*tmpFx[32] + tmpQ2[26]*tmpFx[41] + tmpQ2[27]*tmpFx[50] + tmpQ2[28]*tmpFx[59] + tmpQ2[29]*tmpFx[68] + tmpQ2[30]*tmpFx[77] + tmpQ2[31]*tmpFx[86] + tmpQ2[32]*tmpFx[95];
tmpQ1[24] = + tmpQ2[22]*tmpFx[6] + tmpQ2[23]*tmpFx[15] + tmpQ2[24]*tmpFx[24] + tmpQ2[25]*tmpFx[33] + tmpQ2[26]*tmpFx[42] + tmpQ2[27]*tmpFx[51] + tmpQ2[28]*tmpFx[60] + tmpQ2[29]*tmpFx[69] + tmpQ2[30]*tmpFx[78] + tmpQ2[31]*tmpFx[87] + tmpQ2[32]*tmpFx[96];
tmpQ1[25] = + tmpQ2[22]*tmpFx[7] + tmpQ2[23]*tmpFx[16] + tmpQ2[24]*tmpFx[25] + tmpQ2[25]*tmpFx[34] + tmpQ2[26]*tmpFx[43] + tmpQ2[27]*tmpFx[52] + tmpQ2[28]*tmpFx[61] + tmpQ2[29]*tmpFx[70] + tmpQ2[30]*tmpFx[79] + tmpQ2[31]*tmpFx[88] + tmpQ2[32]*tmpFx[97];
tmpQ1[26] = + tmpQ2[22]*tmpFx[8] + tmpQ2[23]*tmpFx[17] + tmpQ2[24]*tmpFx[26] + tmpQ2[25]*tmpFx[35] + tmpQ2[26]*tmpFx[44] + tmpQ2[27]*tmpFx[53] + tmpQ2[28]*tmpFx[62] + tmpQ2[29]*tmpFx[71] + tmpQ2[30]*tmpFx[80] + tmpQ2[31]*tmpFx[89] + tmpQ2[32]*tmpFx[98];
tmpQ1[27] = + tmpQ2[33]*tmpFx[0] + tmpQ2[34]*tmpFx[9] + tmpQ2[35]*tmpFx[18] + tmpQ2[36]*tmpFx[27] + tmpQ2[37]*tmpFx[36] + tmpQ2[38]*tmpFx[45] + tmpQ2[39]*tmpFx[54] + tmpQ2[40]*tmpFx[63] + tmpQ2[41]*tmpFx[72] + tmpQ2[42]*tmpFx[81] + tmpQ2[43]*tmpFx[90];
tmpQ1[28] = + tmpQ2[33]*tmpFx[1] + tmpQ2[34]*tmpFx[10] + tmpQ2[35]*tmpFx[19] + tmpQ2[36]*tmpFx[28] + tmpQ2[37]*tmpFx[37] + tmpQ2[38]*tmpFx[46] + tmpQ2[39]*tmpFx[55] + tmpQ2[40]*tmpFx[64] + tmpQ2[41]*tmpFx[73] + tmpQ2[42]*tmpFx[82] + tmpQ2[43]*tmpFx[91];
tmpQ1[29] = + tmpQ2[33]*tmpFx[2] + tmpQ2[34]*tmpFx[11] + tmpQ2[35]*tmpFx[20] + tmpQ2[36]*tmpFx[29] + tmpQ2[37]*tmpFx[38] + tmpQ2[38]*tmpFx[47] + tmpQ2[39]*tmpFx[56] + tmpQ2[40]*tmpFx[65] + tmpQ2[41]*tmpFx[74] + tmpQ2[42]*tmpFx[83] + tmpQ2[43]*tmpFx[92];
tmpQ1[30] = + tmpQ2[33]*tmpFx[3] + tmpQ2[34]*tmpFx[12] + tmpQ2[35]*tmpFx[21] + tmpQ2[36]*tmpFx[30] + tmpQ2[37]*tmpFx[39] + tmpQ2[38]*tmpFx[48] + tmpQ2[39]*tmpFx[57] + tmpQ2[40]*tmpFx[66] + tmpQ2[41]*tmpFx[75] + tmpQ2[42]*tmpFx[84] + tmpQ2[43]*tmpFx[93];
tmpQ1[31] = + tmpQ2[33]*tmpFx[4] + tmpQ2[34]*tmpFx[13] + tmpQ2[35]*tmpFx[22] + tmpQ2[36]*tmpFx[31] + tmpQ2[37]*tmpFx[40] + tmpQ2[38]*tmpFx[49] + tmpQ2[39]*tmpFx[58] + tmpQ2[40]*tmpFx[67] + tmpQ2[41]*tmpFx[76] + tmpQ2[42]*tmpFx[85] + tmpQ2[43]*tmpFx[94];
tmpQ1[32] = + tmpQ2[33]*tmpFx[5] + tmpQ2[34]*tmpFx[14] + tmpQ2[35]*tmpFx[23] + tmpQ2[36]*tmpFx[32] + tmpQ2[37]*tmpFx[41] + tmpQ2[38]*tmpFx[50] + tmpQ2[39]*tmpFx[59] + tmpQ2[40]*tmpFx[68] + tmpQ2[41]*tmpFx[77] + tmpQ2[42]*tmpFx[86] + tmpQ2[43]*tmpFx[95];
tmpQ1[33] = + tmpQ2[33]*tmpFx[6] + tmpQ2[34]*tmpFx[15] + tmpQ2[35]*tmpFx[24] + tmpQ2[36]*tmpFx[33] + tmpQ2[37]*tmpFx[42] + tmpQ2[38]*tmpFx[51] + tmpQ2[39]*tmpFx[60] + tmpQ2[40]*tmpFx[69] + tmpQ2[41]*tmpFx[78] + tmpQ2[42]*tmpFx[87] + tmpQ2[43]*tmpFx[96];
tmpQ1[34] = + tmpQ2[33]*tmpFx[7] + tmpQ2[34]*tmpFx[16] + tmpQ2[35]*tmpFx[25] + tmpQ2[36]*tmpFx[34] + tmpQ2[37]*tmpFx[43] + tmpQ2[38]*tmpFx[52] + tmpQ2[39]*tmpFx[61] + tmpQ2[40]*tmpFx[70] + tmpQ2[41]*tmpFx[79] + tmpQ2[42]*tmpFx[88] + tmpQ2[43]*tmpFx[97];
tmpQ1[35] = + tmpQ2[33]*tmpFx[8] + tmpQ2[34]*tmpFx[17] + tmpQ2[35]*tmpFx[26] + tmpQ2[36]*tmpFx[35] + tmpQ2[37]*tmpFx[44] + tmpQ2[38]*tmpFx[53] + tmpQ2[39]*tmpFx[62] + tmpQ2[40]*tmpFx[71] + tmpQ2[41]*tmpFx[80] + tmpQ2[42]*tmpFx[89] + tmpQ2[43]*tmpFx[98];
tmpQ1[36] = + tmpQ2[44]*tmpFx[0] + tmpQ2[45]*tmpFx[9] + tmpQ2[46]*tmpFx[18] + tmpQ2[47]*tmpFx[27] + tmpQ2[48]*tmpFx[36] + tmpQ2[49]*tmpFx[45] + tmpQ2[50]*tmpFx[54] + tmpQ2[51]*tmpFx[63] + tmpQ2[52]*tmpFx[72] + tmpQ2[53]*tmpFx[81] + tmpQ2[54]*tmpFx[90];
tmpQ1[37] = + tmpQ2[44]*tmpFx[1] + tmpQ2[45]*tmpFx[10] + tmpQ2[46]*tmpFx[19] + tmpQ2[47]*tmpFx[28] + tmpQ2[48]*tmpFx[37] + tmpQ2[49]*tmpFx[46] + tmpQ2[50]*tmpFx[55] + tmpQ2[51]*tmpFx[64] + tmpQ2[52]*tmpFx[73] + tmpQ2[53]*tmpFx[82] + tmpQ2[54]*tmpFx[91];
tmpQ1[38] = + tmpQ2[44]*tmpFx[2] + tmpQ2[45]*tmpFx[11] + tmpQ2[46]*tmpFx[20] + tmpQ2[47]*tmpFx[29] + tmpQ2[48]*tmpFx[38] + tmpQ2[49]*tmpFx[47] + tmpQ2[50]*tmpFx[56] + tmpQ2[51]*tmpFx[65] + tmpQ2[52]*tmpFx[74] + tmpQ2[53]*tmpFx[83] + tmpQ2[54]*tmpFx[92];
tmpQ1[39] = + tmpQ2[44]*tmpFx[3] + tmpQ2[45]*tmpFx[12] + tmpQ2[46]*tmpFx[21] + tmpQ2[47]*tmpFx[30] + tmpQ2[48]*tmpFx[39] + tmpQ2[49]*tmpFx[48] + tmpQ2[50]*tmpFx[57] + tmpQ2[51]*tmpFx[66] + tmpQ2[52]*tmpFx[75] + tmpQ2[53]*tmpFx[84] + tmpQ2[54]*tmpFx[93];
tmpQ1[40] = + tmpQ2[44]*tmpFx[4] + tmpQ2[45]*tmpFx[13] + tmpQ2[46]*tmpFx[22] + tmpQ2[47]*tmpFx[31] + tmpQ2[48]*tmpFx[40] + tmpQ2[49]*tmpFx[49] + tmpQ2[50]*tmpFx[58] + tmpQ2[51]*tmpFx[67] + tmpQ2[52]*tmpFx[76] + tmpQ2[53]*tmpFx[85] + tmpQ2[54]*tmpFx[94];
tmpQ1[41] = + tmpQ2[44]*tmpFx[5] + tmpQ2[45]*tmpFx[14] + tmpQ2[46]*tmpFx[23] + tmpQ2[47]*tmpFx[32] + tmpQ2[48]*tmpFx[41] + tmpQ2[49]*tmpFx[50] + tmpQ2[50]*tmpFx[59] + tmpQ2[51]*tmpFx[68] + tmpQ2[52]*tmpFx[77] + tmpQ2[53]*tmpFx[86] + tmpQ2[54]*tmpFx[95];
tmpQ1[42] = + tmpQ2[44]*tmpFx[6] + tmpQ2[45]*tmpFx[15] + tmpQ2[46]*tmpFx[24] + tmpQ2[47]*tmpFx[33] + tmpQ2[48]*tmpFx[42] + tmpQ2[49]*tmpFx[51] + tmpQ2[50]*tmpFx[60] + tmpQ2[51]*tmpFx[69] + tmpQ2[52]*tmpFx[78] + tmpQ2[53]*tmpFx[87] + tmpQ2[54]*tmpFx[96];
tmpQ1[43] = + tmpQ2[44]*tmpFx[7] + tmpQ2[45]*tmpFx[16] + tmpQ2[46]*tmpFx[25] + tmpQ2[47]*tmpFx[34] + tmpQ2[48]*tmpFx[43] + tmpQ2[49]*tmpFx[52] + tmpQ2[50]*tmpFx[61] + tmpQ2[51]*tmpFx[70] + tmpQ2[52]*tmpFx[79] + tmpQ2[53]*tmpFx[88] + tmpQ2[54]*tmpFx[97];
tmpQ1[44] = + tmpQ2[44]*tmpFx[8] + tmpQ2[45]*tmpFx[17] + tmpQ2[46]*tmpFx[26] + tmpQ2[47]*tmpFx[35] + tmpQ2[48]*tmpFx[44] + tmpQ2[49]*tmpFx[53] + tmpQ2[50]*tmpFx[62] + tmpQ2[51]*tmpFx[71] + tmpQ2[52]*tmpFx[80] + tmpQ2[53]*tmpFx[89] + tmpQ2[54]*tmpFx[98];
tmpQ1[45] = + tmpQ2[55]*tmpFx[0] + tmpQ2[56]*tmpFx[9] + tmpQ2[57]*tmpFx[18] + tmpQ2[58]*tmpFx[27] + tmpQ2[59]*tmpFx[36] + tmpQ2[60]*tmpFx[45] + tmpQ2[61]*tmpFx[54] + tmpQ2[62]*tmpFx[63] + tmpQ2[63]*tmpFx[72] + tmpQ2[64]*tmpFx[81] + tmpQ2[65]*tmpFx[90];
tmpQ1[46] = + tmpQ2[55]*tmpFx[1] + tmpQ2[56]*tmpFx[10] + tmpQ2[57]*tmpFx[19] + tmpQ2[58]*tmpFx[28] + tmpQ2[59]*tmpFx[37] + tmpQ2[60]*tmpFx[46] + tmpQ2[61]*tmpFx[55] + tmpQ2[62]*tmpFx[64] + tmpQ2[63]*tmpFx[73] + tmpQ2[64]*tmpFx[82] + tmpQ2[65]*tmpFx[91];
tmpQ1[47] = + tmpQ2[55]*tmpFx[2] + tmpQ2[56]*tmpFx[11] + tmpQ2[57]*tmpFx[20] + tmpQ2[58]*tmpFx[29] + tmpQ2[59]*tmpFx[38] + tmpQ2[60]*tmpFx[47] + tmpQ2[61]*tmpFx[56] + tmpQ2[62]*tmpFx[65] + tmpQ2[63]*tmpFx[74] + tmpQ2[64]*tmpFx[83] + tmpQ2[65]*tmpFx[92];
tmpQ1[48] = + tmpQ2[55]*tmpFx[3] + tmpQ2[56]*tmpFx[12] + tmpQ2[57]*tmpFx[21] + tmpQ2[58]*tmpFx[30] + tmpQ2[59]*tmpFx[39] + tmpQ2[60]*tmpFx[48] + tmpQ2[61]*tmpFx[57] + tmpQ2[62]*tmpFx[66] + tmpQ2[63]*tmpFx[75] + tmpQ2[64]*tmpFx[84] + tmpQ2[65]*tmpFx[93];
tmpQ1[49] = + tmpQ2[55]*tmpFx[4] + tmpQ2[56]*tmpFx[13] + tmpQ2[57]*tmpFx[22] + tmpQ2[58]*tmpFx[31] + tmpQ2[59]*tmpFx[40] + tmpQ2[60]*tmpFx[49] + tmpQ2[61]*tmpFx[58] + tmpQ2[62]*tmpFx[67] + tmpQ2[63]*tmpFx[76] + tmpQ2[64]*tmpFx[85] + tmpQ2[65]*tmpFx[94];
tmpQ1[50] = + tmpQ2[55]*tmpFx[5] + tmpQ2[56]*tmpFx[14] + tmpQ2[57]*tmpFx[23] + tmpQ2[58]*tmpFx[32] + tmpQ2[59]*tmpFx[41] + tmpQ2[60]*tmpFx[50] + tmpQ2[61]*tmpFx[59] + tmpQ2[62]*tmpFx[68] + tmpQ2[63]*tmpFx[77] + tmpQ2[64]*tmpFx[86] + tmpQ2[65]*tmpFx[95];
tmpQ1[51] = + tmpQ2[55]*tmpFx[6] + tmpQ2[56]*tmpFx[15] + tmpQ2[57]*tmpFx[24] + tmpQ2[58]*tmpFx[33] + tmpQ2[59]*tmpFx[42] + tmpQ2[60]*tmpFx[51] + tmpQ2[61]*tmpFx[60] + tmpQ2[62]*tmpFx[69] + tmpQ2[63]*tmpFx[78] + tmpQ2[64]*tmpFx[87] + tmpQ2[65]*tmpFx[96];
tmpQ1[52] = + tmpQ2[55]*tmpFx[7] + tmpQ2[56]*tmpFx[16] + tmpQ2[57]*tmpFx[25] + tmpQ2[58]*tmpFx[34] + tmpQ2[59]*tmpFx[43] + tmpQ2[60]*tmpFx[52] + tmpQ2[61]*tmpFx[61] + tmpQ2[62]*tmpFx[70] + tmpQ2[63]*tmpFx[79] + tmpQ2[64]*tmpFx[88] + tmpQ2[65]*tmpFx[97];
tmpQ1[53] = + tmpQ2[55]*tmpFx[8] + tmpQ2[56]*tmpFx[17] + tmpQ2[57]*tmpFx[26] + tmpQ2[58]*tmpFx[35] + tmpQ2[59]*tmpFx[44] + tmpQ2[60]*tmpFx[53] + tmpQ2[61]*tmpFx[62] + tmpQ2[62]*tmpFx[71] + tmpQ2[63]*tmpFx[80] + tmpQ2[64]*tmpFx[89] + tmpQ2[65]*tmpFx[98];
tmpQ1[54] = + tmpQ2[66]*tmpFx[0] + tmpQ2[67]*tmpFx[9] + tmpQ2[68]*tmpFx[18] + tmpQ2[69]*tmpFx[27] + tmpQ2[70]*tmpFx[36] + tmpQ2[71]*tmpFx[45] + tmpQ2[72]*tmpFx[54] + tmpQ2[73]*tmpFx[63] + tmpQ2[74]*tmpFx[72] + tmpQ2[75]*tmpFx[81] + tmpQ2[76]*tmpFx[90];
tmpQ1[55] = + tmpQ2[66]*tmpFx[1] + tmpQ2[67]*tmpFx[10] + tmpQ2[68]*tmpFx[19] + tmpQ2[69]*tmpFx[28] + tmpQ2[70]*tmpFx[37] + tmpQ2[71]*tmpFx[46] + tmpQ2[72]*tmpFx[55] + tmpQ2[73]*tmpFx[64] + tmpQ2[74]*tmpFx[73] + tmpQ2[75]*tmpFx[82] + tmpQ2[76]*tmpFx[91];
tmpQ1[56] = + tmpQ2[66]*tmpFx[2] + tmpQ2[67]*tmpFx[11] + tmpQ2[68]*tmpFx[20] + tmpQ2[69]*tmpFx[29] + tmpQ2[70]*tmpFx[38] + tmpQ2[71]*tmpFx[47] + tmpQ2[72]*tmpFx[56] + tmpQ2[73]*tmpFx[65] + tmpQ2[74]*tmpFx[74] + tmpQ2[75]*tmpFx[83] + tmpQ2[76]*tmpFx[92];
tmpQ1[57] = + tmpQ2[66]*tmpFx[3] + tmpQ2[67]*tmpFx[12] + tmpQ2[68]*tmpFx[21] + tmpQ2[69]*tmpFx[30] + tmpQ2[70]*tmpFx[39] + tmpQ2[71]*tmpFx[48] + tmpQ2[72]*tmpFx[57] + tmpQ2[73]*tmpFx[66] + tmpQ2[74]*tmpFx[75] + tmpQ2[75]*tmpFx[84] + tmpQ2[76]*tmpFx[93];
tmpQ1[58] = + tmpQ2[66]*tmpFx[4] + tmpQ2[67]*tmpFx[13] + tmpQ2[68]*tmpFx[22] + tmpQ2[69]*tmpFx[31] + tmpQ2[70]*tmpFx[40] + tmpQ2[71]*tmpFx[49] + tmpQ2[72]*tmpFx[58] + tmpQ2[73]*tmpFx[67] + tmpQ2[74]*tmpFx[76] + tmpQ2[75]*tmpFx[85] + tmpQ2[76]*tmpFx[94];
tmpQ1[59] = + tmpQ2[66]*tmpFx[5] + tmpQ2[67]*tmpFx[14] + tmpQ2[68]*tmpFx[23] + tmpQ2[69]*tmpFx[32] + tmpQ2[70]*tmpFx[41] + tmpQ2[71]*tmpFx[50] + tmpQ2[72]*tmpFx[59] + tmpQ2[73]*tmpFx[68] + tmpQ2[74]*tmpFx[77] + tmpQ2[75]*tmpFx[86] + tmpQ2[76]*tmpFx[95];
tmpQ1[60] = + tmpQ2[66]*tmpFx[6] + tmpQ2[67]*tmpFx[15] + tmpQ2[68]*tmpFx[24] + tmpQ2[69]*tmpFx[33] + tmpQ2[70]*tmpFx[42] + tmpQ2[71]*tmpFx[51] + tmpQ2[72]*tmpFx[60] + tmpQ2[73]*tmpFx[69] + tmpQ2[74]*tmpFx[78] + tmpQ2[75]*tmpFx[87] + tmpQ2[76]*tmpFx[96];
tmpQ1[61] = + tmpQ2[66]*tmpFx[7] + tmpQ2[67]*tmpFx[16] + tmpQ2[68]*tmpFx[25] + tmpQ2[69]*tmpFx[34] + tmpQ2[70]*tmpFx[43] + tmpQ2[71]*tmpFx[52] + tmpQ2[72]*tmpFx[61] + tmpQ2[73]*tmpFx[70] + tmpQ2[74]*tmpFx[79] + tmpQ2[75]*tmpFx[88] + tmpQ2[76]*tmpFx[97];
tmpQ1[62] = + tmpQ2[66]*tmpFx[8] + tmpQ2[67]*tmpFx[17] + tmpQ2[68]*tmpFx[26] + tmpQ2[69]*tmpFx[35] + tmpQ2[70]*tmpFx[44] + tmpQ2[71]*tmpFx[53] + tmpQ2[72]*tmpFx[62] + tmpQ2[73]*tmpFx[71] + tmpQ2[74]*tmpFx[80] + tmpQ2[75]*tmpFx[89] + tmpQ2[76]*tmpFx[98];
tmpQ1[63] = + tmpQ2[77]*tmpFx[0] + tmpQ2[78]*tmpFx[9] + tmpQ2[79]*tmpFx[18] + tmpQ2[80]*tmpFx[27] + tmpQ2[81]*tmpFx[36] + tmpQ2[82]*tmpFx[45] + tmpQ2[83]*tmpFx[54] + tmpQ2[84]*tmpFx[63] + tmpQ2[85]*tmpFx[72] + tmpQ2[86]*tmpFx[81] + tmpQ2[87]*tmpFx[90];
tmpQ1[64] = + tmpQ2[77]*tmpFx[1] + tmpQ2[78]*tmpFx[10] + tmpQ2[79]*tmpFx[19] + tmpQ2[80]*tmpFx[28] + tmpQ2[81]*tmpFx[37] + tmpQ2[82]*tmpFx[46] + tmpQ2[83]*tmpFx[55] + tmpQ2[84]*tmpFx[64] + tmpQ2[85]*tmpFx[73] + tmpQ2[86]*tmpFx[82] + tmpQ2[87]*tmpFx[91];
tmpQ1[65] = + tmpQ2[77]*tmpFx[2] + tmpQ2[78]*tmpFx[11] + tmpQ2[79]*tmpFx[20] + tmpQ2[80]*tmpFx[29] + tmpQ2[81]*tmpFx[38] + tmpQ2[82]*tmpFx[47] + tmpQ2[83]*tmpFx[56] + tmpQ2[84]*tmpFx[65] + tmpQ2[85]*tmpFx[74] + tmpQ2[86]*tmpFx[83] + tmpQ2[87]*tmpFx[92];
tmpQ1[66] = + tmpQ2[77]*tmpFx[3] + tmpQ2[78]*tmpFx[12] + tmpQ2[79]*tmpFx[21] + tmpQ2[80]*tmpFx[30] + tmpQ2[81]*tmpFx[39] + tmpQ2[82]*tmpFx[48] + tmpQ2[83]*tmpFx[57] + tmpQ2[84]*tmpFx[66] + tmpQ2[85]*tmpFx[75] + tmpQ2[86]*tmpFx[84] + tmpQ2[87]*tmpFx[93];
tmpQ1[67] = + tmpQ2[77]*tmpFx[4] + tmpQ2[78]*tmpFx[13] + tmpQ2[79]*tmpFx[22] + tmpQ2[80]*tmpFx[31] + tmpQ2[81]*tmpFx[40] + tmpQ2[82]*tmpFx[49] + tmpQ2[83]*tmpFx[58] + tmpQ2[84]*tmpFx[67] + tmpQ2[85]*tmpFx[76] + tmpQ2[86]*tmpFx[85] + tmpQ2[87]*tmpFx[94];
tmpQ1[68] = + tmpQ2[77]*tmpFx[5] + tmpQ2[78]*tmpFx[14] + tmpQ2[79]*tmpFx[23] + tmpQ2[80]*tmpFx[32] + tmpQ2[81]*tmpFx[41] + tmpQ2[82]*tmpFx[50] + tmpQ2[83]*tmpFx[59] + tmpQ2[84]*tmpFx[68] + tmpQ2[85]*tmpFx[77] + tmpQ2[86]*tmpFx[86] + tmpQ2[87]*tmpFx[95];
tmpQ1[69] = + tmpQ2[77]*tmpFx[6] + tmpQ2[78]*tmpFx[15] + tmpQ2[79]*tmpFx[24] + tmpQ2[80]*tmpFx[33] + tmpQ2[81]*tmpFx[42] + tmpQ2[82]*tmpFx[51] + tmpQ2[83]*tmpFx[60] + tmpQ2[84]*tmpFx[69] + tmpQ2[85]*tmpFx[78] + tmpQ2[86]*tmpFx[87] + tmpQ2[87]*tmpFx[96];
tmpQ1[70] = + tmpQ2[77]*tmpFx[7] + tmpQ2[78]*tmpFx[16] + tmpQ2[79]*tmpFx[25] + tmpQ2[80]*tmpFx[34] + tmpQ2[81]*tmpFx[43] + tmpQ2[82]*tmpFx[52] + tmpQ2[83]*tmpFx[61] + tmpQ2[84]*tmpFx[70] + tmpQ2[85]*tmpFx[79] + tmpQ2[86]*tmpFx[88] + tmpQ2[87]*tmpFx[97];
tmpQ1[71] = + tmpQ2[77]*tmpFx[8] + tmpQ2[78]*tmpFx[17] + tmpQ2[79]*tmpFx[26] + tmpQ2[80]*tmpFx[35] + tmpQ2[81]*tmpFx[44] + tmpQ2[82]*tmpFx[53] + tmpQ2[83]*tmpFx[62] + tmpQ2[84]*tmpFx[71] + tmpQ2[85]*tmpFx[80] + tmpQ2[86]*tmpFx[89] + tmpQ2[87]*tmpFx[98];
tmpQ1[72] = + tmpQ2[88]*tmpFx[0] + tmpQ2[89]*tmpFx[9] + tmpQ2[90]*tmpFx[18] + tmpQ2[91]*tmpFx[27] + tmpQ2[92]*tmpFx[36] + tmpQ2[93]*tmpFx[45] + tmpQ2[94]*tmpFx[54] + tmpQ2[95]*tmpFx[63] + tmpQ2[96]*tmpFx[72] + tmpQ2[97]*tmpFx[81] + tmpQ2[98]*tmpFx[90];
tmpQ1[73] = + tmpQ2[88]*tmpFx[1] + tmpQ2[89]*tmpFx[10] + tmpQ2[90]*tmpFx[19] + tmpQ2[91]*tmpFx[28] + tmpQ2[92]*tmpFx[37] + tmpQ2[93]*tmpFx[46] + tmpQ2[94]*tmpFx[55] + tmpQ2[95]*tmpFx[64] + tmpQ2[96]*tmpFx[73] + tmpQ2[97]*tmpFx[82] + tmpQ2[98]*tmpFx[91];
tmpQ1[74] = + tmpQ2[88]*tmpFx[2] + tmpQ2[89]*tmpFx[11] + tmpQ2[90]*tmpFx[20] + tmpQ2[91]*tmpFx[29] + tmpQ2[92]*tmpFx[38] + tmpQ2[93]*tmpFx[47] + tmpQ2[94]*tmpFx[56] + tmpQ2[95]*tmpFx[65] + tmpQ2[96]*tmpFx[74] + tmpQ2[97]*tmpFx[83] + tmpQ2[98]*tmpFx[92];
tmpQ1[75] = + tmpQ2[88]*tmpFx[3] + tmpQ2[89]*tmpFx[12] + tmpQ2[90]*tmpFx[21] + tmpQ2[91]*tmpFx[30] + tmpQ2[92]*tmpFx[39] + tmpQ2[93]*tmpFx[48] + tmpQ2[94]*tmpFx[57] + tmpQ2[95]*tmpFx[66] + tmpQ2[96]*tmpFx[75] + tmpQ2[97]*tmpFx[84] + tmpQ2[98]*tmpFx[93];
tmpQ1[76] = + tmpQ2[88]*tmpFx[4] + tmpQ2[89]*tmpFx[13] + tmpQ2[90]*tmpFx[22] + tmpQ2[91]*tmpFx[31] + tmpQ2[92]*tmpFx[40] + tmpQ2[93]*tmpFx[49] + tmpQ2[94]*tmpFx[58] + tmpQ2[95]*tmpFx[67] + tmpQ2[96]*tmpFx[76] + tmpQ2[97]*tmpFx[85] + tmpQ2[98]*tmpFx[94];
tmpQ1[77] = + tmpQ2[88]*tmpFx[5] + tmpQ2[89]*tmpFx[14] + tmpQ2[90]*tmpFx[23] + tmpQ2[91]*tmpFx[32] + tmpQ2[92]*tmpFx[41] + tmpQ2[93]*tmpFx[50] + tmpQ2[94]*tmpFx[59] + tmpQ2[95]*tmpFx[68] + tmpQ2[96]*tmpFx[77] + tmpQ2[97]*tmpFx[86] + tmpQ2[98]*tmpFx[95];
tmpQ1[78] = + tmpQ2[88]*tmpFx[6] + tmpQ2[89]*tmpFx[15] + tmpQ2[90]*tmpFx[24] + tmpQ2[91]*tmpFx[33] + tmpQ2[92]*tmpFx[42] + tmpQ2[93]*tmpFx[51] + tmpQ2[94]*tmpFx[60] + tmpQ2[95]*tmpFx[69] + tmpQ2[96]*tmpFx[78] + tmpQ2[97]*tmpFx[87] + tmpQ2[98]*tmpFx[96];
tmpQ1[79] = + tmpQ2[88]*tmpFx[7] + tmpQ2[89]*tmpFx[16] + tmpQ2[90]*tmpFx[25] + tmpQ2[91]*tmpFx[34] + tmpQ2[92]*tmpFx[43] + tmpQ2[93]*tmpFx[52] + tmpQ2[94]*tmpFx[61] + tmpQ2[95]*tmpFx[70] + tmpQ2[96]*tmpFx[79] + tmpQ2[97]*tmpFx[88] + tmpQ2[98]*tmpFx[97];
tmpQ1[80] = + tmpQ2[88]*tmpFx[8] + tmpQ2[89]*tmpFx[17] + tmpQ2[90]*tmpFx[26] + tmpQ2[91]*tmpFx[35] + tmpQ2[92]*tmpFx[44] + tmpQ2[93]*tmpFx[53] + tmpQ2[94]*tmpFx[62] + tmpQ2[95]*tmpFx[71] + tmpQ2[96]*tmpFx[80] + tmpQ2[97]*tmpFx[89] + tmpQ2[98]*tmpFx[98];
}
void acado_setObjR1R2( real_t* const tmpFu, real_t* const tmpObjS, real_t* const tmpR1, real_t* const tmpR2 )
{
/*
 * Objective Hessian contribution w.r.t. the controls.
 *
 * tmpFu   : 11 x 3 sensitivity matrix (row major).
 * tmpObjS : 11 x 11 objective weighting matrix (row major).
 * tmpR2   : output, 3 x 11 (row major), tmpR2 = tmpFu^T * tmpObjS.
 * tmpR1   : output, 3 x 3 (row major),  tmpR1 = tmpR2 * tmpFu.
 *
 * The loops below reproduce the fully unrolled generated code term by
 * term: each inner sum is accumulated left to right starting from the
 * k = 0 product, so the floating-point evaluation order is unchanged.
 */
int row;
int col;
int k;
real_t acc;
/* tmpR2 = tmpFu^T * tmpObjS */
for (row = 0; row < 3; ++row)
{
for (col = 0; col < 11; ++col)
{
acc = tmpFu[row]*tmpObjS[col];
for (k = 1; k < 11; ++k)
acc += tmpFu[k * 3 + row]*tmpObjS[k * 11 + col];
tmpR2[row * 11 + col] = acc;
}
}
/* tmpR1 = tmpR2 * tmpFu */
for (row = 0; row < 3; ++row)
{
for (col = 0; col < 3; ++col)
{
acc = tmpR2[row * 11]*tmpFu[col];
for (k = 1; k < 11; ++k)
acc += tmpR2[row * 11 + k]*tmpFu[k * 3 + col];
tmpR1[row * 3 + col] = acc;
}
}
}
/*
 * Compute the state-control cross-term objective block:
 *
 *     tmpS1 = tmpFx^T * tmpObjS * tmpFu          (9 x 3, row major)
 *
 * where (all row major):
 *     tmpFx   : 11 x 9   output Jacobian w.r.t. the states
 *     tmpFu   : 11 x 3   output Jacobian w.r.t. the controls
 *     tmpObjS : 11 x 11  objective weighting matrix
 *
 * The intermediate product tmpS2 = tmpFx^T * tmpObjS (9 x 11) is formed
 * first, then multiplied by tmpFu.  Each inner sum accumulates its 11
 * products in the same left-to-right order as the original unrolled
 * code, so the floating-point results are bit-identical.
 */
void acado_setObjS1( real_t* const tmpFx, real_t* const tmpFu, real_t* const tmpObjS, real_t* const tmpS1 )
{
/** Matrix of size: 9 x 11 (row major format) */
real_t tmpS2[ 99 ];
int row, col, k;
real_t acc;

/* tmpS2[row][col] = sum_k tmpFx[k][row] * tmpObjS[k][col], k = 0..10 */
for (row = 0; row < 9; ++row)
{
for (col = 0; col < 11; ++col)
{
/* Seed with the k = 0 term so the summation order matches the unrolled form. */
acc = tmpFx[ row ] * tmpObjS[ col ];
for (k = 1; k < 11; ++k)
acc += tmpFx[ k * 9 + row ] * tmpObjS[ k * 11 + col ];
tmpS2[ row * 11 + col ] = acc;
}
}

/* tmpS1[row][col] = sum_k tmpS2[row][k] * tmpFu[k][col], k = 0..10 */
for (row = 0; row < 9; ++row)
{
for (col = 0; col < 3; ++col)
{
acc = tmpS2[ row * 11 ] * tmpFu[ col ];
for (k = 1; k < 11; ++k)
acc += tmpS2[ row * 11 + k ] * tmpFu[ k * 3 + col ];
tmpS1[ row * 3 + col ] = acc;
}
}
}
/* Build the terminal-cost matrices QN2 (54 entries) and QN1 (81 entries)
 * from tmpObjSEndTerm (36 entries).
 *
 * tmpQN2 layout: the second 18 entries of tmpObjSEndTerm go first,
 * then an 18-entry zero block, then the first 18 entries.
 * tmpQN1 is a block rearrangement of tmpQN2: for each of 9 rows,
 * columns 0-2 take tmpQN2[r*6+3 .. r*6+5], columns 3-5 are zero,
 * and columns 6-8 take tmpQN2[r*6 .. r*6+2].
 */
void acado_setObjQN1QN2( real_t* const tmpObjSEndTerm, real_t* const tmpQN1, real_t* const tmpQN2 )
{
	int i;
	int r;

	/* Fill tmpQN2: swapped halves of tmpObjSEndTerm with a zero middle block. */
	for (i = 0; i < 18; ++i)
	{
		tmpQN2[i] = tmpObjSEndTerm[i + 18];
		tmpQN2[i + 18] = 0.0;
		tmpQN2[i + 36] = tmpObjSEndTerm[i];
	}

	/* Rearrange tmpQN2 rows (6 entries each) into tmpQN1 rows (9 entries each). */
	for (r = 0; r < 9; ++r)
	{
		for (i = 0; i < 3; ++i)
		{
			tmpQN1[r * 9 + i] = tmpQN2[r * 6 + 3 + i];
			tmpQN1[r * 9 + 3 + i] = 0.0;
			tmpQN1[r * 9 + 6 + i] = tmpQN2[r * 6 + i];
		}
	}
}
/* Evaluate the least-squares objective along the horizon.
 *
 * For each of the 20 stages: pack the stage input vector from x (9),
 * u (3) and od (9), evaluate the stage LSQ residual and its Jacobians,
 * store the residual into Dy, and derive the stage weighting matrices
 * Q1/Q2, R1/R2 and S1. Afterwards handle the terminal stage: pack x and
 * od for node 20, evaluate the end-term residual into DyN, and build
 * the terminal weights QN1/QN2.
 */
void acado_evaluateObjective(  )
{
	int runObj;
	int i;

	for (runObj = 0; runObj < 20; ++runObj)
	{
		/* Stage input: [ x(9) | u(3) | od(9) ]. */
		for (i = 0; i < 9; ++i)
			acadoWorkspace.objValueIn[i] = acadoVariables.x[runObj * 9 + i];
		for (i = 0; i < 3; ++i)
			acadoWorkspace.objValueIn[9 + i] = acadoVariables.u[runObj * 3 + i];
		for (i = 0; i < 9; ++i)
			acadoWorkspace.objValueIn[12 + i] = acadoVariables.od[runObj * 9 + i];

		acado_evaluateLSQ( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );

		/* Stage residual (11 entries per stage). */
		for (i = 0; i < 11; ++i)
			acadoWorkspace.Dy[runObj * 11 + i] = acadoWorkspace.objValueOut[i];

		/* Jacobian w.r.t. x starts at offset 11, w.r.t. u at offset 110. */
		acado_setObjQ1Q2( &(acadoWorkspace.objValueOut[ 11 ]), acadoVariables.W, &(acadoWorkspace.Q1[ runObj * 81 ]), &(acadoWorkspace.Q2[ runObj * 99 ]) );
		acado_setObjR1R2( &(acadoWorkspace.objValueOut[ 110 ]), acadoVariables.W, &(acadoWorkspace.R1[ runObj * 9 ]), &(acadoWorkspace.R2[ runObj * 33 ]) );
		acado_setObjS1( &(acadoWorkspace.objValueOut[ 11 ]), &(acadoWorkspace.objValueOut[ 110 ]), acadoVariables.W, &(acadoWorkspace.S1[ runObj * 27 ]) );
	}

	/* Terminal node input: [ x(9) | od(9) ] at node index 20. */
	for (i = 0; i < 9; ++i)
		acadoWorkspace.objValueIn[i] = acadoVariables.x[180 + i];
	for (i = 0; i < 9; ++i)
		acadoWorkspace.objValueIn[9 + i] = acadoVariables.od[180 + i];

	acado_evaluateLSQEndTerm( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );

	for (i = 0; i < 6; ++i)
		acadoWorkspace.DyN[i] = acadoWorkspace.objValueOut[i];

	acado_setObjQN1QN2( acadoVariables.WN, acadoWorkspace.QN1, acadoWorkspace.QN2 );
}
/* Gu2 = Gx1 * Gu1, with Gx1 a 9x9 and Gu1/Gu2 9x3 row-major matrices.
 * The inner sum runs over k ascending, matching the original unrolled
 * left-to-right accumulation order exactly. */
void acado_multGxGu( real_t* const Gx1, real_t* const Gu1, real_t* const Gu2 )
{
	int i;
	int j;
	int k;
	for (i = 0; i < 9; ++i)
	{
		for (j = 0; j < 3; ++j)
		{
			real_t acc = 0.0;
			for (k = 0; k < 9; ++k)
				acc += Gx1[i * 9 + k] * Gu1[k * 3 + j];
			Gu2[i * 3 + j] = acc;
		}
	}
}
/* Copy a 27-entry (9x3) block from Gu1 into Gu2. */
void acado_moveGuE( real_t* const Gu1, real_t* const Gu2 )
{
	int i;
	for (i = 0; i < 27; ++i)
		Gu2[i] = Gu1[i];
}
/* Write the 3x3 product Gu1^T * Gu2 (Gu1, Gu2 are 9x3 row-major) into the
 * Hessian block of acadoWorkspace.H at row offset iRow*180 (row stride 60)
 * and column offset iCol*3. Inner sum runs over k ascending, preserving the
 * original accumulation order. */
void acado_multBTW1( real_t* const Gu1, real_t* const Gu2, int iRow, int iCol )
{
	int r;
	int c;
	int k;
	for (r = 0; r < 3; ++r)
	{
		for (c = 0; c < 3; ++c)
		{
			real_t acc = 0.0;
			for (k = 0; k < 9; ++k)
				acc += Gu1[k * 3 + r] * Gu2[k * 3 + c];
			acadoWorkspace.H[(iRow * 180) + (r * 60) + (iCol * 3) + c] = acc;
		}
	}
}
/* Accumulate the 3x3 product Gu1^T * Gu2 into the Hessian block of
 * acadoWorkspace.H at row offset iRow*180 (row stride 60) and column offset
 * iCol*3 — same block layout as acado_multBTW1, but += instead of =. */
void acado_mac_S1T_E( real_t* const Gu1, real_t* const Gu2, int iRow, int iCol )
{
	int r;
	int c;
	int k;
	for (r = 0; r < 3; ++r)
	{
		for (c = 0; c < 3; ++c)
		{
			real_t acc = 0.0;
			for (k = 0; k < 9; ++k)
				acc += Gu1[k * 3 + r] * Gu2[k * 3 + c];
			acadoWorkspace.H[(iRow * 180) + (r * 60) + (iCol * 3) + c] += acc;
		}
	}
}
/* Diagonal Hessian block: H(iRow,iRow) = Gu1^T * Gu2 + R11, followed by a
 * tiny diagonal shift (1e-10) for numerical regularization. The block sits
 * at offset iRow*183 in acadoWorkspace.H with row stride 60; R11 is 3x3
 * row-major. Summation order matches the unrolled original (k ascending,
 * R11 added last). */
void acado_multBTW1_R1( real_t* const R11, real_t* const Gu1, real_t* const Gu2, int iRow )
{
	int r;
	int c;
	int k;
	for (r = 0; r < 3; ++r)
	{
		for (c = 0; c < 3; ++c)
		{
			real_t acc = 0.0;
			for (k = 0; k < 9; ++k)
				acc += Gu1[k * 3 + r] * Gu2[k * 3 + c];
			acadoWorkspace.H[iRow * 183 + r * 60 + c] = acc + R11[r * 3 + c];
		}
	}
	/* Regularize the three diagonal entries (offsets 0, 61, 122). */
	for (r = 0; r < 3; ++r)
		acadoWorkspace.H[iRow * 183 + r * 60 + r] += 1.0000000000000000e-10;
}
/* Gu2 = Gx1^T * Gu1, with Gx1 a 9x9 and Gu1/Gu2 9x3 row-major matrices.
 * Inner sum runs over k ascending, matching the original unrolled order. */
void acado_multGxTGu( real_t* const Gx1, real_t* const Gu1, real_t* const Gu2 )
{
	int i;
	int j;
	int k;
	for (i = 0; i < 9; ++i)
	{
		for (j = 0; j < 3; ++j)
		{
			real_t acc = 0.0;
			for (k = 0; k < 9; ++k)
				acc += Gx1[k * 9 + i] * Gu1[k * 3 + j];
			Gu2[i * 3 + j] = acc;
		}
	}
}
/* Gu3 = Q11 * Gu1 + Gu2, with Q11 a 9x9 and Gu1/Gu2/Gu3 9x3 row-major
 * matrices. Inner sum runs over k ascending with the Gu2 term added last,
 * matching the original unrolled accumulation order. */
void acado_multQEW2( real_t* const Q11, real_t* const Gu1, real_t* const Gu2, real_t* const Gu3 )
{
	int i;
	int j;
	int k;
	for (i = 0; i < 9; ++i)
	{
		for (j = 0; j < 3; ++j)
		{
			real_t acc = 0.0;
			for (k = 0; k < 9; ++k)
				acc += Q11[i * 9 + k] * Gu1[k * 3 + j];
			Gu3[i * 3 + j] = acc + Gu2[i * 3 + j];
		}
	}
}
/* w13 = Gx1^T * w11 + w12, with Gx1 a 9x9 row-major matrix and w11/w12/w13
 * 9-vectors. Inner sum runs over k ascending with w12 added last, matching
 * the original unrolled accumulation order. */
void acado_macATw1QDy( real_t* const Gx1, real_t* const w11, real_t* const w12, real_t* const w13 )
{
	int i;
	int k;
	for (i = 0; i < 9; ++i)
	{
		real_t acc = 0.0;
		for (k = 0; k < 9; ++k)
			acc += Gx1[k * 9 + i] * w11[k];
		w13[i] = acc + w12[i];
	}
}
/* U1 += Gu1^T * w11, with Gu1 a 9x3 row-major matrix, w11 a 9-vector and
 * U1 a 3-vector. Inner sum runs over k ascending, matching the original. */
void acado_macBTw1( real_t* const Gu1, real_t* const w11, real_t* const U1 )
{
	int j;
	int k;
	for (j = 0; j < 3; ++j)
	{
		real_t acc = 0.0;
		for (k = 0; k < 9; ++k)
			acc += Gu1[k * 3 + j] * w11[k];
		U1[j] += acc;
	}
}
/* U1 += Gu1^T * w11 — same computation as acado_macBTw1, kept as a separate
 * symbol (the generator emits one per call site role). Inner sum runs over
 * k ascending, matching the original. */
void acado_macS1TSbar( real_t* const Gu1, real_t* const w11, real_t* const U1 )
{
	int j;
	int k;
	for (j = 0; j < 3; ++j)
	{
		real_t acc = 0.0;
		for (k = 0; k < 9; ++k)
			acc += Gu1[k * 3 + j] * w11[k];
		U1[j] += acc;
	}
}
/* w13 = Q11 * w11 + w12, with Q11 a 9x9 row-major matrix. The inner dot
 * product is summed in the same left-to-right order as the unrolled code,
 * and w12 is added last, preserving floating-point results bit-for-bit. */
void acado_macQSbarW2( real_t* const Q11, real_t* const w11, real_t* const w12, real_t* const w13 )
{
int i;
int j;
for (i = 0; i < 9; ++i)
{
real_t dot = (real_t)0.0;
for (j = 0; j < 9; ++j)
dot += Q11[i * 9 + j] * w11[j];
w13[i] = dot + w12[i];
}
}
/* Accumulate w12 += Gx1 * w11, with Gx1 a 9x9 row-major matrix.
 * Each row's dot product is formed first (left-to-right, as in the
 * unrolled expression) and then added into w12. */
void acado_macASbar( real_t* const Gx1, real_t* const w11, real_t* const w12 )
{
int i;
int j;
for (i = 0; i < 9; ++i)
{
real_t dot = (real_t)0.0;
for (j = 0; j < 9; ++j)
dot += Gx1[i * 9 + j] * w11[j];
w12[i] += dot;
}
}
/* Expansion step: w12 += Gx1 * w11 followed by w12 += Gu1 * U1.
 * Gx1 is 9x9 and Gu1 is 9x3, both row-major. The state contribution is
 * applied to all nine entries before the control contribution, and each
 * dot product sums left-to-right — matching the unrolled original. */
void acado_expansionStep( real_t* const Gx1, real_t* const Gu1, real_t* const U1, real_t* const w11, real_t* const w12 )
{
int i;
int j;
for (i = 0; i < 9; ++i)
{
real_t dot = (real_t)0.0;
for (j = 0; j < 9; ++j)
dot += Gx1[i * 9 + j] * w11[j];
w12[i] += dot;
}
for (i = 0; i < 9; ++i)
{
real_t dot = (real_t)0.0;
for (j = 0; j < 3; ++j)
dot += Gu1[i * 3 + j] * U1[j];
w12[i] += dot;
}
}
/* Copy the transpose of the 3x3 Hessian block at (iCol, iRow) into the block
 * at (iRow, iCol). H is stored row-major with a row stride of 60 entries
 * (20 blocks * 3 controls); each block row therefore starts 60 apart. */
void acado_copyHTH( int iRow, int iCol )
{
int r;
int c;
for (r = 0; r < 3; ++r)
{
for (c = 0; c < 3; ++c)
{
acadoWorkspace.H[(iRow * 180 + r * 60) + (iCol * 3 + c)] = acadoWorkspace.H[(iCol * 180 + c * 60) + (iRow * 3 + r)];
}
}
}
/* RDy1 = R2 * Dy1, with R2 a 3x11 row-major matrix and Dy1 an 11-vector.
 * Each output entry is a left-to-right dot product, as in the unrolled form. */
void acado_multRDy( real_t* const R2, real_t* const Dy1, real_t* const RDy1 )
{
int i;
int j;
for (i = 0; i < 3; ++i)
{
real_t dot = (real_t)0.0;
for (j = 0; j < 11; ++j)
dot += R2[i * 11 + j] * Dy1[j];
RDy1[i] = dot;
}
}
/* QDy1 = Q2 * Dy1, with Q2 a 9x11 row-major matrix and Dy1 an 11-vector.
 * Each output entry is a left-to-right dot product, as in the unrolled form. */
void acado_multQDy( real_t* const Q2, real_t* const Dy1, real_t* const QDy1 )
{
int i;
int j;
for (i = 0; i < 9; ++i)
{
real_t dot = (real_t)0.0;
for (j = 0; j < 11; ++j)
dot += Q2[i * 11 + j] * Dy1[j];
QDy1[i] = dot;
}
}
/* Condensing preparation step (N = 20 shooting intervals, nx = 9, nu = 3).
 *
 * For each column `col` of the condensed Hessian:
 *   1. seed the sensitivity block E for this column,
 *   2. propagate E forward through the remaining intervals (E_{k} = Gx_k * E_{k-1}),
 *   3. sweep backward multiplying by the terminal weight QN1 and the stage
 *      weights Q1, accumulating the column of H (below the diagonal) and the
 *      diagonal block R1.
 * E blocks for column `col` are packed contiguously starting at the
 * triangular offset col*(41-col)/2 (each block is 9*3 = 27 reals).
 * Afterwards the strict upper triangle of H is filled by transposing the
 * lower triangle, and the state residuals d are staged into sbar.
 */
void acado_condensePrep(  )
{
int row;
int col;
int base;
for (col = 0; col < 20; ++col)
{
/* triangular packing offset of this column's first E block */
base = (col * (41 - col)) / 2;
acado_moveGuE( &(acadoWorkspace.evGu[ col * 27 ]), &(acadoWorkspace.E[ base * 27 ]) );
/* forward propagation: E_{col+row} = evGx_{col+row} * E_{col+row-1} */
for (row = 1; row < 20 - col; ++row)
{
acado_multGxGu( &(acadoWorkspace.evGx[ (col + row) * 81 ]), &(acadoWorkspace.E[ (base + row - 1) * 27 ]), &(acadoWorkspace.E[ (base + row) * 27 ]) );
}
/* terminal weighting of the last propagated block */
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ (base - col + 19) * 27 ]), acadoWorkspace.W1 );
/* backward sweep accumulating H(row, col) for row > col */
for (row = 19; row > col; --row)
{
acado_multBTW1( &(acadoWorkspace.evGu[ row * 27 ]), acadoWorkspace.W1, row, col );
acado_mac_S1T_E( &(acadoWorkspace.S1[ row * 27 ]), &(acadoWorkspace.E[ (base + row - col - 1) * 27 ]), row, col );
acado_multGxTGu( &(acadoWorkspace.evGx[ row * 81 ]), acadoWorkspace.W1, acadoWorkspace.W2 );
acado_multQEW2( &(acadoWorkspace.Q1[ row * 81 ]), &(acadoWorkspace.E[ (base + row - col - 1) * 27 ]), acadoWorkspace.W2, acadoWorkspace.W1 );
}
/* diagonal block of H */
acado_multBTW1_R1( &(acadoWorkspace.R1[ col * 9 ]), &(acadoWorkspace.evGu[ col * 27 ]), acadoWorkspace.W1, col );
}
/* fill the strict upper triangle of H by transposing the lower triangle */
for (col = 1; col < 20; ++col)
{
for (row = 0; row < col; ++row)
{
acado_copyHTH( row, col );
}
}
/* stage the propagated state residuals (offset by the nx = 9 initial states) */
for (row = 0; row < 180; ++row)
acadoWorkspace.sbar[row + 9] = acadoWorkspace.d[row];
}
void acado_condenseFdb( )
{
/* Condensing feedback step for a 20-stage OCP with nx = 9 states,
 * nu = 3 controls, ny = 11 stage references and nyN = 6 terminal
 * references. Builds the initial-state residual Dx0, the objective
 * residuals Dy/DyN, the condensed gradient g, the state residual
 * trajectory sbar (forward sweep), the adjoint contributions to g
 * (backward sweep), and the simple control bounds lb/ub.
 * Rewritten with loops; strides and floating-point evaluation order
 * match the fully unrolled generated code exactly. */
int lRun1;
int lRun2;

/* Initial-state residual: Dx0 = x0 - x_0 (embedded initial value). */
for (lRun1 = 0; lRun1 < 9; ++lRun1)
acadoWorkspace.Dx0[lRun1] = acadoVariables.x0[lRun1] - acadoVariables.x[lRun1];

/* Objective residuals: Dy -= y (20 stages x 11), DyN -= yN (6). */
for (lRun1 = 0; lRun1 < 220; ++lRun1)
acadoWorkspace.Dy[lRun1] -= acadoVariables.y[lRun1];
for (lRun1 = 0; lRun1 < 6; ++lRun1)
acadoWorkspace.DyN[lRun1] -= acadoVariables.yN[lRun1];

/* Per stage k: g_u += R2_k * Dy_k (R2 stride 3*11 = 33) and
 * QDy_k = Q2_k * Dy_k (Q2 stride 9*11 = 99). */
for (lRun1 = 0; lRun1 < 20; ++lRun1)
acado_multRDy( &(acadoWorkspace.R2[ lRun1 * 33 ]), &(acadoWorkspace.Dy[ lRun1 * 11 ]), &(acadoWorkspace.g[ lRun1 * 3 ]) );
for (lRun1 = 0; lRun1 < 20; ++lRun1)
acado_multQDy( &(acadoWorkspace.Q2[ lRun1 * 99 ]), &(acadoWorkspace.Dy[ lRun1 * 11 ]), &(acadoWorkspace.QDy[ lRun1 * 9 ]) );

/* Terminal stage: QDy_N = QN2 (9x6) * DyN. The dot product is seeded
 * with the first term so the summation order is identical to the
 * unrolled expression. */
for (lRun1 = 0; lRun1 < 9; ++lRun1)
{
acadoWorkspace.QDy[lRun1 + 180] = acadoWorkspace.QN2[lRun1 * 6] * acadoWorkspace.DyN[0];
for (lRun2 = 1; lRun2 < 6; ++lRun2)
acadoWorkspace.QDy[lRun1 + 180] += acadoWorkspace.QN2[lRun1 * 6 + lRun2] * acadoWorkspace.DyN[lRun2];
}

/* Forward sweep: sbar_0 = Dx0; sbar_{k+1} += A_k * sbar_k
 * (evGx stride 9*9 = 81; sbar already holds d from condensePrep). */
for (lRun1 = 0; lRun1 < 9; ++lRun1)
acadoWorkspace.sbar[lRun1] = acadoWorkspace.Dx0[lRun1];
for (lRun1 = 0; lRun1 < 20; ++lRun1)
acado_macASbar( &(acadoWorkspace.evGx[ lRun1 * 81 ]), &(acadoWorkspace.sbar[ lRun1 * 9 ]), &(acadoWorkspace.sbar[ lRun1 * 9 + 9 ]) );

/* Terminal adjoint: w1 = QN1 (9x9) * sbar_N + QDy_N, again seeded with
 * the first product to keep the exact summation order. */
for (lRun1 = 0; lRun1 < 9; ++lRun1)
{
acadoWorkspace.w1[lRun1] = acadoWorkspace.QN1[lRun1 * 9] * acadoWorkspace.sbar[180];
for (lRun2 = 1; lRun2 < 9; ++lRun2)
acadoWorkspace.w1[lRun1] += acadoWorkspace.QN1[lRun1 * 9 + lRun2] * acadoWorkspace.sbar[lRun2 + 180];
acadoWorkspace.w1[lRun1] += acadoWorkspace.QDy[lRun1 + 180];
}

/* Backward sweep, stages 19 down to 1:
 *   g_k  += B_k^T * w1 + S1_k^T * sbar_k
 *   w2    = A_k^T * w1 + QDy_k
 *   w1    = Q1_k * sbar_k + w2
 * (evGu/S1 stride 9*3 = 27; evGx/Q1 stride 81). */
for (lRun1 = 19; lRun1 > 0; --lRun1)
{
acado_macBTw1( &(acadoWorkspace.evGu[ lRun1 * 27 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ lRun1 * 3 ]) );
acado_macS1TSbar( &(acadoWorkspace.S1[ lRun1 * 27 ]), &(acadoWorkspace.sbar[ lRun1 * 9 ]), &(acadoWorkspace.g[ lRun1 * 3 ]) );
acado_macATw1QDy( &(acadoWorkspace.evGx[ lRun1 * 81 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ lRun1 * 9 ]), acadoWorkspace.w2 );
acado_macQSbarW2( &(acadoWorkspace.Q1[ lRun1 * 81 ]), &(acadoWorkspace.sbar[ lRun1 * 9 ]), acadoWorkspace.w2, acadoWorkspace.w1 );
}
/* Stage 0 closes the sweep (no further propagation of w1 needed). */
acado_macBTw1( acadoWorkspace.evGu, acadoWorkspace.w1, acadoWorkspace.g );
acado_macS1TSbar( acadoWorkspace.S1, acadoWorkspace.sbar, acadoWorkspace.g );

/* Simple bounds on the control increments: lb/ub are the distance from
 * the current control trajectory to the fixed bound values. */
for (lRun1 = 0; lRun1 < 60; ++lRun1)
acadoWorkspace.lb[lRun1] = acadoVariables.lbValues[lRun1] - acadoVariables.u[lRun1];
for (lRun1 = 0; lRun1 < 60; ++lRun1)
acadoWorkspace.ub[lRun1] = acadoVariables.ubValues[lRun1] - acadoVariables.u[lRun1];
}
void acado_expand( )
{
int lRun1;
/* Expansion step of the condensing approach: recover the full primal
 * solution from the condensed QP solution stored in acadoWorkspace.x. */

/* The QP solution holds the control increments; add them to the guess. */
for (lRun1 = 0; lRun1 < 60; ++lRun1)
acadoVariables.u[lRun1] += acadoWorkspace.x[lRun1];

/* Seed the state expansion with the initial-state residual Dx0 and the
 * per-interval integration residuals d. */
for (lRun1 = 0; lRun1 < 9; ++lRun1)
acadoWorkspace.sbar[lRun1] = acadoWorkspace.Dx0[lRun1];
for (lRun1 = 0; lRun1 < 180; ++lRun1)
acadoWorkspace.sbar[lRun1 + 9] = acadoWorkspace.d[lRun1];

/* Forward sweep over the 20 shooting intervals (9 states, 3 controls):
 * sbar[k+1] += Gx[k]*sbar[k] + Gu[k]*du[k]. */
for (lRun1 = 0; lRun1 < 20; ++lRun1)
acado_expansionStep( &(acadoWorkspace.evGx[ lRun1 * 81 ]), &(acadoWorkspace.evGu[ lRun1 * 27 ]), &(acadoWorkspace.x[ lRun1 * 3 ]), &(acadoWorkspace.sbar[ lRun1 * 9 ]), &(acadoWorkspace.sbar[ lRun1 * 9 + 9 ]) );

/* Apply the accumulated state increments to the trajectory. */
for (lRun1 = 0; lRun1 < 189; ++lRun1)
acadoVariables.x[lRun1] += acadoWorkspace.sbar[lRun1];
}
int acado_preparationStep( )
{
int status;
/* RTI preparation phase: integrate/linearize the model over the horizon,
 * evaluate the objective terms, and build the condensed QP data.
 * Returns the model-simulation status code. */
status = acado_modelSimulation();
acado_evaluateObjective( );
acado_condensePrep( );
return status;
}
int acado_feedbackStep( )
{
int qpStatus;
/* RTI feedback phase: finish condensing with the measured state,
 * solve the condensed QP, and expand back to the full trajectory.
 * Returns the QP solver status. */
acado_condenseFdb( );
qpStatus = acado_solve( );
acado_expand( );
return qpStatus;
}
int acado_initializeSolver( )
{
int i;
int ret;
/* This is a function which must be called once before any other function call! */
ret = 0;
memset(&acadoWorkspace, 0, sizeof( acadoWorkspace ));
/* Constant input bounds, repeated for each of the 20 control intervals
 * (3 inputs each): the first two inputs are limited to +/- 0.7853981634
 * (approx. pi/4 rad), the third to [4.9033, 14.7099]. */
for (i = 0; i < 60; i += 3)
{
acadoVariables.lbValues[i] = -7.8539816339744828e-01;
acadoVariables.lbValues[i + 1] = -7.8539816339744828e-01;
acadoVariables.lbValues[i + 2] = 4.9032999999999998e+00;
acadoVariables.ubValues[i] = 7.8539816339744828e-01;
acadoVariables.ubValues[i + 1] = 7.8539816339744828e-01;
acadoVariables.ubValues[i + 2] = 1.4709899999999999e+01;
}
return ret;
}
void acado_initializeNodesByForwardSimulation( )
{
int node;
int j;
/* Initialize the state trajectory by simulating forward from node 0:
 * for each of the 20 intervals, pack state (9), controls (3) and online
 * data (9) into the integrator input, integrate one interval, and store
 * the result as the next node's state. */
for (node = 0; node < 20; ++node)
{
for (j = 0; j < 9; ++j)
state[j] = acadoVariables.x[node * 9 + j];
for (j = 0; j < 3; ++j)
state[117 + j] = acadoVariables.u[node * 3 + j];
for (j = 0; j < 9; ++j)
state[120 + j] = acadoVariables.od[node * 9 + j];
/* The second argument requests a full reset only on the first node. */
acado_integrate(state, node == 0);
for (j = 0; j < 9; ++j)
acadoVariables.x[node * 9 + 9 + j] = state[j];
}
}
void acado_shiftStates( int strategy, real_t* const xEnd, real_t* const uEnd )
{
int node;
int j;
/* Shift the state trajectory one interval forward (warm start).
 * strategy 1: overwrite the terminal node with xEnd (if given).
 * strategy 2: simulate the last interval forward, using uEnd (or the
 * last applied control if uEnd is NULL) and the terminal online data. */
for (node = 0; node < 20; ++node)
{
for (j = 0; j < 9; ++j)
acadoVariables.x[node * 9 + j] = acadoVariables.x[node * 9 + 9 + j];
}
if (strategy == 1 && xEnd != 0)
{
for (j = 0; j < 9; ++j)
acadoVariables.x[180 + j] = xEnd[j];
}
else if (strategy == 2)
{
for (j = 0; j < 9; ++j)
state[j] = acadoVariables.x[180 + j];
if (uEnd != 0)
{
for (j = 0; j < 3; ++j)
state[117 + j] = uEnd[j];
}
else
{
for (j = 0; j < 3; ++j)
state[117 + j] = acadoVariables.u[57 + j];
}
for (j = 0; j < 9; ++j)
state[120 + j] = acadoVariables.od[180 + j];
acado_integrate(state, 1);
for (j = 0; j < 9; ++j)
acadoVariables.x[180 + j] = state[j];
}
}
void acado_shiftControls( real_t* const uEnd )
{
int j;
/* Shift the control trajectory one interval forward (warm start);
 * the last interval is overwritten with uEnd when provided. */
for (j = 0; j < 57; ++j)
acadoVariables.u[j] = acadoVariables.u[j + 3];
if (uEnd != 0)
{
for (j = 0; j < 3; ++j)
acadoVariables.u[57 + j] = uEnd[j];
}
}
real_t acado_getKKT( )
{
real_t kkt;
real_t prd;
int index;
/* KKT tolerance: |g'x| plus the complementarity contribution of every
 * bound whose multiplier y[i] is meaningfully nonzero (> 1e-12). */
kkt = 0.0;
for (index = 0; index < 60; ++index)
kkt += acadoWorkspace.g[index] * acadoWorkspace.x[index];
kkt = fabs( kkt );
for (index = 0; index < 60; ++index)
{
prd = acadoWorkspace.y[index];
if (prd > 1e-12)
kkt += fabs(acadoWorkspace.lb[index] * prd);
else if (prd < -1e-12)
kkt += fabs(acadoWorkspace.ub[index] * prd);
}
return kkt;
}
real_t acado_getObjective( )
{
real_t objVal;
int lRun1;
int j;
/** Row vector of size: 11 */
real_t tmpDy[ 11 ];
/** Row vector of size: 6 */
real_t tmpDyN[ 6 ];
real_t stageSum;
real_t termSum;
/* Stage residuals Dy = h(x,u,od) - y for all 20 intervals. */
for (lRun1 = 0; lRun1 < 20; ++lRun1)
{
for (j = 0; j < 9; ++j)
acadoWorkspace.objValueIn[j] = acadoVariables.x[lRun1 * 9 + j];
for (j = 0; j < 3; ++j)
acadoWorkspace.objValueIn[9 + j] = acadoVariables.u[lRun1 * 3 + j];
for (j = 0; j < 9; ++j)
acadoWorkspace.objValueIn[12 + j] = acadoVariables.od[lRun1 * 9 + j];
acado_evaluateLSQ( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );
for (j = 0; j < 11; ++j)
acadoWorkspace.Dy[lRun1 * 11 + j] = acadoWorkspace.objValueOut[j] - acadoVariables.y[lRun1 * 11 + j];
}
/* Terminal residual DyN = hN(xN, od) - yN. */
for (j = 0; j < 9; ++j)
acadoWorkspace.objValueIn[j] = acadoVariables.x[180 + j];
for (j = 0; j < 9; ++j)
acadoWorkspace.objValueIn[9 + j] = acadoVariables.od[180 + j];
acado_evaluateLSQEndTerm( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );
for (j = 0; j < 6; ++j)
acadoWorkspace.DyN[j] = acadoWorkspace.objValueOut[j] - acadoVariables.yN[j];
objVal = 0.0000000000000000e+00;
/* Stage cost: Dy' W Dy, reading only W's diagonal (stride 12 over the
 * 11x11 row-major weighting matrix). Each stage's sum is accumulated
 * left-to-right in a local to keep the floating-point evaluation order
 * identical to the generated code. */
for (lRun1 = 0; lRun1 < 20; ++lRun1)
{
for (j = 0; j < 11; ++j)
tmpDy[j] = acadoWorkspace.Dy[lRun1 * 11 + j] * acadoVariables.W[j * 12];
stageSum = 0.0;
for (j = 0; j < 11; ++j)
stageSum += acadoWorkspace.Dy[lRun1 * 11 + j] * tmpDy[j];
objVal += stageSum;
}
/* Terminal cost: DyN' WN DyN (diagonal of the 6x6 matrix, stride 7). */
for (j = 0; j < 6; ++j)
tmpDyN[j] = acadoWorkspace.DyN[j] * acadoVariables.WN[j * 7];
termSum = 0.0;
for (j = 0; j < 6; ++j)
termSum += acadoWorkspace.DyN[j] * tmpDyN[j];
objVal += termSum;
objVal *= 0.5;
return objVal;
}
|
drupal7_fmt_plug.c | /*
* Drupal 7 phpass variant using SHA-512 and hashes cut at 258 bits.
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* These are 8 byte salted hashes with a loop count that defines the number
* of loops to compute. Drupal uses 258 bits of the hash, this is a multiple of
* 6 but not 8. I presume this is for getting unpadded base64. Anyway we store
* an extra byte but for now we will only compare 256 bits. I doubt that will
* pose any problems. Actually I'm not quite sure the last bits end up correct
* from the current version of get_binary().
*
* Based on [old thick] phpass-md5.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_drupal7;
#elif FMT_REGISTERS_H
john_register_one(&fmt_drupal7);
#else
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Drupal7"
#define FORMAT_NAME "$S$"
#define FORMAT_TAG "$S$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (x16385)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 47
#define CIPHERTEXT_LENGTH 55
#define DIGEST_SIZE (512/8)
#define BINARY_SIZE (258/8) // ((258+7)/8)
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Known-good {ciphertext, plaintext} vectors used by the self-test;
 * all carry the standard Drupal 7 "$S$" tag. */
static struct fmt_tests tests[] = {
	{"$S$CwkjgAKeSx2imSiN3SyBEg8e0sgE2QOx4a/VIfCHN0BZUNAWCr1X", "virtualabc"},
	{"$S$CFURCPa.k6FAEbJPgejaW4nijv7rYgGc4dUJtChQtV4KLJTPTC/u", "password"},
	{"$S$C6x2r.aW5Nkg7st6/u.IKWjTerHXscjPtu4spwhCVZlP89UKcbb/", "NEW_TEMP_PASSWORD"},
	{NULL}
};
/*
* NOTE, due to the 0x4000 iteration count, I am not wasting time pre-loading
* keys/salts. We will simply add SIMD code to the crypt_all. We could only
* gain < .1% worrying about all the extra stuff from set_key, get_key, the
* hashes, etc needed to split out SIMD. We just keep all input data in 'flat'
* format, switch to SIMD, do the 0x4000 loops, and put output back into 'flat'
* layout again. So we have no 'static' SIMD objects.
*/
static unsigned char *cursalt;   /* current salt blob, installed by set_salt() */
static unsigned loopCnt;         /* iteration count (1 << count_log2) for the current salt */
static unsigned char (*EncKey)[PLAINTEXT_LENGTH + 1]; /* candidate keys, flat layout */
static unsigned int *EncKeyLen;  /* length of each candidate key */
static char (*crypt_key)[DIGEST_SIZE]; /* per-candidate SHA-512 output */
/* One-time format initialization: scale the key count for OpenMP and
 * allocate the flat key/length/digest buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int threads = omp_get_max_threads();

	/* min scales with the thread count alone; max gets an extra
	 * OMP_SCALE factor so each thread processes several keys per call. */
	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	EncKey = mem_calloc(self->params.max_keys_per_crypt, sizeof(*EncKey));
	EncKeyLen = mem_calloc(self->params.max_keys_per_crypt, sizeof(*EncKeyLen));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(EncKeyLen);
	MEM_FREE(EncKey);
}
/* Accept only well-formed Drupal 7 hashes: exact length, "$S$" tag,
 * base-64 payload, and a sane iteration-count character. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	unsigned count_log2;
	int i;

	if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
		return 0;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	/* Everything after the tag (count char, salt, hash) must be
	 * valid crypt-style base-64. */
	for (i = FORMAT_TAG_LEN; i < CIPHERTEXT_LENGTH; ++i)
		if (atoi64[ARCH_INDEX(ciphertext[i])] == 0x7F)
			return 0;
	/* The character right after the tag encodes log2 of the loop count. */
	count_log2 = atoi64[ARCH_INDEX(ciphertext[3])];
	return (count_log2 >= 7 && count_log2 <= 31);
}
static void set_salt(void *salt)
{
/* Derive the loop count from the base-64 character stored at offset 8
 * of the salt blob.  NOTE(review): offset 8 is one past SALT_SIZE (8),
 * so get_salt() presumably stores the count character after the 8 salt
 * bytes — confirm against the (not visible here) get_salt()/salt struct. */
loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
cursalt = salt;
}
static void set_key(char *key, int index)
{
int len;
len = strlen(key);
EncKeyLen[index] = len;
memcpy(((char*)EncKey[index]), key, len + 1);
}
/* Return the NUL-terminated candidate previously stored by set_key(). */
static char *get_key(int index)
{
return (char*)EncKey[index];
}
/* Fast scan over all computed digests: compare only the first ARCH_SIZE
 * bytes of each; a match is confirmed against the full BINARY_SIZE in
 * cmp_one(). */
static int cmp_all(void *binary, int count)
{
int index;
for(index = 0; index < count; index++)
if (!memcmp(binary, crypt_key[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full BINARY_SIZE comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/* cmp_one() already compares all stored binary bytes, so no further
 * verification is needed here. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Compute SHA512(salt.pw), then loopCnt-1 times SHA512(prev.pw), for all
 * candidates.  SIMD path keeps data in interleaved lane format for the
 * whole loop and flattens only the final digests; scalar path follows
 * the same scheme with OpenSSL-style SHA512_* calls. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT)
{
#ifdef SIMD_COEF_64
/* Cache-aligned interleaved input buffer, one 128-byte SHA-512 block
 * per key lane. */
unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;
ARCH_WORD_64 *keys64;
unsigned i, j, len, Lcount = loopCnt;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys64 = (ARCH_WORD_64*)keys;
memset(keys, 0, 128*MAX_KEYS_PER_CRYPT);
/* First block: salt (8 bytes) || key || 0x80 padding, with the bit
 * length stored in the last 64-bit word of the lane. */
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
len = EncKeyLen[index+i];
for (j = 0; j < 8; ++j)
keys[GETPOS(j, i)] = cursalt[j];
for (j = 0; j < len; ++j)
keys[GETPOS(j+8, i)] = EncKey[index+i][j];
keys[GETPOS(j+8, i)] = 0x80;
keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+8) << 3;
}
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
/* Subsequent blocks: prior 64-byte digest (left in place by
 * OUTPUT_AS_INP_FMT) || key || padding; only the tail changes. */
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
len = EncKeyLen[index+i];
for (j = 0; j < len; ++j)
keys[GETPOS(j+64, i)] = EncKey[index+i][j];
keys[GETPOS(j+64, i)] = 0x80;
keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+64) << 3;
}
while (--Lcount)
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// Last one with FLAT_OUT
SIMDSHA512body(keys, (ARCH_WORD_64*)crypt_key[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
SHA512_CTX ctx;
/* tmp holds digest || key; the key tail is written once and reused
 * for every iteration. */
unsigned char tmp[DIGEST_SIZE + PLAINTEXT_LENGTH];
int len = EncKeyLen[index];
unsigned Lcount = loopCnt - 1;
SHA512_Init( &ctx );
SHA512_Update( &ctx, cursalt, 8 );
SHA512_Update( &ctx, EncKey[index], len );
memcpy(&tmp[DIGEST_SIZE], (char *)EncKey[index], len);
SHA512_Final( tmp, &ctx);
len += DIGEST_SIZE;
do {
SHA512_Init( &ctx );
SHA512_Update( &ctx, tmp, len);
SHA512_Final( tmp, &ctx);
} while (--Lcount);
SHA512_Init( &ctx );
SHA512_Update( &ctx, tmp, len);
SHA512_Final( (unsigned char *) crypt_key[index], &ctx);
#endif
}
return count;
}
// Decode the crypt64 payload into raw digest bytes. Skips the tag, the
// iteration-count character (+1) and 8 salt characters, then unpacks
// 6-bit groups little-endian style: each loop pass consumes 4 input
// characters and emits 3 output bytes; the tail after the loop handles
// the final partial group.
static void * get_binary(char *ciphertext)
{
int i;
unsigned sixbits;
// Union guarantees 32-bit alignment of the returned byte buffer.
static union {
unsigned char u8[BINARY_SIZE + 1];
ARCH_WORD_32 u32;
} out;
int bidx=0;
char *pos;
pos = &ciphertext[FORMAT_TAG_LEN + 1 + 8];
for (i = 0; i < 10; ++i) {
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<6);
sixbits >>= 2;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<4);
sixbits >>= 4;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<2);
}
// Trailing 3 characters -> last 2 output bytes.
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<6);
sixbits >>= 2;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<4);
return out.u8;
}
// Build the 9-byte salt blob: 8 raw salt characters followed by the
// crypt64 character that encodes log2(loop count) — set_salt() decodes
// that last byte. Union keeps the static buffer 32-bit aligned.
static void * get_salt(char *ciphertext)
{
static union {
unsigned char u8[SALT_SIZE + 1];
ARCH_WORD_32 u32;
} salt;
// store off the 'real' 8 bytes of salt
memcpy(salt.u8, &ciphertext[FORMAT_TAG_LEN+1], 8);
// append the 1 byte of loop count information.
salt.u8[8] = ciphertext[FORMAT_TAG_LEN];
return salt.u8;
}
// Hash-table bucket extractors: mask the first 32 bits of each computed
// digest with progressively wider PH_MASK_* values.
static int get_hash_0(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_0; }
static int get_hash_1(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_1; }
static int get_hash_2(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_2; }
static int get_hash_3(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_3; }
static int get_hash_4(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_4; }
static int get_hash_5(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_5; }
static int get_hash_6(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_6; }
// 10-bit bucket over the first salt word.
static int salt_hash(void *salt)
{
return *((ARCH_WORD_32 *)salt) & 0x3FF;
}
// Report 2^count_log2 iterations (byte 8 of the salt blob holds the
// crypt64-encoded exponent) for the "iteration count" tunable cost.
static unsigned int iteration_count(void *salt)
{
return (unsigned int) 1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]);
}
// Format descriptor: wires the drupal7 parameters and callbacks into the
// John the Ripper core.
struct fmt_main fmt_drupal7 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
// true salt is SALT_SIZE but we add the loop count
SALT_SIZE + 1,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__identity_int64_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_uint8)
// op(A') function: GB (_unop_tran__identity_int64_uint8)
// C type: int64_t
// A type: uint8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int64_t) Ax: elementwise typecast-copy, uint8 -> int64.
// Auto-generated file: fixes belong in Generator/, not here.
GrB_Info GB (_unop_apply__identity_int64_uint8)
(
int64_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every entry of Ax is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int64_t) A': transpose plus typecast; the per-entry work is done
// by the shared template using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int64_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
oracle12c_fmt_plug.c | /*
* This software is Copyright (c) 2015, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* https://www.trustwave.com/Resources/SpiderLabs-Blog/Changes-in-Oracle-Database-12c-password-hashes/
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_oracle12c;
#elif FMT_REGISTERS_H
john_register_one(&fmt_oracle12c);
#else
#include <openssl/sha.h>
#include <string.h>
#include "arch.h"
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
//#undef _OPENMP
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "sha2.h"
#include "pbkdf2_hmac_sha512.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Oracle12C"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define PLAINTEXT_LENGTH 125 // XXX
#define CIPHERTEXT_LENGTH 160
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#define BINARY_SIZE 64
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FORMAT_TAG "$oracle12c$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
static struct fmt_tests tests[] = {
{"$oracle12c$e3243b98974159cc24fd2c9a8b30ba62e0e83b6ca2fc7c55177c3a7f82602e3bdd17ceb9b9091cf9dad672b8be961a9eac4d344bdba878edc5dcb5899f689ebd8dd1be3f67bff9813a464382381ab36b", "epsilon"},
{NULL}
};
static struct custom_salt {
int saltlen;
unsigned char salt[16 + 22 + 1];
} *cur_salt;
#ifdef SIMD_COEF_64
static char (*saved_key)[SHA_BUF_SIZ*sizeof(ARCH_WORD_64)];
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
// Allocate key and output buffers sized to (possibly OMP-scaled)
// max_keys_per_crypt.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
// Release init()'s allocations.
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/*
 * valid() - structural check of one $oracle12c$ line: tag, overall
 * length bound, and a final '$'-delimited field of exactly
 * BINARY_SIZE*2 + 32 lowercase hex digits (hash + AUTH_VFR_DATA).
 *
 * Cleanup: the original mixed plain "return 0" with a single-use
 * "goto error" label; all failure paths now return 0 directly.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	if (strncasecmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH))
		return 0;
	if (strlen(ciphertext) > (FORMAT_TAG_LENGTH + CIPHERTEXT_LENGTH))
		return 0;
	p = strrchr(ciphertext, '$');
	if (!p)
		return 0;
	p = p + 1;
	/* 64-byte hash and 16-byte AUTH_VFR_DATA, both hex encoded */
	if (strlen(p) != (BINARY_SIZE * 2 + 32))
		return 0;
	if (!ishexlc(p))
		return 0;
	return 1;
}
/*
 * get_salt() - decode the 16-byte AUTH_VFR_DATA from the tail of the
 * ciphertext and append the fixed string "AUTH_PBKDF2_SPEEDY_KEY",
 * giving the 38-byte PBKDF2 salt Oracle 12c uses.
 *
 * Cleanup: the constant suffix is copied with memcpy instead of
 * strncpy — exactly 22 bytes are wanted, with no NUL terminator, which
 * is what the strncpy call happened to do but memcpy states plainly.
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int i;

	memset(&cs, 0, sizeof(cs));
	p = ciphertext + FORMAT_TAG_LENGTH + 2 * BINARY_SIZE;
	// AUTH_VFR_DATA is variable, and 16 bytes in length
	for (i = 0; i < 16; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) |
			atoi16[ARCH_INDEX(p[2*i+1])];
	/* constant 22-byte suffix, deliberately not NUL-terminated */
	memcpy(cs.salt + 16, "AUTH_PBKDF2_SPEEDY_KEY", 22);
	cs.saltlen = 16 + 22;
	return (void *)&cs;
}
// Decode the BINARY_SIZE-byte verifier hash from the hex field that
// immediately follows the format tag.
static void *get_binary(char *ciphertext)
{
// Union keeps the static byte buffer word-aligned.
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
int i;
char *p;
p = ciphertext + FORMAT_TAG_LENGTH;
for (i = 0; i < BINARY_SIZE && *p; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
// Remember the salt chosen by the cracking loop.
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
// Hash-table bucket extractors over the first 32-bit word of each digest.
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * crypt_all() - for each candidate: derive 64 bytes with
 * PBKDF2-HMAC-SHA512(key, AUTH_VFR_DATA || "AUTH_PBKDF2_SPEEDY_KEY",
 * 4096 iterations), then SHA-512 the derived key together with the raw
 * 16-byte AUTH_VFR_DATA to obtain the stored verifier.
 *
 * Cleanup: removed a dead, empty
 * "#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 / #endif" pair that
 * sat between the OpenMP pragma and the loop (leftover scaffolding).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int index;
	const int count = *pcount;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		SHA512_CTX ctx;
		int i = 0;
#if SIMD_COEF_64
		/* vectorized PBKDF2 over SSE_GROUP_SZ_SHA512 keys at once */
		int lens[SSE_GROUP_SZ_SHA512];
		unsigned char *pin[SSE_GROUP_SZ_SHA512];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = (ARCH_WORD_32*)(crypt_out[index+i]);
		}
		pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt,
		                  cur_salt->saltlen, 4096, &(x.poutc), BINARY_SIZE, 0);
#else
		pbkdf2_sha512((const unsigned char*)saved_key[index],
		              strlen(saved_key[index]), cur_salt->salt,
		              cur_salt->saltlen, 4096,
		              (unsigned char*)crypt_out[index], BINARY_SIZE, 0);
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
		for (i = 0; i < MAX_KEYS_PER_CRYPT; i++)
#endif
		{
			/* verifier = SHA512(derived_key || AUTH_VFR_DATA[0..15]) */
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, (unsigned char*)crypt_out[index + i], BINARY_SIZE);
			SHA512_Update(&ctx, cur_salt->salt, 16); // AUTH_VFR_DATA first 16 bytes
			SHA512_Final((unsigned char*)crypt_out[index + i], &ctx);
		}
	}
	return count;
}
// Quick reject: compare only the first ARCH_SIZE bytes of each digest;
// cmp_one confirms with the full BINARY_SIZE comparison.
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
// Full-width comparison for one candidate.
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// Binary comparison above is already exact; nothing more to verify.
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store one candidate password, truncated to PLAINTEXT_LENGTH and
 * always NUL-terminated. */
static void set_key(char *key, int index)
{
	size_t n = strlen(key);

	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, n);
	saved_key[index][n] = '\0';
}

/* Hand back the stored candidate for this slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
// Format descriptor: wires the oracle12c parameters and callbacks into
// the John the Ripper core.
struct fmt_main fmt_oracle12c = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_binop__max_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_int64
// A.*B function (eWiseMult): GB_AemultB__max_int64
// A*D function (colscale): GB_AxD__max_int64
// D*A function (rowscale): GB_DxB__max_int64
// C+=B function (dense accum): GB_Cdense_accumB__max_int64
// C+=b function (dense accum): GB_Cdense_accumb__max_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_int64
// C=scalar+B GB_bind1st__max_int64
// C=scalar+B' GB_bind1st_tran__max_int64
// C=A+scalar GB_bind2nd__max_int64
// C=A'+scalar GB_bind2nd_tran__max_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT64 || GxB_NO_MAX_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += max(A,B) with all three matrices dense; body in shared template.
// Auto-generated file: fixes belong in Generator/, not here.
void GB_Cdense_ewise3_accum__max_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = max(A,B) with all three matrices dense, no accumulation.
GrB_Info GB_Cdense_ewise3_noaccum__max_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the slice arrays partition
// B's entries across ntasks tasks for the template.
GrB_Info GB_Cdense_accumB__max_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (accumulate with MAX).
// Cleanup: the generated code carried two consecutive
// "return (GrB_SUCCESS) ;" statements; the second was unreachable dead
// code and has been dropped (fix belongs in Generator/ as well).
GrB_Info GB_Cdense_accumb__max_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the matching diagonal entry of D
// (here "*" is the MAX operator); work is split by the slice arrays.
GrB_Info GB_AxD__max_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the matching diagonal entry of D.
GrB_Info GB_DxB__max_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd C = A+B (union of patterns, MAX where both present),
// optionally masked by M; slice workspaces are freed via GB_FREE_ALL.
GrB_Info GB_AaddB__max_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek-slice workspaces, allocated by the template when needed
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (intersection of patterns, MAX applied where both
// present), optionally masked by M.
GrB_Info GB_AemultB__max_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek-slice workspaces, allocated by the template when needed
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = max(x, Bx [p]) with scalar x bound as the first operand;
// GBB skips entries absent from the bitmap Bb (if any).
GrB_Info GB_bind1st__max_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = max(Ax [p], y) with scalar y bound as the second operand.
GrB_Info GB_bind2nd__max_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes max(x, aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = max(x, A'): transpose A while applying MAX with bound scalar x.
GrB_Info GB_bind1st_tran__max_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (same type here)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes max(aij, y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = max(A', y): transpose A while applying MAX with bound scalar y.
GrB_Info GB_bind2nd_tran__max_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp_ctx.h | #ifndef MOBULA_INCLUDE_CONTEXT_OPENMP_CTX_H_
#define MOBULA_INCLUDE_CONTEXT_OPENMP_CTX_H_
#include <omp.h>
#include <algorithm>
#ifndef _MSC_VER
#define __pragma(id) _Pragma(#id)
#endif
namespace mobula {
#if HOST_NUM_THREADS > 1
// Launches a kernel functor inside an OpenMP parallel region, using at
// most min(n, omp_get_max_threads()) threads; every thread invokes the
// functor with the same arguments, and the kernel's internal parfor
// splits the iteration range.
template <typename Func>
class KernelRunner {
public:
explicit KernelRunner(Func func) : func_(func) {}
template <typename... Args>
void operator()(const int n, Args... args) {
const int nthreads = std::min(n, omp_get_max_threads());
#pragma omp parallel num_threads(nthreads)
{ func_(n, args...); }
}
private:
Func func_;
};
// Thin wrappers over the OpenMP runtime (0-based thread numbering).
MOBULA_DEVICE inline int get_num_threads() { return omp_get_num_threads(); }
MOBULA_DEVICE inline int get_thread_num() { return omp_get_thread_num(); }
// Run F(i) for i in this thread's slice of [0, n).
template <typename Func>
MOBULA_DEVICE void parfor(const size_t n, Func F) {
INDEX_TYPE_SWITCH(n, {
index_t start, end;
get_parfor_range(n, get_num_threads(), get_thread_num(), &start, &end);
for (index_t i = start; i < end; ++i) {
F(i);
}
});
}
// CUDA-style barrier emulated with an OpenMP barrier.
inline void __syncthreads() { __pragma(omp barrier); }
#define KERNEL_RUN(a) (mobula::KernelRunner<decltype(&(a))>(&(a)))
#else // HOST_NUM_THREADS > 1 else
// Single-threaded fallbacks when HOST_NUM_THREADS <= 1.
MOBULA_DEVICE inline int get_num_threads() { return 1; }
// Bug fix: thread indices are 0-based (the parallel branch returns
// omp_get_thread_num(), which is 0-based), so the lone thread must
// report index 0, not 1.
MOBULA_DEVICE inline int get_thread_num() { return 0; }
// Serial fallback: run F(i) over the whole range on the calling thread.
template <typename Func>
MOBULA_DEVICE void parfor(const size_t n, Func F) {
INDEX_TYPE_SWITCH(n, {
for (index_t i = 0; i < static_cast<index_t>(n); ++i) {
F(i);
}
});
}
// No other threads exist, so the barrier is a no-op.
inline void __syncthreads() {}
#define KERNEL_RUN(a) (a)
#endif // HOST_NUM_THREADS > 1
} // namespace mobula
#endif // MOBULA_INCLUDE_CONTEXT_OPENMP_CTX_H_
|
cpd.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "cpd.h"
#include "matrix.h"
#include "mttkrp.h"
#include "timer.h"
#include "thd_info.h"
#include "util.h"
#include <math.h>
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
// Run CPD-ALS on a CSF tensor and fill 'factored' with the resulting
// Kruskal tensor. Ownership of the factor-matrix value arrays and of
// 'lambda' transfers to 'factored' (freed via splatt_free_kruskal).
int splatt_cpd_als(
splatt_csf const * const tensors,
splatt_idx_t const nfactors,
double const * const options,
splatt_kruskal * factored)
{
matrix_t * mats[MAX_NMODES+1];
idx_t nmodes = tensors->nmodes;
// NOTE(review): only rinfo.rank is initialized here; presumably
// cpd_als_iterate ignores the remaining fields in serial mode — confirm.
rank_info rinfo;
rinfo.rank = 0;
/* allocate factor matrices */
idx_t maxdim = tensors->dims[argmax_elem(tensors->dims, nmodes)];
for(idx_t m=0; m < nmodes; ++m) {
mats[m] = (matrix_t *) mat_rand(tensors[0].dims[m], nfactors);
}
// extra maxdim x nfactors matrix used as scratch by the iterations
mats[MAX_NMODES] = mat_alloc(maxdim, nfactors);
val_t * lambda = (val_t *) splatt_malloc(nfactors * sizeof(val_t));
/* do the factorization! */
factored->fit = cpd_als_iterate(tensors, mats, lambda, nfactors, &rinfo,
options);
/* store output */
factored->rank = nfactors;
factored->nmodes = nmodes;
factored->lambda = lambda;
for(idx_t m=0; m < nmodes; ++m) {
factored->dims[m] = tensors->dims[m];
factored->factors[m] = mats[m]->vals;
}
/* clean up */
mat_free(mats[MAX_NMODES]);
for(idx_t m=0; m < nmodes; ++m) {
free(mats[m]); /* just the matrix_t ptr, data is safely in factored */
}
return SPLATT_SUCCESS;
}
// Free the arrays owned by a Kruskal tensor (lambda and each factor's
// values, handed off by splatt_cpd_als). The struct itself is not freed
// and its pointers are left dangling — callers must not reuse them.
void splatt_free_kruskal(
splatt_kruskal * factored)
{
free(factored->lambda);
for(idx_t m=0; m < factored->nmodes; ++m) {
free(factored->factors[m]);
}
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Resets serial and MPI timers that were activated during some CPD
* pre-processing.
*
* @param rinfo MPI rank information.
*/
static void p_reset_cpd_timers(
rank_info const * const rinfo)
{
timer_reset(&timers[TIMER_ATA]);
#ifdef SPLATT_USE_MPI
timer_reset(&timers[TIMER_MPI]);
timer_reset(&timers[TIMER_MPI_IDLE]);
timer_reset(&timers[TIMER_MPI_COMM]);
timer_reset(&timers[TIMER_MPI_ATA]);
timer_reset(&timers[TIMER_MPI_REDUCE]);
timer_reset(&timers[TIMER_MPI_NORM]);
timer_reset(&timers[TIMER_MPI_UPDATE]);
timer_reset(&timers[TIMER_MPI_FIT]);
// synchronize so all ranks restart their timers together
MPI_Barrier(rinfo->comm_3d);
#endif
}
/**
* @brief Find the Frobenius norm squared of a Kruskal tensor. This equivalent
* to via computing <X,X>, the inner product of X with itself. We find
* this via \lambda^T (AtA * BtB * ...) \lambda, where * is the Hadamard
* product.
*
* @param nmodes The number of modes in the tensor.
* @param lambda The vector of column norms.
* @param aTa An array of Gram Matrices (AtA, BtB, ...).
*
* @return The Frobenius norm of X, squared.
*/
static val_t p_kruskal_norm(
idx_t const nmodes,
val_t const * const restrict lambda,
matrix_t ** aTa)
{
idx_t const rank = aTa[0]->J;
val_t * const restrict av = aTa[MAX_NMODES]->vals;
val_t norm_mats = 0;
/* use aTa[MAX_NMODES] as scratch space */
// Only the upper triangle (j >= i) is written/read throughout; the
// Gram matrices are symmetric so the lower triangle is never needed.
for(idx_t i=0; i < rank; ++i) {
for(idx_t j=i; j < rank; ++j) {
av[j + (i*rank)] = 1.;
}
}
/* aTa[MAX_NMODES] = hada(aTa) */
for(idx_t m=0; m < nmodes; ++m) {
val_t const * const restrict atavals = aTa[m]->vals;
for(idx_t i=0; i < rank; ++i) {
for(idx_t j=i; j < rank; ++j) {
av[j + (i*rank)] *= atavals[j + (i*rank)];
}
}
}
/* now compute lambda^T * aTa[MAX_NMODES] * lambda */
// Off-diagonal terms are doubled to account for the symmetric half
// that was never stored.
for(idx_t i=0; i < rank; ++i) {
norm_mats += av[i+(i*rank)] * lambda[i] * lambda[i];
for(idx_t j=i+1; j < rank; ++j) {
norm_mats += av[j+(i*rank)] * lambda[i] * lambda[j] * 2;
}
}
// guard against tiny negative values from floating-point round-off
return fabs(norm_mats);
}
/**
* @brief Compute the inner product of a Kruskal tensor and an unfactored
* tensor. Assumes that 'm1' contains the MTTKRP result along the last
* mode of the two input tensors. This naturally follows the end of a
* CPD iteration.
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
*
* @return The inner product of the two tensors, computed via:
* 1^T hadamard(mats[nmodes-1], m1) \lambda.
*/
static val_t p_tt_kruskal_inner(
idx_t const nmodes,
rank_info * const rinfo,
thd_info * const thds,
val_t const * const restrict lambda,
matrix_t ** mats,
matrix_t const * const m1)
{
  idx_t const rank = mats[0]->J;
  idx_t const lastm = nmodes - 1;
  idx_t const dim = m1->I;

  /* last-mode factor matrix and the matching MTTKRP result */
  val_t const * const m0 = mats[lastm]->vals;
  val_t const * const mv = m1->vals;

  val_t myinner = 0;
  #pragma omp parallel reduction(+:myinner)
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread accumulator of length 'rank', kept in thread scratch */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    for(idx_t r=0; r < rank; ++r) {
      accumF[r] = 0.;
    }

    /* accumF[r] += m0[i,r] * mv[i,r], rows split across threads */
    #pragma omp for
    for(idx_t i=0; i < dim; ++i) {
      for(idx_t r=0; r < rank; ++r) {
        accumF[r] += m0[r+(i*rank)] * mv[r+(i*rank)];
      }
    }

    /* accumulate everything into 'myinner' (weighted by column norms) */
    for(idx_t r=0; r < rank; ++r) {
      myinner += accumF[r] * lambda[r];
    }
  }

  val_t inner = 0.;
  #ifdef SPLATT_USE_MPI
  /* sum the per-rank partial inner products across the 3D grid */
  timer_start(&timers[TIMER_MPI_FIT]);
  MPI_Allreduce(&myinner, &inner, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_FIT]);
  #else
  inner = myinner;
  #endif

  return inner;
}
/**
* @brief Compute the fit of a Kruskal tensor, Z, to an input tensor, X. This
* is computed via 1 - [sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)].
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param ttnormsq The norm (squared) of the original input tensor, <X,X>.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
* @param aTa An array of matrices (length MAX_NMODES) containing BtB, CtC, etc.
*
* @return The fit of the Kruskal tensor to the input tensor, computed as
* 1 - [sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)].
*/
static val_t p_calc_fit(
idx_t const nmodes,
rank_info * const rinfo,
thd_info * const thds,
val_t const ttnormsq,
val_t const * const restrict lambda,
matrix_t ** mats,
matrix_t const * const m1,
matrix_t ** aTa)
{
  timer_start(&timers[TIMER_FIT]);

  /* <Z,Z>: squared norm of the current model, lambda^T (hada aTa) lambda */
  val_t const znormsq = p_kruskal_norm(nmodes, lambda, aTa);

  /* <X,Z>: inner product of the input tensor with the model */
  val_t const xz = p_tt_kruskal_inner(nmodes, rinfo, thds, lambda, mats, m1);

  /* residual^2 = <X,X> + <Z,Z> - 2<X,Z>; if the fit is perfect the value
   * can dip (numerically) to <= 0, in which case skip the sqrt */
  val_t residual = ttnormsq + znormsq - (2 * xz);
  residual = (residual > 0.) ? sqrt(residual) : residual;

  timer_stop(&timers[TIMER_FIT]);

  return 1 - (residual / sqrt(ttnormsq));
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/**
* @brief Run CP-ALS: alternately solve for each factor matrix until the fit
*        converges or the iteration limit is reached.
*
* @param tensors The CSF tensor(s) to factor.
* @param mats The factor matrices; mats[MAX_NMODES] is MTTKRP scratch.
* @param lambda Output vector of column norms.
* @param nfactors The CP rank.
* @param rinfo MPI rank information.
* @param opts Option array (threads, iterations, tolerance, verbosity, ...).
*
* @return The final fit of the factorization.
*/
double cpd_als_iterate(
splatt_csf const * const tensors,
matrix_t ** mats,
val_t * const lambda,
idx_t const nfactors,
rank_info * const rinfo,
double const * const opts)
{
  idx_t const nmodes = tensors[0].nmodes;
  idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];

  /* Setup thread structures. + 64 bytes is to avoid false sharing.
   * TODO make this better */
  splatt_omp_set_num_threads(nthreads);
  thd_info * thds = thd_init(nthreads, 3,
      (nmodes * nfactors * sizeof(val_t)) + 64,
      0,
      (nmodes * nfactors * sizeof(val_t)) + 64);

  /* m1 aliases the MTTKRP output buffer */
  matrix_t * m1 = mats[MAX_NMODES];

  /* Initialize first A^T * A mats. We redundantly do the first because it
   * makes communication easier. */
  matrix_t * aTa[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    aTa[m] = mat_alloc(nfactors, nfactors);
    memset(aTa[m]->vals, 0, nfactors * nfactors * sizeof(val_t));
    mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
  }
  /* used as buffer space */
  aTa[MAX_NMODES] = mat_alloc(nfactors, nfactors);

  /* mttkrp workspace */
  splatt_mttkrp_ws * mttkrp_ws = splatt_mttkrp_alloc_ws(tensors,nfactors,opts);

  /* Compute input tensor norm (<X,X>, reused for every fit computation) */
  double oldfit = 0;
  double fit = 0;
  val_t ttnormsq = csf_frobsq(tensors);

  /* setup timers */
  p_reset_cpd_timers(rinfo);
  sp_timer_t itertime;
  sp_timer_t modetime[MAX_NMODES];
  timer_start(&timers[TIMER_CPD]);

  idx_t const niters = (idx_t) opts[SPLATT_OPTION_NITER];
  for(idx_t it=0; it < niters; ++it) {
    timer_fstart(&itertime);
    for(idx_t m=0; m < nmodes; ++m) {
      //timer_fstart(&modetime[m]);
      /* resize the MTTKRP buffer for this mode's dimension */
      mats[MAX_NMODES]->I = tensors[0].dims[m];
      m1->I = mats[m]->I;

      /* M1 = X * (C o B) */
      timer_fstart(&modetime[m]);
      timer_start(&timers[TIMER_MTTKRP]);
      mttkrp_csf(tensors, mats, m, thds, mttkrp_ws, opts);
      timer_stop(&timers[TIMER_MTTKRP]);
      timer_stop(&modetime[m]);

      #if 0
      /* M2 = (CtC .* BtB .* ...)^-1 */
      calc_gram_inv(m, nmodes, aTa);
      /* A = M1 * M2 */
      memset(mats[m]->vals, 0, mats[m]->I * nfactors * sizeof(val_t));
      mat_matmul(m1, aTa[MAX_NMODES], mats[m]);
      #else
      /* solve the normal equations in place instead of explicit inverse */
      par_memcpy(mats[m]->vals, m1->vals, m1->I * nfactors * sizeof(val_t));
      mat_solve_normals(m, nmodes, aTa, mats[m],
          opts[SPLATT_OPTION_REGULARIZE]);
      #endif

      /* normalize columns and extract lambda; 2-norm on the first sweep,
       * max-norm afterwards */
      if(it == 0) {
        mat_normalize(mats[m], lambda, MAT_NORM_2, rinfo, thds, nthreads);
      } else {
        mat_normalize(mats[m], lambda, MAT_NORM_MAX, rinfo, thds,nthreads);
      }

      /* update A^T*A */
      mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
      //timer_stop(&modetime[m]);
    } /* foreach mode */

    fit = p_calc_fit(nmodes, rinfo, thds, ttnormsq, lambda, mats, m1, aTa);
    timer_stop(&itertime);

    if(rinfo->rank == 0 &&
        opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_NONE) {
      printf(" its = %3"SPLATT_PF_IDX" (%0.3fs) fit = %0.5f delta = %+0.4e\n",
          it+1, itertime.seconds, fit, fit - oldfit);
      if(opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_LOW) {
        for(idx_t m=0; m < nmodes; ++m) {
          printf(" mode = %1"SPLATT_PF_IDX" (%0.3fs)\n", m+1,
              modetime[m].seconds);
        }
      }
    }

    /* converged? perfect fit, or delta below tolerance (after 1st iter) */
    if(fit == 1. ||
        (it > 0 && fabs(fit - oldfit) < opts[SPLATT_OPTION_TOLERANCE])) {
      break;
    }
    oldfit = fit;
  }
  timer_stop(&timers[TIMER_CPD]);

  cpd_post_process(nfactors, nmodes, mats, lambda, thds, nthreads, rinfo);

  /* CLEAN UP */
  splatt_mttkrp_free_ws(mttkrp_ws);
  for(idx_t m=0; m < nmodes; ++m) {
    mat_free(aTa[m]);
  }
  mat_free(aTa[MAX_NMODES]);
  thd_free(thds, nthreads);

  return fit;
}
/* Re-normalize every factor matrix with the 2-norm and fold the resulting
 * column norms into lambda, so the final model carries all scaling in
 * lambda. */
void cpd_post_process(
idx_t const nfactors,
idx_t const nmodes,
matrix_t ** mats,
val_t * const lambda,
thd_info * const thds,
idx_t const nthreads,
rank_info * const rinfo)
{
  /* scratch for each mode's column norms */
  val_t * tmp = splatt_malloc(nfactors * sizeof(*tmp));

  /* normalize each matrix and adjust lambda */
  for(idx_t m=0; m < nmodes; ++m) {
    mat_normalize(mats[m], tmp, MAT_NORM_2, rinfo, thds, nthreads);
    for(idx_t f=0; f < nfactors; ++f) {
      lambda[f] *= tmp[f];
    }
  }

  /* NOTE(review): tmp comes from splatt_malloc() but is released with plain
   * free() -- confirm splatt_malloc is a thin malloc wrapper, otherwise
   * this should be splatt_free(). */
  free(tmp);
}
|
GB_binop__minus_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint64)
// A*D function (colscale): GB (_AxD__minus_uint64)
// D*A function (rowscale): GB (_DxB__minus_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint64)
// C=scalar+B GB (_bind1st__minus_uint64)
// C=scalar+B' GB (_bind1st_tran__minus_uint64)
// C=A+scalar GB (_bind2nd__minus_uint64)
// C=A'+scalar GB (_bind2nd_tran__minus_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_UINT64 || GxB_NO_MINUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
/* C += A+B where C, A, and B are all dense; the MINUS op is applied by the
 * shared template, specialized through the GB_* macros defined above. */
void GB (_Cdense_ewise3_accum__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B (no accumulation) where C, A, and B are all dense. */
void GB (_Cdense_ewise3_noaccum__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate a sparse matrix into a dense matrix.
 * Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE). */
GrB_Info GB (_Cdense_accumB__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        /* all work is done by the shared template */
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar into a dense matrix.
 * Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
 * Cleanup: the inner braced block previously contained its own
 * "return (GrB_SUCCESS)", making the one after the block unreachable dead
 * code; a single exit point is kept, matching _Cdense_accumB above. */
GrB_Info GB (_Cdense_accumb__minus_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale the columns of A by the diagonal matrix D. */
GrB_Info GB (_AxD__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* results are written directly into C->x */
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: scale the rows of B by the diagonal matrix D. */
GrB_Info GB (_DxB__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* results are written directly into C->x */
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with result pattern the union
 * of A and B.  For eWiseUnion, alpha/beta supply the values used where an
 * entry is present in only one of A or B. */
GrB_Info GB (_AaddB__minus_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    /* only read when is_eWiseUnion is true */
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper;
 * the result pattern is the intersection of A and B. */
GrB_Info GB (_AemultB_08__minus_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
 * flipxy selects fmult(y,x) instead of fmult(x,y), but only for operators
 * that declare GB_BINOP_FLIP (MINUS does not). */
GrB_Info GB (_AemultB_02__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are both
 * bitmap/full. */
GrB_Info GB (_AemultB_04__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap. */
GrB_Info GB (_AemultB_bitmap__minus_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx [p] = x - Bx [p] for every entry p: apply the binary op with the
 * scalar bound to the first argument. */
GrB_Info GB (_bind1st__minus_uint64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        /* skip entries absent from the bitmap Bb (GBB from GB.h) */
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx [p] = Ax [p] - y for every entry p: apply the binary op with the
 * scalar bound to the second argument. */
GrB_Info GB (_bind2nd__minus_uint64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        /* skip entries absent from the bitmap Ab (GBB from GB.h) */
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
/* cij = x - aij, used by GB_unop_transpose.c below */
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}

/* C = op (x, A'): transpose A and apply the op with x bound first. */
GrB_Info GB (_bind1st_tran__minus_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    /* restore GB_ATYPE for the code following this function */
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
/* cij = aij - y, used by GB_unop_transpose.c below */
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}

/* C = op (A', y): transpose A and apply the op with y bound second. */
GrB_Info GB (_bind2nd_tran__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
c_jacobi03.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
Copyright (c) 2004, OmpSCR Group
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of La Laguna nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
FILE: c_jacobi03.c
VERSION: 1.0
DATE: Oct 2004
AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003
anmey@rz.rwth-aachen.de
http://www.rwth-aachen.de/People/D.an.Mey.html
COMMENTS TO: ompscr@etsii.ull.es
DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation :
(d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method.
COMMENTS: OpenMP version 3: 1 PR outside the iteration loop, 4 Barriers
Directives are used in this code to achieve parallelism.
All do loops are parallelized with default 'static' scheduling.
REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html
BASIC PRAGMAS: parallel for
USAGE: ./c_jacobi03.par 5000 5000 0.8 1.0 1000
INPUT: n - grid dimension in x direction
m - grid dimension in y direction
alpha - Helmholtz constant (always greater than 0.0)
tol - error tolerance for iterative solver
relax - Successive over-relaxation parameter
mits - Maximum iterations for iterative solver
OUTPUT: Residual and error
u(n,m) - Dependent variable (solutions)
f(n,m) - Right hand side function
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
//#include "OmpSCR.h"
#define U(i,j) u[(i)*n+(j)]
#define F(i,j) f[(i)*n+(j)]
#define NUM_ARGS 6
#define NUM_TIMERS 1
#define NIN 4
#define MIN 4
#define ALPHA 0.1
#define TOL 0.1
#define RELAX 2
#define MITS 2
int n, m, mits;
double tol, relax, alpha;
void jacobi (int n, int m, double dx, double dy,
double alpha, double omega,
double *u, double *f,
double tol, int maxit );
/******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* these macros are normally defined near the top of this file */
#ifndef U
#define U(i,j) u[(i)*n+(j)]
#endif
#ifndef F
#define F(i,j) f[(i)*n+(j)]
#endif

/* Initialize the grid spacings, the initial guess u = 0, and the RHS f
 * from the exact solution u(x,y) = (1-x^2)*(1-y^2). */
void initialize(
int n,
int m,
double alpha,
double *dx,
double *dy,
double *u,
double *f)
{
  int i, j;
  /* BUG FIX: xx and yy were declared int, which truncated the grid
   * coordinates -1 + dx*(i-1) toward zero and corrupted the RHS;
   * error_check() already uses double for the same quantities. */
  double xx, yy;

  *dx = 2.0 / (n-1);
  *dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS */
  for (j=0; j<m; j++){
    for (i=0; i<n; i++){
      xx = -1.0 + *dx * (i-1);   /* x coordinate */
      yy = -1.0 + *dy * (j-1);   /* y coordinate */
      U(j,i) = 0.0;
      F(j,i) = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy)
               - 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy);
    }
  }
}
/************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* this macro is normally defined near the top of this file */
#ifndef U
#define U(i,j) u[(i)*n+(j)]
#endif

/* Print the RMS error between the numerical solution u and the exact
 * solution (1-x^2)*(1-y^2). */
void error_check(
int n,
int m,
double alpha,
double dx,
double dy,
double *u,
double *f)
{
  int i,j;
  double xx, yy, temp, error;

  /* BUG FIX: dx and dy were recomputed here, clobbering the values passed
   * by the caller, and dy used the wrong formula (2.0/(n-2) instead of
   * 2.0/(m-1)).  The caller-supplied spacings are used instead. */

  (void)alpha;   /* kept for interface compatibility; not used here */
  (void)f;

  error = 0.0;
  for (j=0; j<m; j++){
    for (i=0; i<n; i++){
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy);
      error += temp*temp;
    }
  }
  error = sqrt(error)/(n*m);

  printf("Solution Error : %g\n", error);
}
/* Driver: set up the problem (hard-coded sizes replacing the original
 * OmpSCR argument parsing), run the Jacobi solver, report timing and the
 * solution error. */
int main(int argc, char **argv){
  double *u, *f, dx, dy;
  double dt, mflops;
  int NUMTHREADS;
  // char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmhotlz constant =",
  // "Successive over-relaxation parameter =",
  // "error tolerance for iterative solver =", "Maximum iterations for solver ="};
  // char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
  // char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"};

  NUMTHREADS = 1; //omp_get_num_threads();
  //OSCR_init (NUMTHREADS, "Jacobi Solver v1", "Use 'jacoib03' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS,
  // PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
  // argc, argv);

  /* problem parameters (compile-time defaults stand in for OSCR args) */
  n = NIN; // OSCR_getarg_int(1);
  m = MIN; // OSCR_getarg_int(2);
  alpha = ALPHA; // OSCR_getarg_double(3);
  relax = RELAX; // OSCR_getarg_double(4);
  tol = TOL; // OSCR_getarg_double(5);
  mits = MITS; // OSCR_getarg_int(6);

  printf("-> %d, %d, %g, %g, %g, %d\n",
      n, m, alpha, relax, tol, mits);

  u = (double *) malloc(n*m*sizeof(double));
  f = (double *) malloc(n*m*sizeof(double));

  /* arrays are allocated and initialized */
  initialize(n, m, alpha, &dx, &dy, u, f);

  /* Solve Helmholtz equation */
  //OSCR_timer_start(0);
  jacobi(n, m, dx, dy, alpha, relax, u,f, tol, mits);
  //OSCR_timer_stop(0);

  /* dt is a placeholder (1 second) since the OSCR timers are disabled */
  dt = 1; //OSCR_timer_read(0);
  printf(" elapsed time : %12.6f\n", dt);

  /* 13 flops per interior grid point per iteration */
  mflops = (0.000001*mits*(m-2)*(n-2)*13) / dt;
  printf(" MFlops : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt);

  error_check(n, m, alpha, dx, dy, u, f);
  //OSCR_report(1, TIMERS_NAMES);
  return 0;
}
/*
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************
*/
void jacobi ( const int n, const int m, double dx, double dy, double alpha,
double omega, double *u, double *f, double tol, int maxit )
{
int i,j,k;
double error, resid, ax, ay, b;
double *uold;
/* wegen Array-Kompatibilitaet, werden die Zeilen und Spalten (im Kopf)
getauscht, zB uold[spalten_num][zeilen_num]; bzw. wir tuen so, als ob wir das
gespiegelte Problem loesen wollen */
uold = (double *)malloc(sizeof(double) * n *m);
ax = 1.0/(dx * dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y_direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
error = 10.0 * tol;
k = 1;
#pragma omp parallel private(resid, i)
{
while (k <= maxit && error > tol) {
/* copy new solution into old */
#pragma omp for
for (j=0; j<m; j++)
for (i=0; i<n; i++)
uold[i + m*j] = u[i + m*j];
/* compute stencil, residual and update */
#pragma omp for reduction(+:error)
for (i=1; i<n-1; i++){
resid =(
ax * (uold[i-1 + m*j] + uold[i+1 + m*j])
+ ay * (uold[i + m*(j-1)] + uold[i + m*(j+1)])
+ b * uold[i + m*j] - f[i + m*j]
) / b;
/* update solution */
u[i + m*j] = uold[i + m*j] - omega * resid;
/* accumulate residual error */
error =error + resid*resid;
} /* end for */
/* error check */
#pragma omp master
{
k++;
error = sqrt(error) /(n*m);
}
} /* while */
} /* end parallel */
printf("Total Number of Iteratuons %d\n", k);
printf("Residual %.15f\n", error);
free(uold);
}
|
invertm.c | /* cc -lm t4.c -qsmp */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#define FLT double
/* utility routines */
FLT system_clock(FLT *x);
FLT **matrix(int nrl,int nrh,int ncl,int nch);
/* work routines */
void mset(FLT **m, int n, int in);
FLT mcheck(FLT **m, int n, int in);
void over(FLT ** mat,int size);
/* Driver: build four 1024x1024 test matrices, invert each twice (so the
 * result should equal the original) concurrently in four OpenMP sections,
 * and report per-section wall-clock spans and reconstruction error. */
int main(int argc,char *argv[]) {
  FLT **ms[64];
  FLT t0_start;
  FLT t1_start,t1_end,e1;
  FLT t2_start,t2_end,e2;
  FLT t3_start,t3_end,e3;
  FLT t4_start,t4_end,e4;
  int n;
  n=1024;

  /* each matrix: 'in' on the diagonal, 1 elsewhere (in = 10,20,30,40) */
  ms[0]=matrix(1,n,1,n);
  ms[1]=matrix(1,n,1,n);
  ms[2]=matrix(1,n,1,n);
  ms[3]=matrix(1,n,1,n);
  mset(ms[0],n,10);
  mset(ms[1],n,20);
  mset(ms[2],n,30);
  mset(ms[3],n,40);

  /* common reference time for all sections */
  system_clock(&t0_start);

  #pragma omp parallel sections
  {
    /* section 1: invert ms[0] twice; error vs. the original fill */
    #pragma omp section
    {
      system_clock(&t1_start);
      over(ms[0],n);
      over(ms[0],n);
      system_clock(&t1_end);
      e1=mcheck(ms[0],n,10);
      t1_start=t1_start-t0_start;
      t1_end=t1_end-t0_start;
    }
    /* section 2 */
    #pragma omp section
    {
      system_clock(&t2_start);
      over(ms[1],n);
      over(ms[1],n);
      system_clock(&t2_end);
      e2=mcheck(ms[1],n,20);
      t2_start=t2_start-t0_start;
      t2_end=t2_end-t0_start;
    }
    /* section 3 */
    #pragma omp section
    {
      system_clock(&t3_start);
      over(ms[2],n);
      over(ms[2],n);
      system_clock(&t3_end);
      e3=mcheck(ms[2],n,30);
      t3_start=t3_start-t0_start;
      t3_end=t3_end-t0_start;
    }
    /* section 4 */
    #pragma omp section
    {
      system_clock(&t4_start);
      over(ms[3],n);
      over(ms[3],n);
      system_clock(&t4_end);
      e4=mcheck(ms[3],n,40);
      t4_start=t4_start-t0_start;
      t4_end=t4_end-t0_start;
    }
  }

  printf("section 1 start time= %10.5g end time= %10.5g error= %g\n",t1_start,t1_end,e1);
  printf("section 2 start time= %10.5g end time= %10.5g error= %g\n",t2_start,t2_end,e2);
  printf("section 3 start time= %10.5g end time= %10.5g error= %g\n",t3_start,t3_end,e3);
  printf("section 4 start time= %10.5g end time= %10.5g error= %g\n",t4_start,t4_end,e4);
  return 0;
}
#ifndef FLT
#define FLT double
#endif
/* Fill the n-by-n (1-indexed) matrix with 'in' on the diagonal and
 * 1 everywhere else. */
void mset(FLT **m, int n, int in) {
  int row, col;
  for (row = 1; row <= n; row++) {
    for (col = 1; col <= n; col++) {
      m[row][col] = (row == col) ? in : 1;
    }
  }
}
#ifndef FLT
#define FLT double
#endif
/* Sum of absolute deviations of the n-by-n (1-indexed) matrix from the
 * pattern written by mset: 'in' on the diagonal, 1 elsewhere. */
FLT mcheck(FLT **m, int n, int in) {
  int row, col;
  FLT sum = 0.0;
  for (row = 1; row <= n; row++) {
    for (col = 1; col <= n; col++) {
      FLT expect = (row == col) ? in : 1;
      sum += fabs(m[row][col] - expect);
    }
  }
  return sum;
}
/* In-place Gauss-Jordan matrix inversion with partial (column) pivoting
 * on a 1-indexed size-by-size matrix.  Applying it twice recovers the
 * original matrix (up to rounding), which is how main() checks it.
 * NOTE(review): the swap history sw[5000][2] is a fixed-size stack array,
 * so this routine silently assumes size <= 4999; the pivot row indices
 * are stored in FLT and converted back to int, which is exact only while
 * the indices are representable in the FLT type. */
void over(FLT ** mat,int size)
{
  int k, jj, kp1, i, j, l, krow, irow;
  FLT pivot, temp;
  FLT sw[5000][2];   /* per-step row-swap record: sw[k] = {k, pivot row} */

  for (k = 1 ;k<= size ; k++)
  {
    /* find the pivot row jj: largest |mat[i][k]| for i >= k */
    jj = k;
    if (k != size)
    {
      kp1 = k + 1;
      pivot = fabs(mat[k][k]);
      for( i = kp1;i<= size ;i++)
      {
        temp = fabs(mat[i][k]);
        if (pivot < temp)
        {
          pivot = temp;
          jj = i;
        }
      }
    }
    /* remember the swap so it can be undone at the end */
    sw[k][0] =k;
    sw[k][1] = jj;
    if (jj != k)
      for (j = 1 ;j<= size; j++)
      {
        temp = mat[jj][j];
        mat[jj][j] = mat[k][ j];
        mat[k][j] = temp;
      }
    /* scale the pivot row and replace the pivot by its reciprocal */
    for (j = 1 ;j<= size; j++)
      if (j != k)
        mat[k][j] = mat[k][j] / mat[k][k];
    mat[k][k] = 1.0 / mat[k][k];
    /* eliminate column k from every other row */
    for (i = 1; i<=size; i++)
      if (i != k)
        for (j = 1;j<=size; j++)
          if (j != k)
            mat[i][j] = mat[i][j] - mat[k][j] * mat[i][k];
    for (i = 1;i<=size;i++)
      if (i != k)
        mat[i][k] = -mat[i][k] * mat[k][k];
  }

  /* undo the row swaps in reverse order (as column swaps of the inverse) */
  for (l = 1; l<=size; ++l)
  {
    k = size - l + 1;
    krow = sw[k][0];   /* implicit FLT -> int conversion */
    irow = sw[k][1];
    if (krow != irow)
      for (i = 1; i<= size; ++i)
      {
        temp = mat[i][krow];
        mat[i][krow] = mat[i][irow];
        mat[i][irow] = temp;
      }
  }
}
/*
The routine matrix was adapted from
Numerical Recipes in C The Art of Scientific Computing
Press, Flannery, Teukolsky, Vetting
Cambridge University Press, 1988.
*/
/* Allocate a 2-D matrix indexed as m[nrl..nrh][ncl..nch] (Numerical
 * Recipes style).  The row-pointer array and the single contiguous data
 * block are offset by nrl and ncl respectively so that 1-based (or any
 * other) indexing works directly.
 * NOTE(review): because of the pointer offsets, releasing this matrix
 * requires free(m[nrl]+ncl) then free(m+nrl) -- a plain free(m) is wrong;
 * the callers in this file never free it.  Exits the process on
 * allocation failure. */
FLT **matrix(int nrl,int nrh,int ncl,int nch)
{
  int i;
  FLT **m;

  /* row-pointer array, shifted so m[nrl] is the first row */
  m=(FLT **) malloc((unsigned) (nrh-nrl+1)*sizeof(FLT*));
  if (!m){
    printf("allocation failure 1 in matrix()\n");
    exit(1);
  }
  m -= nrl;

  for(i=nrl;i<=nrh;i++) {
    if(i == nrl){
      /* one contiguous block holds every row; shifted by ncl */
      m[i]=(FLT *) malloc((unsigned) (nrh-nrl+1)*(nch-ncl+1)*sizeof(FLT));
      if (!m[i]){
        printf("allocation failure 2 in matrix()\n");
        exit(1);
      }
      m[i] -= ncl;
    }
    else {
      /* subsequent rows point into the same block */
      m[i]=m[i-1]+(nch-ncl+1);
    }
  }
  return m;
}
/* Return the current wall-clock time in seconds (gettimeofday resolution).
 * If x is non-NULL, the same value is also stored through x. */
FLT system_clock(FLT *x) {
    struct timeval tb;
    struct timezone tz;
    FLT usec_to_sec = 1.0e-6;
    FLT now;
    gettimeofday(&tb, &tz);
    now = (FLT) tb.tv_sec + (FLT) tb.tv_usec * usec_to_sec;
    if (x != NULL)
        *x = now;
    return now;
}
|
core_zttqrt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
// This will be swapped during the automatic code generation.
#undef REAL
#define COMPLEX
/***************************************************************************//**
*
* @ingroup core_ttqrt
*
* Computes a QR factorization of a rectangular matrix
* formed by coupling an n-by-n upper triangular tile A1
* on top of an m-by-n upper triangular tile A2:
*
* | A1 | = Q * R
* | A2 |
*
*******************************************************************************
*
* @param[in] m
 * The number of rows of the tile A2. m >= 0.
*
* @param[in] n
* The number of rows of the tile A1.
* The number of columns of the tiles A1 and A2. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the n-by-n tile A1.
* On exit, the elements on and above the diagonal of the array
* contain the n-by-n upper trapezoidal tile R;
* the elements below the diagonal are not referenced.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,n).
*
* @param[in,out] A2
* On entry, the m-by-n upper triangular tile A2.
* On exit, the elements on and above the diagonal of the array
* with the matrix T represent
* the unitary tile Q as a product of elementary reflectors
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m).
*
* @param[out] T
* The ib-by-n triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliary workspace array of length n.
*
* @param work
* Auxiliary workspace array of length ib*n.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
int core_zttqrt(int m, int n, int ib,
                plasma_complex64_t *A1, int lda1,
                plasma_complex64_t *A2, int lda2,
                plasma_complex64_t *T, int ldt,
                plasma_complex64_t *tau,
                plasma_complex64_t *work)
{
    // Check input arguments.
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        coreblas_error("NULL A1");
        return -4;
    }
    // NOTE(review): this bound uses m, while the header comment documents
    // lda1 >= max(1,n) -- confirm against callers.
    if (lda1 < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        coreblas_error("NULL work");
        return -11;
    }

    // quick return
    if ((m == 0) || (n == 0) || (ib == 0))
        return PlasmaSuccess;

    // TODO: Need to check why some cases require this to avoid
    // uninitialized values
    //core_zlaset(PlasmaGeneral, ib, n, 0.0, 0.0, T, ldt);

    // Blocked factorization: process the n columns in panels of width ib.
    for (int ii = 0; ii < n; ii += ib) {
        int sb = imin(n-ii, ib);        // width of the current panel
        // Factor the current panel one column at a time.
        for (int i = 0; i < sb; i++) {
            int j = ii + i;             // global column index
            int mi = imin(j+1, m);      // rows of A2 touched by this reflector
            int ni = sb-i-1;            // columns remaining in the panel
            // Generate elementary reflector H(j) to annihilate A2(1:mi, j).
            LAPACKE_zlarfg_work(
                mi+1, &A1[lda1*j+j], &A2[lda2*j], 1, &tau[j]);

            if (ni > 0) {
                // Apply H(j-1) to A(j:m, j+1:ii+ib) from the left.
                // work = A1(j, j+1:)^H, conjugated for the complex case.
                cblas_zcopy(
                    ni,
                    &A1[lda1*(j+1)+j], lda1,
                    work, 1);
#ifdef COMPLEX
                LAPACKE_zlacgv_work(ni, work, 1);
#endif
                plasma_complex64_t zone = 1.0;
                // work += A2(:, j+1:)^H * A2(:, j)
                cblas_zgemv(
                    CblasColMajor, (CBLAS_TRANSPOSE)Plasma_ConjTrans,
                    mi, ni,
                    CBLAS_SADDR(zone), &A2[lda2*(j+1)], lda2,
                    &A2[lda2*j], 1,
                    CBLAS_SADDR(zone), work, 1);
#ifdef COMPLEX
                LAPACKE_zlacgv_work(ni, work, 1);
#endif
                plasma_complex64_t alpha = -conj(tau[j]);
                // Update the trailing row of A1 ...
                cblas_zaxpy(
                    ni, CBLAS_SADDR(alpha),
                    work, 1,
                    &A1[lda1*(j+1)+j], lda1);
#ifdef COMPLEX
                LAPACKE_zlacgv_work(ni, work, 1);
#endif
                // ... and apply the rank-1 update to the trailing columns of A2.
                cblas_zgerc(
                    CblasColMajor, mi, ni,
                    CBLAS_SADDR(alpha), &A2[lda2*j], 1,
                    work, 1,
                    &A2[lda2*(j+1)], lda2);
            }
            // Calculate T.
            // T(0:i-1, j) = alpha * A2(0:m-1, ii:j-1)^H * A2(0:m-1, j)
            if (i > 0) {
                int l = imin(i, imax(0, m-ii));
                plasma_complex64_t alpha = -(tau[j]);
                core_zpemv(
                    Plasma_ConjTrans, PlasmaColumnwise,
                    imin(j, m), i, l,
                    alpha, &A2[lda2*ii], lda2,
                    &A2[lda2*j], 1,
                    0.0, &T[ldt*j], 1,
                    work);
                // T(0:i-1, j) = T(0:i-1, ii:j-1) * T(0:i-1, j)
                cblas_ztrmv(
                    CblasColMajor, (CBLAS_UPLO)PlasmaUpper,
                    (CBLAS_TRANSPOSE)PlasmaNoTrans,
                    (CBLAS_DIAG)PlasmaNonUnit,
                    i, &T[ldt*ii], ldt,
                    &T[ldt*j], 1);
            }
            // Diagonal entry of the block reflector factor.
            T[ldt*j+i] = tau[j];
        }
        // Apply Q^H to the rest of the matrix from the left.
        if (n > ii+sb) {
            int mi = imin(ii+sb, m);
            int ni = n-(ii+sb);
            int l = imin(sb, imax(0, mi-ii));
            core_zparfb(
                PlasmaLeft, Plasma_ConjTrans,
                PlasmaForward, PlasmaColumnwise,
                ib, ni, mi, ni, sb, l, //replaced sb by ib
                &A1[lda1*(ii+sb)+ii], lda1,
                &A2[lda2*(ii+sb)], lda2,
                &A2[lda2*ii], lda2,
                &T[ldt*ii], ldt,
                work, sb);
        }
    }
    return PlasmaSuccess;
}
/******************************************************************************/
// Asynchronous OpenMP task wrapper for core_zttqrt(): registers dependences
// on the full A1 and A2 tiles and on the ib-by-n block-reflector factor T,
// then invokes the sequential kernel inside the task body.
void core_omp_zttqrt(int m, int n, int ib,
                     plasma_complex64_t *A1, int lda1,
                     plasma_complex64_t *A2, int lda2,
                     plasma_complex64_t *T, int ldt,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*n])
    {
        // Skip the kernel entirely once the sequence has already failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces: the per-thread scratch area holds tau
            // (length n) followed by the work buffer passed as tau+n.
            int tid = omp_get_thread_num();
            plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);

            // Call the kernel.
            int info = core_zttqrt(m, n, ib,
                                   A1, lda1,
                                   A2, lda2,
                                   T, ldt,
                                   tau,
                                   tau+n);
            if (info != PlasmaSuccess) {
                plasma_error("core_zttqrt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * NOTE: normalizes *y in place as a side effect (the classic glibc-manual
 * idiom).  Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from the seconds field when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry the other way when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now guaranteed non-negative in the difference. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: runs a tiled (PLUTO/CLooG-generated) order-1 3D 7-point
 * stencil with variable coefficients and reports the best wall time over
 * TESTS repetitions. */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;

  // Problem size: user-requested interior plus 2 halo layers per axis.
  // The original read Nx/Ny/Nz/Nt uninitialized when arguments were
  // missing (undefined behavior); fail fast with a usage message instead.
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  else {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  if (argc > 4) {
    Nt = atoi(argv[4]);
  }
  else {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }

  // allocate the arrays: two time levels for A, 7 coefficient arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  // Initialize both time levels of A over the FULL domain (including the
  // index-0 halo cells, which the stencil reads but the original never
  // wrote), and all coefficient arrays.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k]; // defined halos for the 2nd level
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,8);t1++) {
        lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
        ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(16*t2-Nz,4)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(8*t1+Ny+13,4)),floord(16*t2+Ny+12,4)),floord(16*t1-16*t2+Nz+Ny+11,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2044,2048)),ceild(4*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t3+Nx,2048),floord(Nt+Nx-4,2048)),floord(8*t1+Nx+13,2048)),floord(16*t2+Nx+12,2048)),floord(16*t1-16*t2+Nz+Nx+11,2048));t4++) {
              for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),4*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),4*t3+2),2048*t4+2046),16*t1-16*t2+Nz+13);t5++) {
                for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays, including the top-level pointers and the tile
  // size list (leaked by the original).
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
shared_update.c | // RUN: %libomptarget-compile-run-and-check-generic
// REQUIRES: unified_shared_memory
// amdgpu runtime crash
// UNSUPPORTED: amdgcn-amd-amdhsa
// UNSUPPORTED: amdgcn-amd-amdhsa-newRTL
#include <stdio.h>
#include <omp.h>
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL
extern void __tgt_register_requires(int64_t);
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
#pragma omp requires unified_shared_memory
#define N 1024
/* Unified-shared-memory test: host pointers must be directly usable on the
 * device (same addresses), and updates made on either side must be visible
 * to the other.  The "// CHECK:" comments are FileCheck patterns matched
 * against the program's stdout. */
int main(int argc, char *argv[]) {
  int fails;
  void *host_alloc, *device_alloc;
  void *host_data, *device_data;
  int *alloc = (int *)malloc(N * sizeof(int));
  int data[N];

  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  __tgt_register_requires(8);

  // Initialize both the heap array and the stack array on the host.
  for (int i = 0; i < N; ++i) {
    alloc[i] = 10;
    data[i] = 1;
  }

  host_data = &data[0];
  host_alloc = &alloc[0];

  // implicit mapping of data
#pragma omp target map(tofrom : device_data, device_alloc)
  {
    // Capture the addresses as seen on the device; under unified shared
    // memory they must equal the host addresses recorded above.
    device_data = &data[0];
    device_alloc = &alloc[0];

    for (int i = 0; i < N; i++) {
      alloc[i] += 1;
      data[i] += 1;
    }
  }

  // CHECK: Address of alloc on device matches host address.
  if (device_alloc == host_alloc)
    printf("Address of alloc on device matches host address.\n");

  // CHECK: Address of data on device matches host address.
  if (device_data == host_data)
    printf("Address of data on device matches host address.\n");

  // On the host, check that the arrays have been updated.
  // CHECK: Alloc device values updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (alloc[i] != 11)
      fails++;
  }
  printf("Alloc device values updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  // CHECK: Data device values updated: Succeeded
  fails = 0;
  for (int i = 0; i < N; i++) {
    if (data[i] != 2)
      fails++;
  }
  printf("Data device values updated: %s\n",
         (fails == 0) ? "Succeeded" : "Failed");

  //
  // Test that updates on the host and on the device are both visible.
  //
  // Update on the host.
  for (int i = 0; i < N; ++i) {
    alloc[i] += 1;
    data[i] += 1;
  }

#pragma omp target
  {
    // These checks (and prints) run on the device.
    // CHECK: Alloc host values updated: Succeeded
    fails = 0;
    for (int i = 0; i < N; i++) {
      if (alloc[i] != 12)
        fails++;
    }
    printf("Alloc host values updated: %s\n",
           (fails == 0) ? "Succeeded" : "Failed");

    // CHECK: Data host values updated: Succeeded
    fails = 0;
    for (int i = 0; i < N; i++) {
      if (data[i] != 3)
        fails++;
    }
    printf("Data host values updated: %s\n",
           (fails == 0) ? "Succeeded" : "Failed");
  }

  free(alloc);

  printf("Done!\n");

  return 0;
}
|
atomic-10.c | /* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */
extern void abort (void);
int x1, x2, x3, x4, x5;
volatile int y6 = 9, y2, y3, y4, y5;
volatile unsigned char z1, z2, z3, z4, z5;
float a1, a2, a3, a4;
/* Exercise every supported "#pragma omp atomic" update form (++, --, and
   each compound assignment) on plain ints x1..x5, using volatile y6 as one
   operand.  Statement order matters: main() checks the exact final values,
   so this block must stay byte-for-byte in this sequence. */
void
f1 (void)
{
#pragma omp atomic
  x1++;
#pragma omp atomic
  x2--;
#pragma omp atomic
  ++x3;
#pragma omp atomic
  --x4;
#pragma omp atomic
  x5 += 1;
#pragma omp atomic
  x1 -= y6;
#pragma omp atomic
  x2 |= 1;
#pragma omp atomic
  x3 &= 1;
#pragma omp atomic
  x4 ^= 1;
#pragma omp atomic
  x5 *= 3;
#pragma omp atomic
  x1 /= 3;
#pragma omp atomic
  x2 /= 3;
#pragma omp atomic
  x3 <<= 3;
#pragma omp atomic
  x4 >>= 3;
}
/* Same atomic-update forms as f1(), but applied to volatile ints y2..y6
   (with non-volatile x1 as one operand) to verify atomic expansion on
   volatile-qualified objects.  Exact final values are checked in main(). */
void
f2 (void)
{
#pragma omp atomic
  y6++;
#pragma omp atomic
  y2--;
#pragma omp atomic
  ++y3;
#pragma omp atomic
  --y4;
#pragma omp atomic
  y5 += 1;
#pragma omp atomic
  y6 -= x1;
#pragma omp atomic
  y2 |= 1;
#pragma omp atomic
  y3 &= 1;
#pragma omp atomic
  y4 ^= 1;
#pragma omp atomic
  y5 *= 3;
#pragma omp atomic
  y6 /= 3;
#pragma omp atomic
  y2 /= 3;
#pragma omp atomic
  y3 <<= 3;
#pragma omp atomic
  y4 >>= 3;
}
/* Atomic updates on volatile unsigned chars z1..z5: narrower-than-int
   operands, where arithmetic wraps modulo 256 (e.g. z4 is expected to end
   at 253 in main()). */
void
f3 (void)
{
#pragma omp atomic
  z1++;
#pragma omp atomic
  z2--;
#pragma omp atomic
  ++z3;
#pragma omp atomic
  --z4;
#pragma omp atomic
  z5 += 1;
#pragma omp atomic
  z1 |= 1;
#pragma omp atomic
  z2 &= 1;
#pragma omp atomic
  z3 ^= 1;
#pragma omp atomic
  z4 *= 3;
#pragma omp atomic
  z5 /= 3;
#pragma omp atomic
  z1 /= 3;
#pragma omp atomic
  z2 <<= 3;
#pragma omp atomic
  z3 >>= 3;
}
/* Atomic updates on floats a1..a4.  Note a3's update reads a1 and a2
   AFTER their updates above, so the statement order is significant. */
void
f4 (void)
{
#pragma omp atomic
  a1 += 8.0;
#pragma omp atomic
  a2 *= 3.5;
#pragma omp atomic
  a3 -= a1 + a2;
#pragma omp atomic
  a4 /= 2.0;
}
/* Single-threaded driver: the atomic constructs in f1..f4 must still
   produce exactly the plain-arithmetic results asserted here. */
int
main (void)
{
  f1 ();
  if (x1 != -2 || x2 != 0 || x3 != 8 || x4 != -1 || x5 != 3)
    abort ();
  f2 ();
  if (y6 != 4 || y2 != 0 || y3 != 8 || y4 != -1 || y5 != 3)
    abort ();
  f3 ();
  /* Unsigned char results wrap modulo 256 (hence z4 == 253).  */
  if (z1 != 0 || z2 != 8 || z3 != 0 || z4 != 253 || z5 != 0)
    abort ();
  a1 = 7;
  a2 = 10;
  a3 = 11;
  a4 = 13;
  f4 ();
  if (a1 != 15.0 || a2 != 35.0 || a3 != -39.0 || a4 != 6.5)
    abort ();
  return 0;
}
|
gm_order.h | #ifndef GM_ORDER_H
#define GM_ORDER_H
#include <list>
#include "gm_internal.h"
#include "gm_bitmap.h"
// An ordered collection of element ids in [0, max_sz): a std::list holds
// the order, and a bitmap gives O(1) membership tests so each element
// appears at most once.  Parallel pushes are staged in per-thread lists and
// folded into the main list by merge().
template<typename T>
class gm_order
{
public:
    gm_order(int _max_sz, int _max_thread = 16) :
            max_thread(_max_thread), max_sz(_max_sz) {
        local_Q_front = new std::list<T>[max_thread];
        local_Q_back = new std::list<T>[max_thread];
        bitmap = new unsigned char[(max_sz + 7) / 8];
        for (int i = 0; i < (max_sz + 7) / 8; i++)
            bitmap[i] = 0;
    }

    virtual ~gm_order() {
        delete[] local_Q_front;
        delete[] local_Q_back;
        delete[] bitmap;
    }

    //------------------------------------------------------------
    // API
    // push_back/front, pop_back/front, clear, get_size
    // push has separate parallel interface
    //------------------------------------------------------------
    // Append e (sequential); no-op if e is already present.
    void push_back(T e)
    {
        if (!_gm_get_bit(bitmap, e)) {
            _gm_set_bit(bitmap, e);
            Q.push_back(e);
        }
    }
    // Prepend e (sequential); no-op if e is already present.
    void push_front(T e) {
        if (!_gm_get_bit(bitmap, e)) {
            _gm_set_bit(bitmap, e);
            Q.push_front(e);
        }
    }
    // Remove the last element and clear its membership bit.
    void pop_back() {
        T e = Q.back();
        _gm_clear_bit(bitmap, e);
        Q.pop_back();
    }
    // Remove the first element and clear its membership bit.
    void pop_front() {
        T e = Q.front();
        _gm_clear_bit(bitmap, e);
        Q.pop_front();
    }
    void clear() {
        Q.clear();
#pragma omp parallel for
        for (int i = 0; i < (max_sz + 7) / 8; i++)
            bitmap[i] = 0;
    }
    size_t get_size() {
        return Q.size();
    }
    bool is_in(T e) {
        return (_gm_get_bit(bitmap, e) == 1);
    }

    // Parallel push: test, then atomically claim the bit so exactly one
    // thread stages the element.
    void push_back_par(T e, int tid) {
        if (!_gm_get_bit(bitmap, e)) { // test and atomic
            if (_gm_set_bit_atomic(bitmap, e)) {
                local_Q_back[tid].push_back(e);
            }
        }
    }
    void push_front_par(T e, int tid) {
        if (!_gm_get_bit(bitmap, e)) { // test and atomic
            if (_gm_set_bit_atomic(bitmap, e)) {
                // Fixed: the original staged into local_Q_back, which merge()
                // splices to the *end* of Q; front-pushes belong in
                // local_Q_front so they end up at the front.
                local_Q_front[tid].push_front(e);
            }
        }
    }

    //-------------------------------------------
    // called when parallel addition is finished
    //-------------------------------------------
    void merge() {
        for (int i = 0; i < max_thread; i++) {
            if (local_Q_front[i].size() > 0) Q.splice(Q.begin(), local_Q_front[i]);
            if (local_Q_back[i].size() > 0) Q.splice(Q.end(), local_Q_back[i]);
        }
    }

    // for sequential iteration
    std::list<T>& get_list() {
        return Q;
    }

    //-----------------------------------------------
    // for iteration
    //-----------------------------------------------
    // todo, correctly use nested template def
#define ITERATOR_CLASS(CLASS_NAME, LIST_ITER_TYPE) \
    class CLASS_NAME {\
    public: \
        CLASS_NAME(typename LIST_ITER_TYPE I, typename LIST_ITER_TYPE E) \
        : ITER(I), END_ITER(E) {} \
        inline bool has_next() { \
            return (ITER != END_ITER); \
        } \
        inline T get_next() \
        { T t = *ITER; ITER++; return t;} \
    private: \
        typename LIST_ITER_TYPE ITER; \
        typename LIST_ITER_TYPE END_ITER; \
    };

    ITERATOR_CLASS(seq_iter, std::list<T>::iterator)
    ;ITERATOR_CLASS(rev_iter, std::list<T>::reverse_iterator)
    ;
#undef ITERATOR_CLASS

    // Parallel iterator.  Two modes:
    //  - "small": one thread walks the underlying list directly;
    //  - "large": each thread scans its slice of the membership bitmap.
    class par_iter
    {
    public:
        // Small-instance mode: iterate the list range [I, E).
        // (Fixed: members were declared as std::set iterators, which do not
        // accept the std::list iterators passed here.)
        par_iter(typename std::list<T>::iterator I, typename std::list<T>::iterator E) :
                is_small(true), bitmap(NULL), ITER(I), END_ITER(E), IDX(), END_IDX() {
        }
        // Large-instance mode: scan bitmap index range [I, E).
        // (Fixed: the original initialized the list iterators from I/E and
        // left IDX/END_IDX uninitialized.)
        par_iter(unsigned char* B, T I, T E) :
                is_small(false), bitmap(B), IDX(I), END_IDX(E) {
        }
        inline bool has_next() {
            if (is_small)
                return (ITER != END_ITER);
            else {
                // NOTE(review): preserves the original polarity -- advances
                // past indices whose _gm_check_bit() is non-zero and reports
                // a next element when it is zero.  Confirm _gm_check_bit's
                // semantics against gm_bitmap.h; is_in() uses _gm_get_bit
                // with the opposite-looking convention.
                while (IDX < END_IDX) {
                    if (_gm_check_bit(bitmap, IDX) == 0) return true;
                    IDX++;
                }
                return false;
            }
        }
        inline T get_next() {
            if (is_small) {
                T t = *ITER;
                ITER++;
                return t;
            } else {
                return IDX++;
            }
        }
    private:
        bool is_small;
        unsigned char* bitmap;                     // NULL in small mode
        typename std::list<T>::iterator ITER;      // small-instance mode
        typename std::list<T>::iterator END_ITER;  // small-instance mode
        T IDX;       // large-instance mode: current bitmap index
        T END_IDX;   // large-instance mode: one past the last index
    };

    seq_iter prepare_seq_iteration() {
        seq_iter I(Q.begin(), Q.end());
        return I;
    }
    rev_iter prepare_rev_iteration() {
        rev_iter I(Q.rbegin(), Q.rend());
        return I;
    }
    // Give each thread either the whole list (small instances, thread 0
    // only) or an even slice of the bitmap index space (large instances).
    par_iter prepare_par_iteration(int thread_id, int max_threads) {
        bool is_small = (Q.size() < THRESHOLD_LARGE);
        if (is_small) {
            // for small instance, use single thread
            if (thread_id == 0) {
                par_iter I(Q.begin(), Q.end());
                return I;
            } else {
                par_iter I(Q.end(), Q.end());
                return I;
            }
        } else {
            size_t cnt = max_sz / max_threads;
            T begin = cnt * thread_id;
            T end = (thread_id == (max_threads - 1)) ? max_sz : begin + cnt;
            par_iter I(bitmap, begin, end);
            return I;
        }
    }

private:
    // Default construction without a size is prohibited.
    // (Init list reordered to match declaration order.)
    gm_order() : local_Q_front(NULL), local_Q_back(NULL), max_thread(-1), max_sz(-1), bitmap(NULL) {
    }

    std::list<T> Q;
    std::list<T>* local_Q_front;  // per-thread staging for push_front_par
    std::list<T>* local_Q_back;   // per-thread staging for push_back_par
    int max_thread;
    int max_sz;
    unsigned char* bitmap;        // one membership bit per element id
    static const int THRESHOLD_LARGE = 4096;
};
typedef gm_order<node_t> gm_node_order;
typedef gm_order<edge_t> gm_edge_order;
#endif
|
mpncbo.c | /* $Header$ */
/* mpncbo -- netCDF binary operator */
/* Purpose: Compute sum, difference, product, or ratio of specified hyperslabs of specified variables
from two input netCDF files and output them to a single file. */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
/* Usage:
mpncbo -O -p ~/nco/data in.nc in.nc ~/foo.nc
mpncbo -O -v mss_val in.nc in.nc ~/foo.nc
mpncbo -p /data/zender/tmp h0001.nc ~/foo.nc
mpncbo -p /data/zender/tmp -l /data/zender/tmp/rmt h0001.nc h0002.nc ~/foo.nc
mpncbo -p /ZENDER/tmp -l /data/zender/tmp/rmt h0001.nc h0002.nc ~/foo.nc
mpncbo -p /ZENDER/tmp -l /usr/tmp/zender h0001.nc h0002.nc ~/foo.nc
Test type conversion:
ncks -O -C -v float_var in.nc foo1.nc
ncrename -v float_var,double_var foo1.nc
ncks -O -C -v double_var in.nc foo2.nc
mpncbo -O -C -v double_var foo1.nc foo2.nc foo3.nc
mpncbo -O -C -v double_var foo2.nc foo1.nc foo4.nc
ncks -H -m foo1.nc
ncks -H -m foo2.nc
ncks -H -m foo3.nc
ncks -H -m foo4.nc
Test nco_var_cnf_dmn:
ncks -O -v scalar_var in.nc ~/foo.nc ; ncrename -v scalar_var,four_dmn_rec_var foo.nc ; mpncbo -O -v four_dmn_rec_var in.nc ~/foo.nc foo2.nc */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard C headers */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* atof, atoi, malloc, getopt */
#include <string.h> /* strcmp() */
#include <sys/stat.h> /* stat() */
#include <time.h> /* machine time */
#include <unistd.h> /* POSIX stuff */
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
# include <mpi.h> /* MPI definitions */
# include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* Personal headers */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
int
main(int argc,char **argv)
{
char **fl_lst_abb=NULL; /* Option a */
char **fl_lst_in;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char *aux_arg[NC_MAX_DIMS];
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in_1=NULL; /* fl_in_1 is nco_realloc'd when not NULL */
char *fl_in_2=NULL; /* fl_in_2 is nco_realloc'd when not NULL */
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL; /* MPI CEWI */
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *nco_op_typ_sng=NULL; /* [sng] Operation type */
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567ACcD:d:FhL:l:Oo:p:rRSt:v:X:xy:-:";
cnk_dmn_sct **cnk_dmn=NULL_CEWI;
#if defined(__cplusplus) || defined(PGI_CC)
ddra_info_sct ddra_info;
ddra_info.flg_ddra=False;
#else /* !__cplusplus */
ddra_info_sct ddra_info={.MRV_flg=False,.flg_ddra=False,.lmn_nbr=0LL,.lmn_nbr_avg=0LL,.lmn_nbr_wgt=0LL,.nco_op_typ=nco_op_nil,.rnk_avg=0,.rnk_var=0,.rnk_wgt=0,.tmr_flg=nco_tmr_srt,.var_idx=0,.wgt_brd_flg=False,.wrd_sz=0};
#endif /* !__cplusplus */
dmn_sct **dim_1;
dmn_sct **dim_2;
dmn_sct **dmn_out;
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
int *in_id_1_arr;
int *in_id_2_arr;
int abb_arg_nbr=0;
int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_idx;
int fl_nbr=0;
int fl_in_fmt_1; /* [enm] Input file format */
int fl_in_fmt_2; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx;
int jdx;
int dmn_idx;
int dmn_jdx;
int in_id_1;
int in_id_2;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl_1;
int nbr_dmn_fl_2;
int nbr_dmn_xtr_1;
int nbr_dmn_xtr_2;
int nbr_var_fix_1; /* nbr_var_fix_1 gets incremented */
int nbr_var_fix_2; /* nbr_var_fix_2 gets incremented */
int nbr_var_fl_1;
int nbr_var_fl_2;
int nbr_var_prc_1; /* nbr_var_prc_1 gets incremented */
int nbr_var_prc_2; /* nbr_var_prc_2 gets incremented */
int xtr_nbr_1=0; /* xtr_nbr_1 won't otherwise be set for -c with no -v */
int xtr_nbr_2=0; /* xtr_nbr_2 won't otherwise be set for -c with no -v */
int nco_op_typ=nco_op_nil; /* [enm] Operation type */
int opt;
int out_id;
int rcd=NC_NOERR; /* [rcd] Return code */
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int var_lst_in_nbr=0;
lmt_sct **aux=NULL_CEWI; /* Auxiliary coordinate limits */
lmt_sct **lmt=NULL_CEWI;
lmt_all_sct **lmt_all_lst=NULL_CEWI; /* List of *lmt_all structures */
cnv_sct *cnv; /* [sct] Convention structure */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool FILE_1_RETRIEVED_FROM_REMOTE_LOCATION;
nco_bool FILE_2_RETRIEVED_FROM_REMOTE_LOCATION;
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order*/
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
nco_bool flg_ddra=False; /* [flg] DDRA diagnostics */
nm_id_sct *dmn_lst_1;
nm_id_sct *dmn_lst_2;
nm_id_sct *xtr_lst_1=NULL; /* xtr_lst_1 may be alloc()'d from NULL with -c option */
nm_id_sct *xtr_lst_2=NULL; /* xtr_lst_2 may be alloc()'d from NULL with -c option */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var_1;
var_sct **var_2;
var_sct **var_fix_1;
var_sct **var_fix_2;
var_sct **var_fix_out;
var_sct **var_out;
var_sct **var_prc_1;
var_sct **var_prc_2;
var_sct **var_prc_out;
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */
nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */
int fl_nm_lng; /* [nbr] Output file name length CEWI */
int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */
int msg_tag_typ; /* [enm] MPI message tag type */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
int tkn_wrt_rsp; /* [enm] Response to request for write token */
int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */
int rnk_wrk; /* [idx] Worker rank */
int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"ddra",no_argument,0,0}, /* [flg] DDRA diagnostics */
{"mdl_cmp",no_argument,0,0}, /* [flg] DDRA diagnostics */
{"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"ram_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"path",required_argument,0,'p'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"suspend", no_argument,0,'S'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"variable",required_argument,0,'v'},
{"auxiliary",required_argument,0,'X'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{"operation",required_argument,0,'y'},
{"op_typ",required_argument,0,'y'},
{"help",no_argument,0,'?'},
{"hlp",no_argument,0,'?'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
#ifdef ENABLE_MPI
/* MPI Initialization */
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr);
MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Start timer and save command line */
ddra_info.tmr_flg=nco_tmr_srt;
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_mtd;
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
/* NOTE(review): this strdup() executes even when a short option was matched, in which
   case opt_idx was not updated by getopt_long() and opt_lng[opt_idx].name is a stale or
   arbitrary long-option name. Harmless only because every strcmp(opt_crr,...) test below
   is guarded by if(opt == 0) — confirm no other code path reads opt_crr for short options */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"ddra") || !strcmp(opt_crr,"mdl_cmp")) ddra_info.flg_ddra=flg_ddra=True; /* [flg] DDRA diagnostics */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create file in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* The debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
#ifdef ENABLE_MPI
case 'S': /* Suspend with signal handler to facilitate debugging */
if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stdout,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm);
while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */
break;
#endif /* !ENABLE_MPI */
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr_1=xtr_nbr_2=var_lst_in_nbr;
break;
case 'X': /* Copy auxiliary coordinate argument for later processing */
aux_arg[aux_nbr]=(char *)strdup(optarg);
aux_nbr++;
MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case 'y': /* User-specified operation type overrides invocation default */
nco_op_typ_sng=(char *)strdup(optarg);
nco_op_typ=nco_op_typ_get(nco_op_typ_sng);
break;
case '?': /* Print proper usage */
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Make uniform list of user-specified chunksizes */
if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg);
/* Make uniform list of user-specified dimension limits */
lmt=nco_lmt_prs(lmt_nbr,lmt_arg);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_1_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
in_id_2_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filenames */
fl_idx=0; /* Input file _1 */
fl_in_1=nco_fl_nm_prs(fl_in_1,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in_1);
/* Make sure file is on local system and is readable or die trying */
fl_in_1=nco_fl_mk_lcl(fl_in_1,fl_pth_lcl,&FILE_1_RETRIEVED_FROM_REMOTE_LOCATION);
if(nco_dbg_lvl >= nco_dbg_fl && FILE_1_RETRIEVED_FROM_REMOTE_LOCATION) (void)fprintf(stderr,", local file is %s",fl_in_1);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n");
/* Open file once per thread to improve caching */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in_1,md_open,&bfr_sz_hnt,in_id_1_arr+thr_idx);
in_id_1=in_id_1_arr[0];
fl_idx=1; /* Input file _2 */
fl_in_2=nco_fl_nm_prs(fl_in_2,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in_2);
/* Make sure file is on local system and is readable or die trying */
fl_in_2=nco_fl_mk_lcl(fl_in_2,fl_pth_lcl,&FILE_2_RETRIEVED_FROM_REMOTE_LOCATION);
if(nco_dbg_lvl >= nco_dbg_fl && FILE_2_RETRIEVED_FROM_REMOTE_LOCATION) (void)fprintf(stderr,", local file is %s",fl_in_2);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n");
/* Open file once per thread to improve caching */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in_2,md_open,&bfr_sz_hnt,in_id_2_arr+thr_idx);
in_id_2=in_id_2_arr[0];
/* Parse auxiliary coordinates */
if(aux_nbr > 0){
int aux_idx_nbr;
aux=nco_aux_evl(in_id_1,aux_nbr,aux_arg,&aux_idx_nbr);
if(aux_idx_nbr > 0){
lmt=(lmt_sct **)nco_realloc(lmt,(lmt_nbr+aux_idx_nbr)*sizeof(lmt_sct *));
int lmt_nbr_new=lmt_nbr+aux_idx_nbr;
int aux_idx=0;
for(int lmt_idx=lmt_nbr;lmt_idx<lmt_nbr_new;lmt_idx++) lmt[lmt_idx]=aux[aux_idx++];
lmt_nbr=lmt_nbr_new;
} /* endif aux */
} /* endif aux_nbr */
/* Get number of variables and dimensions in file */
(void)nco_inq(in_id_1,&nbr_dmn_fl_1,&nbr_var_fl_1,(int *)NULL,(int *)NULL);
(void)nco_inq(in_id_2,&nbr_dmn_fl_2,&nbr_var_fl_2,(int *)NULL,(int *)NULL);
(void)nco_inq_format(in_id_1,&fl_in_fmt_1);
(void)nco_inq_format(in_id_2,&fl_in_fmt_2);
/* Form initial extraction list which may include extended regular expressions */
xtr_lst_1=nco_var_lst_mk(in_id_1,nbr_var_fl_1,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr_1);
xtr_lst_2=nco_var_lst_mk(in_id_2,nbr_var_fl_2,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr_2);
/* Change included variables to excluded variables */
if(EXCLUDE_INPUT_LIST) xtr_lst_1=nco_var_lst_xcl(in_id_1,nbr_var_fl_1,xtr_lst_1,&xtr_nbr_1);
if(EXCLUDE_INPUT_LIST) xtr_lst_2=nco_var_lst_xcl(in_id_2,nbr_var_fl_2,xtr_lst_2,&xtr_nbr_2);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id_1);
/* Add all coordinate variables to extraction list */
if(EXTRACT_ALL_COORDINATES) xtr_lst_1=nco_var_lst_crd_add(in_id_1,nbr_dmn_fl_1,nbr_var_fl_1,xtr_lst_1,&xtr_nbr_1,cnv);
if(EXTRACT_ALL_COORDINATES) xtr_lst_2=nco_var_lst_crd_add(in_id_2,nbr_dmn_fl_2,nbr_var_fl_2,xtr_lst_2,&xtr_nbr_2,cnv);
/* Extract coordinates associated with extracted variables */
if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst_1=nco_var_lst_crd_ass_add(in_id_1,xtr_lst_1,&xtr_nbr_1,cnv);
if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst_2=nco_var_lst_crd_ass_add(in_id_2,xtr_lst_2,&xtr_nbr_2,cnv);
/* With fully symmetric 1<->2 ordering, may occasionally find xtr_nbr_2 > xtr_nbr_1
This occurs, e.g., when fl_in_1 contains reduced variables and full coordinates
are only in fl_in_2 and so will not appear xtr_lst_1 */
/* Sort extraction list by variable ID for fastest I/O */
if(xtr_nbr_1 > 1) xtr_lst_1=nco_lst_srt_nm_id(xtr_lst_1,xtr_nbr_1,False);
if(xtr_nbr_2 > 1) xtr_lst_2=nco_lst_srt_nm_id(xtr_lst_2,xtr_nbr_2,False);
/* We now have final list of variables to extract. Phew. */
/* Find coordinate/dimension values associated with user-specified limits
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id_1,lmt[idx],0L,FORTRAN_IDX_CNV);
/* Place all dimensions in lmt_all_lst */
lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl_1*sizeof(lmt_all_sct *));
/* Initialize lmt_all_sct's */
(void)nco_msa_lmt_all_ntl(in_id_1,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl_1,lmt,lmt_nbr);
/* Find dimensions associated with variables to be extracted */
dmn_lst_1=nco_dmn_lst_ass_var(in_id_1,xtr_lst_1,xtr_nbr_1,&nbr_dmn_xtr_1);
dmn_lst_2=nco_dmn_lst_ass_var(in_id_2,xtr_lst_2,xtr_nbr_2,&nbr_dmn_xtr_2);
/* Fill-in dimension structure for all extracted dimensions */
dim_1=(dmn_sct **)nco_malloc(nbr_dmn_xtr_1*sizeof(dmn_sct *));
dim_2=(dmn_sct **)nco_malloc(nbr_dmn_xtr_2*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr_1;idx++) dim_1[idx]=nco_dmn_fll(in_id_1,dmn_lst_1[idx].id,dmn_lst_1[idx].nm);
for(idx=0;idx<nbr_dmn_xtr_2;idx++) dim_2[idx]=nco_dmn_fll(in_id_2,dmn_lst_2[idx].id,dmn_lst_2[idx].nm);
/* Dimension lists no longer needed */
dmn_lst_1=nco_nm_id_lst_free(dmn_lst_1,nbr_dmn_xtr_1);
dmn_lst_2=nco_nm_id_lst_free(dmn_lst_2,nbr_dmn_xtr_2);
/* Check that dims in list 2 are a subset of list 1 and that they are the same size */
(void)nco_dmn_sct_cmp(dim_1,nbr_dmn_xtr_1,dim_2,nbr_dmn_xtr_2,fl_in_1,fl_in_2);
/* Duplicate input dimension structures for output dimension structures */
dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr_1*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr_1;idx++){
dmn_out[idx]=nco_dmn_dpl(dim_1[idx]);
(void)nco_dmn_xrf(dim_1[idx],dmn_out[idx]);
}
/* Merge hyperslab limit information into dimension structures */
if(nbr_dmn_fl_1 > 0) (void)nco_dmn_lmt_all_mrg(dmn_out,nbr_dmn_xtr_1,lmt_all_lst,nbr_dmn_fl_1);
if(nco_dbg_lvl >= nco_dbg_sbr){
for(idx=0;idx<xtr_nbr_1;idx++) (void)fprintf(stderr,"xtr_lst_1[%d].nm = %s, .id= %d\n",idx,xtr_lst_1[idx].nm,xtr_lst_1[idx].id);
} /* end if */
/* Fill-in variable structure list for all extracted variables */
var_1=(var_sct **)nco_malloc(xtr_nbr_1*sizeof(var_sct *));
var_2=(var_sct **)nco_malloc(xtr_nbr_2*sizeof(var_sct *));
var_out=(var_sct **)nco_malloc(xtr_nbr_1*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr_1;idx++){
var_1[idx]=nco_var_fll(in_id_1,xtr_lst_1[idx].id,xtr_lst_1[idx].nm,dim_1,nbr_dmn_xtr_1);
var_out[idx]=nco_var_dpl(var_1[idx]);
(void)nco_xrf_var(var_1[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over idx */
for(idx=0;idx<xtr_nbr_2;idx++) var_2[idx]=nco_var_fll(in_id_2,xtr_lst_2[idx].id,xtr_lst_2[idx].nm,dim_2,nbr_dmn_xtr_2);
/* Extraction lists no longer needed */
xtr_lst_1=nco_nm_id_lst_free(xtr_lst_1,xtr_nbr_1);
xtr_lst_2=nco_nm_id_lst_free(xtr_lst_2,xtr_nbr_2);
/* Die gracefully on unsupported features... */
if(xtr_nbr_1 < xtr_nbr_2){
(void)fprintf(fp_stdout,"%s: WARNING First file has fewer extracted variables than second file (%d < %d). This desired feature is TODO nco581.\n",nco_prg_nm,xtr_nbr_1,xtr_nbr_2);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Refresh var_out with dim_out data */
(void)nco_var_dmn_refresh(var_out,xtr_nbr_1);
/* Change dimensions in dim_2 to dim_out */
for(idx=0;idx<nbr_dmn_xtr_2;idx++){
for(jdx=0;jdx<nbr_dmn_xtr_1;jdx++)
if(!strcmp(dim_2[idx]->nm,dmn_out[jdx]->nm)){
/* NB: Copy new dim data but do NOT free original as dimension element is aliased in var_2 array */
(void)nco_dmn_cpy(dim_2[idx],dmn_out[jdx]);
break;
} /* endif */
/* Dimension not found so die gracefully */
if(jdx==nbr_dmn_xtr_1){
(void)fprintf(fp_stdout,"%s: ERROR dimension \"%s\" in second file %s is not present in first file %s\n",nco_prg_nm,dim_2[idx]->nm,fl_in_2,fl_in_1);
nco_exit(EXIT_FAILURE);
} /* endif dimension not found */
} /* end loop over dimensions */
/* Refresh var_2 with the new dim_2 data */
(void)nco_var_dmn_refresh(var_2,xtr_nbr_2);
/* Divide variable lists into lists of fixed variables and variables to be processed
Create lists from file_1 last so those values remain in *_out arrays */
(void)nco_var_lst_dvd(var_2,var_out,xtr_nbr_2,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix_2,&var_fix_out,&nbr_var_fix_2,&var_prc_2,&var_prc_out,&nbr_var_prc_2);
/* Avoid double-free() condition */
var_fix_out=(var_sct **)nco_free(var_fix_out);
var_prc_out=(var_sct **)nco_free(var_prc_out);
(void)nco_var_lst_dvd(var_1,var_out,xtr_nbr_1,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix_1,&var_fix_out,&nbr_var_fix_1,&var_prc_1,&var_prc_out,&nbr_var_prc_1);
/* Die gracefully on unsupported features... */
if(nbr_var_fix_1 < nbr_var_fix_2){
(void)fprintf(fp_stdout,"%s: ERROR First file has fewer fixed variables than second file (%d < %d). This feature is TODO nco581.\n",nco_prg_nm,nbr_var_fix_1,nbr_var_fix_2);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Merge two variable lists into same order */
rcd=nco_var_lst_mrg(&var_prc_1,&var_prc_2,&nbr_var_prc_1,&nbr_var_prc_2);
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt_1;
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
/* NOTE(review): nco_fl_out_open() is called again ~10 lines below (inside the MPI-manager
   guard), overwriting fl_out_tmp and out_id from this call. The "fxm got to here merging"
   marker below suggests this duplication is an artifact of an unfinished ncbo->mpncbo merge;
   likely one of the two opens (and the duplicated consanguinity check) should be removed,
   but that cannot be decided from this excerpt — confirm against upstream ncbo.c */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);
/* 20101019 fxm got to here merging ncbo 4.0.5 into mpncbo */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr_1);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Make output and input files consanguinous (duplicate of check above — see NOTE) */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt_1;
/* Open output file (second open — see NOTE above) */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);
/* Copy global attributes */
(void)nco_att_cpy(in_id_1,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in_1,in_id_1,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
#ifdef ENABLE_MPI
/* Initialize MPI task information */
if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr);
#endif /* !ENABLE_MPI */
/* Define dimensions in output file */
(void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr_1);
/* fxm: TODO 550 put max_dim_sz/list(var_1,var_2) into var_def(var_out) */
/* Define variables in output file, copy their attributes */
(void)nco_var_dfn(in_id_1,fl_out,out_id,var_out,xtr_nbr_1,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl_1,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
/* Manager obtains output filename and broadcasts to workers */
if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp);
MPI_Bcast(&fl_nm_lng,1,MPI_INT,rnk_mgr,MPI_COMM_WORLD);
if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char));
MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,rnk_mgr,MPI_COMM_WORLD);
if(prc_rnk == rnk_mgr){ /* MPI manager code */
TKN_WRT_FREE=False;
#endif /* !ENABLE_MPI */
/* Copy variable data for non-processed variables */
(void)nco_msa_var_val_cpy(in_id_1,out_id,var_fix_1,nbr_var_fix_1,lmt_all_lst,nbr_dmn_fl_1);
#ifdef ENABLE_MPI
/* Close output file so workers can open it */
nco_close(out_id);
TKN_WRT_FREE=True;
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* ncbo() code has been similar to nces() (and ncra()) wherever possible
Major differences occur where performance would otherwise suffer
From now on, however, binary-file and binary-operation nature of ncbo()
is too different from nces() paradigm to justify following nces() style.
Instead, we adopt symmetric nomenclature (e.g., file_1, file_2), and
perform differences variable-by-variable so peak memory usage goes as
Order(2*maximum variable size) rather than Order(3*maximum record size) or
Order(3*file size) */
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk(); /* Deliberately disabled (dead) error-check hook */
/* Default operation depends on invocation name */
/* NOTE(review): when nco_op_typ_sng is NULL this passes NULL to nco_op_typ_get() —
   presumably that routine falls back to deriving the operation from the program name
   (per the comment above); confirm nco_op_typ_get() handles a NULL argument */
if(nco_op_typ_sng == NULL) nco_op_typ=nco_op_typ_get(nco_op_typ_sng);
/* Timestamp end of metadata setup and disk layout */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_rgl;
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc_1){
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
TKN_WRT_FREE=True;
if(idx < nbr_var_prc_1){
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
}else{
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD);
/* msg_tag_typ != msg_tag_wrk_rqs */
}else if(msg_tag_typ == msg_tag_tkn_wrt_rqs){
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* end while var_wrt_nbr < nbr_var_prc_1 */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
wrk_id_bfr[0]=prc_rnk;
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,0,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt);
idx=msg_bfr[0];
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass) break;
else{
var_prc_out[idx]->id=msg_bfr[2];
/* Process this variable same as UP code */
#else /* !ENABLE_MPI */
#ifdef _OPENMP
/* OpenMP notes:
shared(): msk and wgt are not altered within loop
private(): wgt_avg does not need initialization */
#pragma omp parallel for default(none) firstprivate(ddra_info) private(idx,in_id_1,in_id_2,dmn_idx,dmn_jdx) shared(nco_dbg_lvl,dim_1,fl_in_1,fl_in_2,fl_out,flg_ddra,in_id_1_arr,in_id_2_arr,nbr_dmn_xtr_1,nbr_var_prc_1,nbr_var_prc_2,nco_op_typ,out_id,nco_prg_nm,rcd,var_prc_1,var_prc_2,var_prc_out,lmt_all_lst,nbr_dmn_fl_1)
#endif /* !_OPENMP */
/* UP and SMP codes main loop over variables */
for(idx=0;idx<nbr_var_prc_1;idx++){
#endif /* ENABLE_MPI */
/* Common code for UP, SMP, and MPI */
int has_mss_val=False;
ptr_unn mss_val;
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stderr,"%s, ",var_prc_1[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
in_id_1=in_id_1_arr[omp_get_thread_num()];
in_id_2=in_id_2_arr[omp_get_thread_num()];
(void)nco_var_mtd_refresh(in_id_1,var_prc_1[idx]);
has_mss_val=var_prc_1[idx]->has_mss_val;
(void)nco_msa_var_get(in_id_1,var_prc_1[idx],lmt_all_lst,nbr_dmn_fl_1);
/* Find and set variable dmn_nbr, ID, mss_val, type in second file */
(void)nco_var_mtd_refresh(in_id_2,var_prc_2[idx]);
/* Read hyperslab from second file */
(void)nco_msa_var_get(in_id_2,var_prc_2[idx],lmt_all_lst,nbr_dmn_fl_1);
/* Check that all dims in var_prc_2 are in var_prc_1 */
for(dmn_idx=0;dmn_idx<var_prc_2[idx]->nbr_dim;dmn_idx++){
for(dmn_jdx=0;dmn_jdx<var_prc_1[idx]->nbr_dim;dmn_jdx++)
if(!strcmp(var_prc_2[idx]->dim[dmn_idx]->nm,var_prc_1[idx]->dim[dmn_jdx]->nm))
break;
if(dmn_jdx==var_prc_1[idx]->nbr_dim){
(void)fprintf(fp_stdout,"%s: ERROR Variables do not conform:\nFile %s variable %s has dimension %s not present in file %s variable %s\n",nco_prg_nm,fl_in_2,var_prc_2[idx]->nm, var_prc_2[idx]->dim[dmn_idx]->nm,fl_in_1,var_prc_1[idx]->nm);
nco_exit(EXIT_FAILURE);
} /* endif error */
} /* end loop over idx */
/* Die gracefully on unsupported features... */
if(var_prc_1[idx]->nbr_dim < var_prc_2[idx]->nbr_dim){
(void)fprintf(fp_stdout,"%s: ERROR Variable %s has lesser rank in first file than in second file (%d < %d). This feature is NCO TODO 552.\n",nco_prg_nm,var_prc_1[idx]->nm,var_prc_1[idx]->nbr_dim,var_prc_2[idx]->nbr_dim);
nco_exit(EXIT_FAILURE);
} /* endif */
if(var_prc_1[idx]->nbr_dim > var_prc_2[idx]->nbr_dim) (void)ncap_var_cnf_dmn(&var_prc_out[idx],&var_prc_2[idx]);
/* var2 now conforms in size to var1, and is in memory */
/* fxm: TODO 268 allow var1 or var2 to typecast */
/* Make sure var2 conforms to type of var1 */
if(var_prc_1[idx]->type != var_prc_2[idx]->type){
if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(fp_stderr,"%s: INFO Input variables do not conform in type:\nFile 1 = %s variable %s has type %s\nFile 2 = %s variable %s has type %s\nFile 3 = %s variable %s will have type %s\n",nco_prg_nm,fl_in_1,var_prc_1[idx]->nm,nco_typ_sng(var_prc_1[idx]->type),fl_in_2,var_prc_2[idx]->nm,nco_typ_sng(var_prc_2[idx]->type),fl_out,var_prc_1[idx]->nm,nco_typ_sng(var_prc_1[idx]->type));
} /* endif different type */
var_prc_2[idx]=nco_var_cnf_typ(var_prc_1[idx]->type,var_prc_2[idx]);
/* Change missing_value of var_prc_2, if any, to missing_value of var_prc_1, if any */
has_mss_val=nco_mss_val_cnf(var_prc_1[idx],var_prc_2[idx]);
/* mss_val in fl_1, if any, overrides mss_val in fl_2 */
if(has_mss_val) mss_val=var_prc_1[idx]->mss_val;
/* Perform specified binary operation */
switch(nco_op_typ){
case nco_op_add: /* [enm] Add file_1 to file_2 */
(void)nco_var_add(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break;
case nco_op_mlt: /* [enm] Multiply file_1 by file_2 */
(void)nco_var_mlt(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break;
case nco_op_dvd: /* [enm] Divide file_1 by file_2 */
(void)nco_var_dvd(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break;
case nco_op_sbt: /* [enm] Subtract file_2 from file_1 */
(void)nco_var_sbt(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break;
default: /* Other defined nco_op_typ values are valid for ncra(), ncrcat(), ncwa(), not ncbo() */
(void)fprintf(fp_stdout,"%s: ERROR Illegal nco_op_typ in binary operation\n",nco_prg_nm);
nco_exit(EXIT_FAILURE);
break;
} /* end case */
var_prc_2[idx]->val.vp=nco_free(var_prc_2[idx]->val.vp);
#ifdef ENABLE_MPI
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE;
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl_1,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp critical
#endif /* !_OPENMP */
#endif /* !ENABLE_MPI */
/* Common code for UP, SMP, and MPI */
{ /* begin OpenMP critical */
/* Copy result to output file and free workspace buffer */
if(var_prc_1[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_1[idx]->val.vp,var_prc_1[idx]->type);
}else{ /* end if variable is scalar */
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_1[idx]->val.vp,var_prc_1[idx]->type);
} /* end else */
} /* end OpenMP critical */
var_prc_1[idx]->val.vp=nco_free(var_prc_1[idx]->val.vp);
if(flg_ddra){
/* DDRA diagnostics
Usage:
ncbo -O -C --mdl -p ~/nco/data in.nc in.nc ~/foo.nc
ncbo -O -C --mdl -p ${DATA}/nco_bm stl_5km.nc stl_5km.nc ~/foo.nc
ncbo -O -C --mdl -p ${DATA}/nco_bm gcm_T85.nc gcm_T85.nc ~/foo.nc */
/* Assign remaining input for DDRA diagnostics */
ddra_info.lmn_nbr=var_prc_1[idx]->sz; /* [nbr] Variable size */
ddra_info.nco_op_typ=nco_op_typ; /* [enm] Operation type */
ddra_info.rnk_var=var_prc_1[idx]->nbr_dim; /* I [nbr] Variable rank (in input file) */
ddra_info.var_idx=idx; /* [enm] Index */
ddra_info.wrd_sz=nco_typ_lng(var_prc_1[idx]->type); /* [B] Bytes per element */
/* DDRA diagnostics */
rcd+=nco_ddra /* [fnc] Count operations */
(var_prc_1[idx]->nm, /* I [sng] Variable name */
(char *)NULL, /* I [sng] Weight name */
&ddra_info); /* I [sct] DDRA information */
} /* !flg_ddra */
#ifdef ENABLE_MPI
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
} /* end else !idx_all_wrk_ass */
} /* end while loop requesting work/token */
} /* endif Worker */
#else /* !ENABLE_MPI */
} /* end (OpenMP parallel for) loop over idx */
#endif /* !ENABLE_MPI */
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n");
/* Close input netCDF files */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_1_arr[thr_idx]);
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_2_arr[thr_idx]);
#ifdef ENABLE_MPI
/* Manager moves output file (closed by workers) from temporary to permanent location */
if(prc_rnk == rnk_mgr) (void)nco_fl_mv(fl_out_tmp,fl_out);
#else /* !ENABLE_MPI */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
#endif /* end !ENABLE_MPI */
/* Remove local copy of file */
if(FILE_1_RETRIEVED_FROM_REMOTE_LOCATION && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in_1);
if(FILE_2_RETRIEVED_FROM_REMOTE_LOCATION && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in_2);
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncbo-specific memory */
if(fl_in_1) fl_in_1=(char *)nco_free(fl_in_1);
if(fl_in_2) fl_in_2=(char *)nco_free(fl_in_2);
/* NCO-generic clean-up */
/* Free individual strings/arrays */
for(idx=0;idx<nbr_dmn_fl_1;idx++)
for(jdx=0;jdx<lmt_all_lst[idx]->lmt_dmn_nbr;jdx++)
lmt_all_lst[idx]->lmt_dmn[jdx]=nco_lmt_free(lmt_all_lst[idx]->lmt_dmn[jdx]);
if(nbr_dmn_fl_1 > 0) lmt_all_lst=nco_lmt_all_lst_free(lmt_all_lst,nbr_dmn_fl_1);
lmt=(lmt_sct**)nco_free(lmt);
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_1_arr) in_id_1_arr=(int *)nco_free(in_id_1_arr);
if(in_id_2_arr) in_id_2_arr=(int *)nco_free(in_id_2_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]);
if(aux_nbr > 0) aux=(lmt_sct **)nco_free(aux);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr);
/* Free dimension lists */
if(nbr_dmn_xtr_1 > 0) dim_1=nco_dmn_lst_free(dim_1,nbr_dmn_xtr_1);
if(nbr_dmn_xtr_2 > 0) dim_2=nco_dmn_lst_free(dim_2,nbr_dmn_xtr_2);
if(nbr_dmn_xtr_1 > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr_1);
/* Free variable lists
Using nco_var_lst_free() to free main var_1 and var_2 lists would fail
if ncap_var_prc_dmn() had to broadcast any variables because pointer
var_1 and var_2 still contain dangling pointer to old variable.
Hence, use nco_var_lst_free() to free prc and fix lists and
use nco_free() to free main var_1 and var_2 lists.
Dangling pointers in var_1 and var_2 are unsafe: fxm TODO 578 */
if(nbr_var_prc_1 > 0) var_prc_1=nco_var_lst_free(var_prc_1,nbr_var_prc_1);
if(nbr_var_fix_1 > 0) var_fix_1=nco_var_lst_free(var_fix_1,nbr_var_fix_1);
if(nbr_var_prc_2 > 0) var_prc_2=nco_var_lst_free(var_prc_2,nbr_var_prc_2);
if(nbr_var_fix_2 > 0) var_fix_2=nco_var_lst_free(var_fix_2,nbr_var_fix_2);
var_1=(var_sct **)nco_free(var_1);
var_2=(var_sct **)nco_free(var_2);
if(xtr_nbr_1 > 0) var_out=nco_var_lst_free(var_out,xtr_nbr_1);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix_out=(var_sct **)nco_free(var_fix_out);
} /* !flg_mmr_cln */
#ifdef ENABLE_MPI
MPI_Finalize();
#endif /* !ENABLE_MPI */
/* End timer */
ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
if(rcd != NC_NOERR) nco_err_exit(rcd,"main");
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
|
adi.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 10x1024x1024. */
#include "adi.h"
/* Array initialization. */
/* Fill X, A and B with deterministic values derived from the indices,
   scaled by the problem size n (same formulas as the reference). */
static
void init_array (int n,
		 DATA_TYPE POLYBENCH_2D(X,N,N,n,n),
		 DATA_TYPE POLYBENCH_2D(A,N,N,n,n),
		 DATA_TYPE POLYBENCH_2D(B,N,N,n,n))
{
  int row, col;

  for (row = 0; row < n; row++) {
    for (col = 0; col < n; col++) {
      X[row][col] = ((DATA_TYPE) row*(col+1) + 1) / n;
      A[row][col] = ((DATA_TYPE) row*(col+2) + 2) / n;
      B[row][col] = ((DATA_TYPE) row*(col+3) + 3) / n;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints X to stderr, 20 values per line. */
static
void print_array(int n,
		 DATA_TYPE POLYBENCH_2D(X,N,N,n,n))
{
  int r, c;

  for (r = 0; r < n; r++) {
    for (c = 0; c < n; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, X[r][c]);
      if ((r * N + c) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel (ADI: alternating direction implicit sweeps).
   The whole function is timed, including the call and return.

   NOTE(review): the original wrapped the time loop in `#pragma omp master`
   while keeping `#pragma omp for` inside it.  The OpenMP spec requires a
   worksharing region to be encountered by all threads of the team or by
   none, so that version is non-conforming (the non-master threads never
   reach the worksharing barriers => deadlock/unspecified behavior).  Here
   every thread executes the sequential time loop redundantly and the
   worksharing loops are shared, which is the conforming pattern.

   The row sweeps carry a recurrence along i1 (X[i1] reads X[i1-1], B[i1]
   reads B[i1-1]), so for those the *inner* independent i2 loop is the one
   shared among threads; sharing i1 (as the original did) would race. */
static
void kernel_adi(int tsteps,
		int n,
		DATA_TYPE POLYBENCH_2D(X,N,N,n,n),
		DATA_TYPE POLYBENCH_2D(A,N,N,n,n),
		DATA_TYPE POLYBENCH_2D(B,N,N,n,n))
{
  int t, i1, i2;

  #pragma omp parallel private (t, i1, i2)
  {
    for (t = 0; t < _PB_TSTEPS; t++)
    {
      /* Column sweep: recurrence along i2; rows i1 are independent. */
      #pragma omp for
      for (i1 = 0; i1 < _PB_N; i1++)
	for (i2 = 1; i2 < _PB_N; i2++)
	  {
	    X[i1][i2] = X[i1][i2] - X[i1][i2-1] * A[i1][i2] / B[i1][i2-1];
	    B[i1][i2] = B[i1][i2] - A[i1][i2] * A[i1][i2] / B[i1][i2-1];
	  }

      #pragma omp for
      for (i1 = 0; i1 < _PB_N; i1++)
	X[i1][_PB_N-1] = X[i1][_PB_N-1] / B[i1][_PB_N-1];

      /* Back-substitution along i2; rows i1 are independent. */
      #pragma omp for
      for (i1 = 0; i1 < _PB_N; i1++)
	for (i2 = 0; i2 < _PB_N-2; i2++)
	  X[i1][_PB_N-i2-2] = (X[i1][_PB_N-2-i2] - X[i1][_PB_N-2-i2-1] * A[i1][_PB_N-i2-3]) / B[i1][_PB_N-3-i2];

      /* Row sweep: recurrence along i1, so share the independent i2 loop. */
      for (i1 = 1; i1 < _PB_N; i1++)
	{
	  #pragma omp for
	  for (i2 = 0; i2 < _PB_N; i2++) {
	    X[i1][i2] = X[i1][i2] - X[i1-1][i2] * A[i1][i2] / B[i1-1][i2];
	    B[i1][i2] = B[i1][i2] - A[i1][i2] * A[i1][i2] / B[i1-1][i2];
	  }
	}

      #pragma omp for
      for (i2 = 0; i2 < _PB_N; i2++)
	X[_PB_N-1][i2] = X[_PB_N-1][i2] / B[_PB_N-1][i2];

      /* Back-substitution along i1; columns i2 are independent. */
      for (i1 = 0; i1 < _PB_N-2; i1++)
	{
	  #pragma omp for
	  for (i2 = 0; i2 < _PB_N; i2++)
	    X[_PB_N-2-i1][i2] = (X[_PB_N-2-i1][i2] - X[_PB_N-i1-3][i2] * A[_PB_N-3-i1][i2]) / B[_PB_N-2-i1][i2];
	}
    }
  }
}
/* Benchmark driver: allocate, initialize, time the kernel, print live-out
   data for DCE prevention, then release the arrays. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int size = N;
  int steps = TSTEPS;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(X, DATA_TYPE, N, N, size, size);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, size, size);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, N, N, size, size);

  /* Initialize array(s). */
  init_array (size, POLYBENCH_ARRAY(X), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_adi (steps, size, POLYBENCH_ARRAY(X),
	      POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(size, POLYBENCH_ARRAY(X)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(X);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
GB_binop__second_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint64)
// A*D function (colscale): GB (_AxD__second_uint64)
// D*A function (rowscale): GB (_DxB__second_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = bij
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_UINT64 || GxB_NO_SECOND_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator.  The operator
// is SECOND (cij = bij, per GB_BINOP above).  All work is done by the
// shared template, driven by the GB_* macros defined earlier in this file.
GrB_Info GB (_Cdense_ewise3_noaccum__second_uint64)
(
    GrB_Matrix C,               // output matrix
    const GrB_Matrix A,         // first input (its values are ignored: GB_GETA is empty)
    const GrB_Matrix B,         // second input
    const int nthreads          // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.  With the
// SECOND operator (cij = bij), each present entry of B replaces the
// corresponding entry of C.
GrB_Info GB (_Cdense_accumB__second_uint64)
(
    GrB_Matrix C,               // dense output/accumulator matrix
    const GrB_Matrix B,         // sparse input matrix
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.  With the SECOND
// operator (cij = bij), every entry of C is set to the scalar.
// NOTE(review): the generated code contained a second, unreachable
// `return (GrB_SUCCESS)` after the inner block; the dead statement has
// been removed (the inner block always returns).
GrB_Info GB (_Cdense_accumb__second_uint64)
(
    GrB_Matrix C,               // dense output/accumulator matrix
    const GB_void *p_bwork,     // points to the scalar b, of type uint64_t
    const int nthreads          // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using the
// SECOND operator.  The template writes the results into Cx.
GrB_Info GB (_AxD__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,      // if true, A values are not accessed
    const GrB_Matrix D, bool D_is_pattern,      // if true, D values are not accessed
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // values of C, written by the colscale template below
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using the
// SECOND operator.  The template writes the results into Cx.
GrB_Info GB (_DxB__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,      // if true, D values are not accessed
    const GrB_Matrix B, bool B_is_pattern,      // if true, B values are not accessed
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // values of C, written by the rowscale template below
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the SECOND_UINT64 operator.  Where
// both entries are present cij = bij (GB_BINOP above); where only one is
// present its value is copied (GB_COPY_A_TO_C / GB_COPY_B_TO_C).
GrB_Info GB (_AaddB__second_uint64)
(
    GrB_Matrix C,                       // output matrix
    const int C_sparsity,               // sparsity structure of C
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // if true, use M structurally
    const bool Mask_comp,               // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,                // relation of C->h to M->h (see GB_add_template.c)
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces used by the template; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the SECOND operator (cij = bij
// on the intersection pattern of A and B).
GrB_Info GB (_AemultB_01__second_uint64)
(
    GrB_Matrix C,                       // output matrix
    const int C_sparsity,               // sparsity structure of C
    const int ewise_method,             // which emult method to use
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // if true, use M structurally
    const bool Mask_comp,               // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B where A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for SECOND (see above), so only the unflipped
// template branch is compiled here.
GrB_Info GB (_AemultB_02__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // if true, use M structurally
    const bool Mask_comp,               // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,                  // if true apply fmult(y,x) instead of fmult(x,y)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; SECOND operator (cij = bij).
GrB_Info GB (_AemultB_03__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // mask matrix (sparse/hyper)
    const bool Mask_struct,             // if true, use M structurally
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap;
// SECOND operator (cij = bij).
GrB_Info GB (_AemultB_bitmap__second_uint64)
(
    GrB_Matrix C,
    const int ewise_method,             // which bitmap emult method to use
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // if true, use M structurally
    const bool Mask_comp,               // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_unop__identity_fc32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint8)
// op(A') function: GB (_unop_tran__identity_fc32_uint8)
// C type: GxB_FC32_t
// A type: uint8_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = identity (cast (Ax)): typecast each uint8_t entry of A to single
// complex (GxB_FC32_t, imaginary part 0) and store it in C.  Handles both
// the fully-populated case (Ab == NULL) and the bitmap case, where only
// entries with Ab [p] set are converted.
GrB_Info GB (_unop_apply__identity_fc32_uint8)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: convert every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = GxB_CMPLXF ((float) (Ax [p]), 0) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = GxB_CMPLXF ((float) (Ax [p]), 0) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A while typecasting uint8_t entries
// to GxB_FC32_t.  All work is done by the shared transpose template,
// driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-task workspace (see GB_unop_transpose.c)
    const int64_t *restrict A_slice,    // slicing of A across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_in_parallel.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/*
* Checks that false is returned when called from serial region
* and true is returned when called within parallel region.
*/
/* Passes iff omp_in_parallel() reports false outside any parallel region
 * and true from inside one. */
int test_omp_in_parallel()
{
  int outside_flag = omp_in_parallel();
  int inside_flag = 0;

  #pragma omp parallel
  {
    #pragma omp single
    {
      inside_flag = omp_in_parallel();
    }
  }

  return (!outside_flag && inside_flag);
}
/* Driver: repeats the check REPETITIONS times; exit code is the number of
 * failed repetitions (0 == success). */
int main()
{
  int rep;
  int failures = 0;

  /* the test requires more than 1 thread to pass */
  omp_set_dynamic(0); /* disable dynamic adjustment of threads */
  if (omp_get_max_threads() == 1)
    omp_set_num_threads(2); /* set 2 threads if no HW resources available */

  for (rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_in_parallel())
      failures++;
  }
  return failures;
}
|
ROF_TV_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ROF_TV_core.h"
#define EPS 1.0e-8
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
/*sign function*/
/* Sign of x: +1 for positive, -1 for negative, 0 otherwise (incl. NaN). */
int sign(float x) {
    if (x > 0.0f) return 1;
    if (x < 0.0f) return -1;
    return 0;
}
/* C-OMP implementation of ROF-TV denoising/regularization model [1] (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume [REQUIRED]
* 2. lambda - regularisation parameter (a constant or the same size as the input (1))
* 3. tau - marching step for explicit scheme, ~1 is recommended [REQUIRED]
* 4. Number of iterations, for explicit scheme >= 150 is recommended [REQUIRED]
* 5. eplsilon: tolerance constant
*
* Output:
* [1] Regularised image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the paper by
* [1] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms"
*/
/* Running iterations of TV-ROF function */
/* Runs the explicit iterations of the ROF-TV denoising model.
 *
 * Input        - noisy image/volume, dimX*dimY*dimZ floats (not modified)
 * Output       - result; initialised from Input, updated in place
 * infovector[0]- number of the last iteration performed
 * infovector[1]- last computed relative change of the iterate
 * lambdaPar    - regularisation parameter (scalar or per-voxel array,
 *                selected by lambda_is_arr)
 * tau          - marching step of the explicit scheme
 * epsil        - stopping tolerance; 0 disables the early-stopping test
 *
 * Returns 0 on success, -1 if a workspace allocation failed. */
float TV_ROF_CPU_main(float *Input, float *Output, float *infovector, float *lambdaPar, int lambda_is_arr, int iterationsNumb, float tau, float epsil, int dimX, int dimY, int dimZ)
{
    float *D1=NULL, *D2=NULL, *D3=NULL, *Output_prev=NULL;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    int count = 0;
    int i = 0;
    long DimTotal,j;
    DimTotal = (long)(dimX*dimY*dimZ);

    /* workspace for the difference fields; D3 is passed to TV_kernel even
       in the 2D case, so it is always allocated */
    D1 = calloc(DimTotal, sizeof(float));
    D2 = calloc(DimTotal, sizeof(float));
    D3 = calloc(DimTotal, sizeof(float));
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
    /* fail cleanly on OOM (the original dereferenced these unchecked) */
    if (D1 == NULL || D2 == NULL || D3 == NULL ||
        ((epsil != 0.0f) && (Output_prev == NULL))) {
        free(D1); free(D2); free(D3); free(Output_prev);
        infovector[0] = 0.0f;
        infovector[1] = 0.0f;
        return -1.0f;
    }

    /* copy into output */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));

    /* start TV iterations */
    for(i=0; i < iterationsNumb; i++) {
        /* snapshot the iterate every 5th step for the tolerance test */
        if ((epsil != 0.0f) && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* calculate differences */
        D1_func(Output, D1, (long)(dimX), (long)(dimY), (long)(dimZ));
        D2_func(Output, D2, (long)(dimX), (long)(dimY), (long)(dimZ));
        if (dimZ > 1) D3_func(Output, D3, (long)(dimX), (long)(dimY), (long)(dimZ));
        TV_kernel(D1, D2, D3, Output, Input, lambdaPar, lambda_is_arr, tau, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* check early stopping criteria: relative L2 change of the iterate */
        if ((epsil != 0.0f) && (i % 5 == 0)) {
            re = 0.0f; re1 = 0.0f;
            for(j=0; j<DimTotal; j++)
            {
                re += powf(Output[j] - Output_prev[j],2);
                re1 += powf(Output[j],2);
            }
            re = sqrtf(re)/sqrtf(re1);
            /* stop once the tolerance has been met more than 3 times;
               count is cumulative, matching the original behaviour */
            if (re < epsil) count++;
            if (count > 3) break;
        }
    }
    free(D1); free(D2); free(D3);
    if (epsil != 0.0f) free(Output_prev);

    /* adding info into info_vector */
    infovector[0] = (float)(i); /* iterations number (if stopped earlier based on tolerance) */
    infovector[1] = re;         /* reached tolerance */
    return 0;
}
/* calculate differences 1 */
/* Calculates the first difference field D1 of the ROF scheme, using
 * forward/backward differences with symmetric (Neumann) boundary conditions
 * and a minmod-style limiter (0.5*(sign(a)+sign(b))*min(|a|,|b|)) on the
 * transverse directions.  Handles both 2D (dimZ == 1) and 3D inputs.
 *
 * Changes vs. the original: the 3D branch used double-precision sqrt/fabs
 * while the 2D branch used sqrtf — both branches now use sqrtf/fabsf
 * consistently; the unused j2 index was dropped.
 *
 * Returns *D1 (kept for interface compatibility; results are written
 * through the D1 array). */
float D1_func(float *A, float *D1, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T1;
    long i,j,k,i1,i2,k1,j1,k2,index;

    if (dimZ > 1) {
        /* 3D case */
#pragma omp parallel for shared (A, D1, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, k2, NOMx_1,NOMy_1,NOMy_0,NOMz_1,NOMz_0,denom1,denom2,denom3,T1)
        for(k=0; k<dimZ; k++) {
            for(j=0; j<dimY; j++) {
                for(i=0; i<dimX; i++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* symmetric boundary conditions (Neumann) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;
                    /* Forward-backward differences */
                    NOMx_1 = A[(dimX*dimY)*k + j1*dimX + i] - A[index]; /* x+ */
                    NOMy_1 = A[(dimX*dimY)*k + j*dimX + i1] - A[index]; /* y+ */
                    NOMy_0 = A[index] - A[(dimX*dimY)*k + j*dimX + i2]; /* y- */
                    NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                    NOMz_0 = A[index] - A[(dimX*dimY)*k2 + j*dimX + i]; /* z- */
                    denom1 = NOMx_1*NOMx_1;
                    denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabsf(NOMy_1),fabsf(NOMy_0)));
                    denom2 = denom2*denom2;
                    denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabsf(NOMz_1),fabsf(NOMz_0)));
                    denom3 = denom3*denom3;
                    T1 = sqrtf(denom1 + denom2 + denom3 + EPS);
                    D1[index] = NOMx_1/T1;
                }}}
    }
    else {
        /* 2D case */
#pragma omp parallel for shared (A, D1, dimX, dimY) private(i, j, i1, j1, i2,NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1,index)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                /* symmetric boundary conditions (Neumann) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                /* Forward-backward differences */
                NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
                NOMy_0 = A[index] - A[(j)*dimX + i2]; /* y- */
                denom1 = NOMx_1*NOMx_1;
                denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabsf(NOMy_1),fabsf(NOMy_0)));
                denom2 = denom2*denom2;
                T1 = sqrtf(denom1 + denom2 + EPS);
                D1[index] = NOMx_1/T1;
            }}
    }
    return *D1;
}
/* calculate differences 2 */
/* Compute D2 = (forward y-difference of A) / T2, where T2 is a locally
 * regularised gradient magnitude (minmod of transverse differences),
 * with Neumann (reflecting) boundary handling.
 *
 * A          : input volume/image, size dimX*dimY*dimZ, x fastest
 * D2         : output array, same size as A
 * dimX..dimZ : dimensions; dimZ == 1 selects the 2D branch
 * Returns D2[0] (kept only for interface compatibility).
 *
 * Fix: fabs() on float operands replaced by fabsf() to keep the
 * arithmetic in single precision, consistent with the float data. */
float D2_func(float *A, float *D2, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2;
    long i, j, k, i1, i2, k1, j1, j2, k2, index;

    if (dimZ > 1) {
        /* 3D case */
#pragma omp parallel for shared (A, D2, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2)
        for(k=0; k<dimZ; k++) {
            for(j=0; j<dimY; j++) {
                for(i=0; i<dimX; i++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* symmetric boundary conditions (Neumann) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    j2 = j - 1; if (j2 < 0) j2 = j+1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;
                    /* Forward-backward differences */
                    NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
                    NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
                    NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
                    NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                    NOMz_0 = A[index] - A[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */
                    denom1 = NOMy_1*NOMy_1;
                    denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabsf(NOMx_1),fabsf(NOMx_0)));
                    denom2 = denom2*denom2;
                    denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabsf(NOMz_1),fabsf(NOMz_0)));
                    denom3 = denom3*denom3;
                    T2 = sqrtf(denom1 + denom2 + denom3 + EPS);
                    D2[index] = NOMy_1/T2;
                }}}
    }
    else {
        /* 2D case */
#pragma omp parallel for shared (A, D2, dimX, dimY) private(i, j, i1, j1, i2, j2, NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2,index)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                /* symmetric boundary conditions (Neumann) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;
                /* Forward-backward differences */
                NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
                NOMx_0 = A[index] - A[j2*dimX + i]; /* x- */
                denom1 = NOMy_1*NOMy_1;
                denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabsf(NOMx_1),fabsf(NOMx_0)));
                denom2 = denom2*denom2;
                T2 = sqrtf(denom1 + denom2 + EPS);
                D2[index] = NOMy_1/T2;
            }}
    }
    return *D2;
}
/* calculate differences 3 */
/* Compute D3 = (forward z-difference of A) / T3, where T3 is a locally
 * regularised gradient magnitude (minmod of transverse differences),
 * with Neumann (reflecting) boundary handling.
 *
 * NOTE(review): unlike D1_func/D2_func there is no 2D branch; with
 * dimZ == 1 the k-reflection would produce k1 = -1 and read out of
 * bounds. Presumably callers invoke this only in the 3D path — confirm.
 *
 * Fix: fabs() on float operands replaced by fabsf() so the arithmetic
 * stays in single precision. */
float D3_func(float *A, float *D3, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3;
    long index, i, j, k, i1, i2, k1, j1, j2, k2;

#pragma omp parallel for shared (A, D3, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMy_0, NOMx_0, NOMz_1, denom1, denom2, denom3, T3)
    for(k=0; k<dimZ; k++) {
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neumann) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;
                k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                k2 = k - 1; if (k2 < 0) k2 = k+1;
                /* Forward-backward differences */
                NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
                NOMy_0 = A[index] - A[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */
                NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
                NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                denom1 = NOMz_1*NOMz_1;
                denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabsf(NOMx_1),fabsf(NOMx_0)));
                denom2 = denom2*denom2;
                denom3 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabsf(NOMy_1),fabsf(NOMy_0)));
                denom3 = denom3*denom3;
                T3 = sqrtf(denom1 + denom2 + denom3 + EPS);
                D3[index] = NOMz_1/T3;
            }}}
    return *D3;
}
/* calculate divergence */
float TV_kernel(float *D1, float *D2, float *D3, float *B, float *A, float *lambda, int lambda_is_arr, float tau, long dimX, long dimY, long dimZ)
{
float dv1, dv2, dv3, lambda_val;
long index,i,j,k,i1,i2,k1,j1,j2,k2;
if (dimZ > 1) {
#pragma omp parallel for shared (D1, D2, D3, B, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, dv1,dv2,dv3,lambda_val)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = (dimX*dimY)*k + j*dimX+i;
lambda_val = *(lambda + index* lambda_is_arr);
/* symmetric boundary conditions (Neuman) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/*divergence components */
dv1 = D1[index] - D1[(dimX*dimY)*k + j2*dimX+i];
dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2];
dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i];
B[index] += tau*(lambda_val*(dv1 + dv2 + dv3) - (B[index] - A[index]));
}}}
}
else {
#pragma omp parallel for shared (D1, D2, B, dimX, dimY) private(index, i, j, i1, j1, i2, j2,dv1,dv2,lambda_val)
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = j*dimX+i;
lambda_val = *(lambda + index* lambda_is_arr);
/* symmetric boundary conditions (Neuman) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
/* divergence components */
dv1 = D1[index] - D1[j2*dimX + i];
dv2 = D2[index] - D2[j*dimX + i2];
B[index] += tau*(lambda_val*(dv1 + dv2) - (B[index] - A[index]));
}}
}
return *B;
}
|
triplet_grid.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only develped */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "triplet_grid.h"
#include <stddef.h>
#include <stdlib.h>
#include "bzgrid.h"
#include "grgrid.h"
#include "lagrid.h"
#include "triplet.h"
static long get_ir_triplets_at_q(long *map_triplets, long *map_q,
const long grid_point, const long D_diag[3],
const RotMats *rot_reciprocal,
const long swappable);
static long get_ir_triplets_at_q_perm_q1q2(long *map_triplets,
const long *map_q,
const long grid_point,
const long D_diag[3]);
static long get_ir_triplets_at_q_noperm(long *map_triplets, const long *map_q,
const long grid_point,
const long D_diag[3]);
static long get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point,
const ConstBZGrid *bzgrid,
const long *map_triplets);
static void get_BZ_triplets_at_q_type1(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *ir_q1_gps,
const long num_ir);
static void get_BZ_triplets_at_q_type2(long (*triplets)[3],
const long grid_point,
const ConstBZGrid *bzgrid,
const long *ir_q1_gps,
const long num_ir);
static double get_squared_distance(const long G[3], const double LQD_inv[3][3]);
static void get_LQD_inv(double LQD_inv[3][3], const ConstBZGrid *bzgrid);
static RotMats *get_reciprocal_point_group_with_q(const RotMats *rot_reciprocal,
const long D_diag[3],
const long grid_point);
static RotMats *get_reciprocal_point_group(const long (*rec_rotations_in)[3][3],
const long num_rot,
const long is_time_reversal,
const long is_transpose);
/* Public entry point: construct the reciprocal point group from the input
 * rotations, then reduce the (q, q1, q2) triplets at fixed grid_point to
 * their irreducible set (map_triplets, map_q are filled as side effects).
 * Returns the number of irreducible triplets, or 0 on failure. */
long tpk_get_ir_triplets_at_q(long *map_triplets, long *map_q,
                              const long grid_point, const long D_diag[3],
                              const long is_time_reversal,
                              const long (*rec_rotations_in)[3][3],
                              const long num_rot, const long swappable) {
    long count;
    RotMats *rot_group = get_reciprocal_point_group(
        rec_rotations_in, num_rot, is_time_reversal, 0);

    if (!rot_group) {
        /* group construction / allocation failed */
        return 0;
    }

    count = get_ir_triplets_at_q(map_triplets, map_q, grid_point, D_diag,
                                 rot_group, swappable);
    bzg_free_RotMats(rot_group);

    return count;
}
/* Public wrapper: expand the irreducible triplet map into explicit
 * (q, q1, q2) BZ-grid index triplets.
 * triplets     : output, one row per irreducible q1
 * grid_point   : BZ-grid index of the fixed q
 * map_triplets : mapping produced by tpk_get_ir_triplets_at_q()
 * Returns the number of triplets written (0 on allocation failure). */
long tpk_get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point,
                              const ConstBZGrid *bzgrid,
                              const long *map_triplets) {
    return get_BZ_triplets_at_q(triplets, grid_point, bzgrid, map_triplets);
}
/* Find irreducible triplets at grid_point using the stabiliser of q.
 * Fills map_q (irreducible q-point map under the stabiliser) and
 * map_triplets (irreducible q1 map, optionally with q1<->q2 swap symmetry).
 * Returns the number of irreducible triplets, or 0 on allocation failure.
 *
 * Fix: get_reciprocal_point_group_with_q() is documented to return NULL
 * on failure, but its result was dereferenced unchecked. */
static long get_ir_triplets_at_q(long *map_triplets, long *map_q,
                                 const long grid_point, const long D_diag[3],
                                 const RotMats *rot_reciprocal,
                                 const long swappable) {
    long i, num_ir_q, num_ir_triplets;
    long PS[3] = {0, 0, 0};  /* no half-grid shift */
    RotMats *rot_reciprocal_q;

    /* Search irreducible q-points (map_q) with a stabilizer. */
    rot_reciprocal_q =
        get_reciprocal_point_group_with_q(rot_reciprocal, D_diag, grid_point);
    if (rot_reciprocal_q == NULL) {
        return 0;  /* allocation failure in stabiliser construction */
    }

    grg_get_ir_grid_map(map_q, rot_reciprocal_q->mat, rot_reciprocal_q->size,
                        D_diag, PS);

    /* num_ir_q is informational only; kept for parity with the original
       implementation (it is not used below). */
    num_ir_q = 0;
    for (i = 0; i < D_diag[0] * D_diag[1] * D_diag[2]; i++) {
        if (map_q[i] == i) {
            num_ir_q++;
        }
    }

    if (swappable) {
        num_ir_triplets = get_ir_triplets_at_q_perm_q1q2(map_triplets, map_q,
                                                         grid_point, D_diag);
    } else {
        num_ir_triplets = get_ir_triplets_at_q_noperm(map_triplets, map_q,
                                                      grid_point, D_diag);
    }
    bzg_free_RotMats(rot_reciprocal_q);
    rot_reciprocal_q = NULL;

    return num_ir_triplets;
}
/* Reduce q1 grid points using both the stabiliser map (map_q) and the
 * q1 <-> q2 permutation symmetry (q2 = -q - q1 by momentum conservation).
 * map_triplets[gp1] is set to the representative grid point of gp1.
 * Returns the number of representatives (irreducible triplets).
 *
 * The first loop MUST run sequentially in ascending gp1: whether gp1 is
 * a new representative depends on map_q[gp2] referring to an earlier,
 * already-processed gp1 (hence the OpenMP pragma is commented out). */
static long get_ir_triplets_at_q_perm_q1q2(long *map_triplets,
                                           const long *map_q,
                                           const long grid_point,
                                           const long D_diag[3]) {
    long j, num_grid, num_ir_triplets, gp1, gp2;
    long adrs0[3], adrs1[3], adrs2[3];

    num_ir_triplets = 0;
    num_grid = D_diag[0] * D_diag[1] * D_diag[2];
    grg_get_grid_address_from_index(adrs0, grid_point, D_diag);

    /* Intentionally sequential; see note above. */
    // #ifdef PHPYOPENMP
    // #pragma omp parallel for private(j, gp2, adrs1, adrs2)
    // #endif
    for (gp1 = 0; gp1 < num_grid; gp1++) {
        if (map_q[gp1] == gp1) {
            grg_get_grid_address_from_index(adrs1, gp1, D_diag);
            /* q2 address from momentum conservation: q + q1 + q2 = G */
            for (j = 0; j < 3; j++) {
                adrs2[j] = -adrs0[j] - adrs1[j];
            }
            /* If map_q[gp2] is smaller than current gp1, map_q[gp2] should */
            /* equal to a previous gp1 for which map_triplets is already */
            /* filled. So the counter is not incremented. */
            gp2 = grg_get_grid_index(adrs2, D_diag);
            if (map_q[gp2] < gp1) {
                map_triplets[gp1] = map_q[gp2];
            } else {
                map_triplets[gp1] = gp1;
                num_ir_triplets++;
            }
        }
    }

    /* Fill unfilled elements of map_triplets. */
#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
    for (gp1 = 0; gp1 < num_grid; gp1++) {
        if (map_q[gp1] != gp1) {
            /* map_q[gp1] is one of ir-gp1, so it is already filled. */
            map_triplets[gp1] = map_triplets[map_q[gp1]];
        }
    }

    return num_ir_triplets;
}
/* Without q1<->q2 permutation symmetry every representative of map_q is
 * itself an irreducible q1. map_triplets[gp1] becomes the representative
 * of gp1; the return value counts the representatives.
 * grid_point is unused here but kept for signature symmetry with the
 * permutation-aware variant. */
static long get_ir_triplets_at_q_noperm(long *map_triplets, const long *map_q,
                                        const long grid_point,
                                        const long D_diag[3]) {
    const long num_grid = D_diag[0] * D_diag[1] * D_diag[2];
    long gp1, count = 0;

    /* map_q[gp1] <= gp1 always holds for an irreducible map, so the
       representative's entry is already written when copied from. */
    for (gp1 = 0; gp1 < num_grid; gp1++) {
        if (map_q[gp1] != gp1) {
            map_triplets[gp1] = map_triplets[map_q[gp1]];
            continue;
        }
        map_triplets[gp1] = gp1;
        count++;
    }

    return count;
}
/* Collect the irreducible q1 grid points (representatives of map_triplets)
 * and expand each into a (q, q1, q2) triplet of BZ-grid indices, using the
 * layout-specific helper selected by bzgrid->type.
 * Returns the number of triplets written, or 0 on allocation failure. */
static long get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point,
                                 const ConstBZGrid *bzgrid,
                                 const long *map_triplets) {
    long gp1, num_ir;
    long *ir_q1_gps;

    ir_q1_gps = NULL;
    num_ir = 0;

    if ((ir_q1_gps = (long *)malloc(sizeof(long) * bzgrid->size)) == NULL) {
        warning_print("Memory could not be allocated.");
        goto ret;  /* num_ir is still 0 */
    }

    /* Representatives (gp1 mapped to itself) are the irreducible q1. */
    for (gp1 = 0; gp1 < bzgrid->size; gp1++) {
        if (map_triplets[gp1] == gp1) {
            ir_q1_gps[num_ir] = gp1;
            num_ir++;
        }
    }

    /* bzgrid->type selects between the two BZ-grid storage layouts. */
    if (bzgrid->type == 1) {
        get_BZ_triplets_at_q_type1(triplets, grid_point, bzgrid, ir_q1_gps,
                                   num_ir);
    } else {
        get_BZ_triplets_at_q_type2(triplets, grid_point, bzgrid, ir_q1_gps,
                                   num_ir);
    }

    free(ir_q1_gps);
    ir_q1_gps = NULL;

ret:
    return num_ir;
}
/* Expand irreducible q1 points into BZ triplets for the type-1 grid layout.
 * For each q1, all BZ images of q, q1 and q2 = -q - q1 are combined; the
 * combination whose total reciprocal vector G is zero is taken immediately,
 * otherwise the combination minimising |G| (within tolerance) wins.
 *
 * NOTE(review): in the type-1 layout gp_map[num_bzgp + gp] appears to index
 * the extra BZ-surface images of grid point gp stored after the first
 * num_gp entries of bzgrid->addresses — confirm against the bzgrid docs. */
static void get_BZ_triplets_at_q_type1(long (*triplets)[3],
                                       const long grid_point,
                                       const ConstBZGrid *bzgrid,
                                       const long *ir_q1_gps,
                                       const long num_ir) {
    long i, j, gp2, num_gp, num_bzgp, bz0, bz1, bz2;
    long bzgp[3], G[3];
    long bz_adrs0[3], bz_adrs1[3], bz_adrs2[3];
    const long *gp_map;
    const long(*bz_adrs)[3];
    double d2, min_d2, tolerance;
    double LQD_inv[3][3];

    gp_map = bzgrid->gp_map;
    bz_adrs = bzgrid->addresses;
    get_LQD_inv(LQD_inv, bzgrid);
    /* This tolerance is used to be consistent to BZ reduction in bzgrid. */
    tolerance = bzg_get_tolerance_for_BZ_reduction((BZGrid *)bzgrid);
    for (i = 0; i < 3; i++) {
        bz_adrs0[i] = bz_adrs[grid_point][i];
    }
    num_gp = bzgrid->D_diag[0] * bzgrid->D_diag[1] * bzgrid->D_diag[2];
    num_bzgp = num_gp * 8;

#ifdef PHPYOPENMP
#pragma omp parallel for private(j, gp2, bzgp, G, bz_adrs1, bz_adrs2, d2, \
                                     min_d2, bz0, bz1, bz2)
#endif
    for (i = 0; i < num_ir; i++) {
        /* q2 address from momentum conservation. */
        for (j = 0; j < 3; j++) {
            bz_adrs1[j] = bz_adrs[ir_q1_gps[i]][j];
            bz_adrs2[j] = -bz_adrs0[j] - bz_adrs1[j];
        }
        gp2 = grg_get_grid_index(bz_adrs2, bzgrid->D_diag);
        /* Negative value is the signal to initialize min_d2 later. */
        min_d2 = -1;
        /* bz0/bz1/bz2 == 0 selects the primary image; larger values walk
           the extra BZ images recorded in gp_map. */
        for (bz0 = 0; bz0 < gp_map[num_bzgp + grid_point + 1] -
                                gp_map[num_bzgp + grid_point] + 1;
             bz0++) {
            if (bz0 == 0) {
                bzgp[0] = grid_point;
            } else {
                bzgp[0] = num_gp + gp_map[num_bzgp + grid_point] + bz0 - 1;
            }
            for (bz1 = 0; bz1 < gp_map[num_bzgp + ir_q1_gps[i] + 1] -
                                    gp_map[num_bzgp + ir_q1_gps[i]] + 1;
                 bz1++) {
                if (bz1 == 0) {
                    bzgp[1] = ir_q1_gps[i];
                } else {
                    bzgp[1] =
                        num_gp + gp_map[num_bzgp + ir_q1_gps[i]] + bz1 - 1;
                }
                for (bz2 = 0; bz2 < gp_map[num_bzgp + gp2 + 1] -
                                        gp_map[num_bzgp + gp2] + 1;
                     bz2++) {
                    if (bz2 == 0) {
                        bzgp[2] = gp2;
                    } else {
                        bzgp[2] = num_gp + gp_map[num_bzgp + gp2] + bz2 - 1;
                    }
                    for (j = 0; j < 3; j++) {
                        G[j] = bz_adrs[bzgp[0]][j] + bz_adrs[bzgp[1]][j] +
                               bz_adrs[bzgp[2]][j];
                    }
                    /* Exact momentum conservation: take it and stop. */
                    if (G[0] == 0 && G[1] == 0 && G[2] == 0) {
                        for (j = 0; j < 3; j++) {
                            triplets[i][j] = bzgp[j];
                        }
                        goto found;
                    }
                    /* Otherwise keep the shortest G (Cartesian metric). */
                    d2 = get_squared_distance(G, LQD_inv);
                    if (d2 < min_d2 - tolerance || min_d2 < 0) {
                        min_d2 = d2;
                        for (j = 0; j < 3; j++) {
                            triplets[i][j] = bzgp[j];
                        }
                    }
                }
            }
        }
    found:;
    }
}
/* Expand irreducible q1 points into BZ triplets for the type-2 grid layout,
 * where gp_map[gp]..gp_map[gp+1] delimits all BZ images of grid point gp in
 * bzgrid->addresses. Selection logic mirrors the type-1 variant: prefer an
 * exact G == 0 combination, otherwise minimise |G| within tolerance. */
static void get_BZ_triplets_at_q_type2(long (*triplets)[3],
                                       const long grid_point,
                                       const ConstBZGrid *bzgrid,
                                       const long *ir_q1_gps,
                                       const long num_ir) {
    long i, j, gp0, gp2;
    long bzgp[3], G[3];
    long bz_adrs0[3], bz_adrs1[3], bz_adrs2[3];
    const long *gp_map;
    const long(*bz_adrs)[3];
    double d2, min_d2, tolerance;
    double LQD_inv[3][3];

    gp_map = bzgrid->gp_map;
    bz_adrs = bzgrid->addresses;
    get_LQD_inv(LQD_inv, bzgrid);
    /* This tolerance is used to be consistent to BZ reduction in bzgrid. */
    tolerance = bzg_get_tolerance_for_BZ_reduction((BZGrid *)bzgrid);
    for (i = 0; i < 3; i++) {
        bz_adrs0[i] = bz_adrs[grid_point][i];
    }
    /* GR-grid index of the fixed q point. */
    gp0 = grg_get_grid_index(bz_adrs0, bzgrid->D_diag);

#ifdef PHPYOPENMP
#pragma omp parallel for private(j, gp2, bzgp, G, bz_adrs1, bz_adrs2, d2, \
                                     min_d2)
#endif
    for (i = 0; i < num_ir; i++) {
        /* q2 address from momentum conservation. */
        for (j = 0; j < 3; j++) {
            bz_adrs1[j] = bz_adrs[gp_map[ir_q1_gps[i]]][j];
            bz_adrs2[j] = -bz_adrs0[j] - bz_adrs1[j];
        }
        gp2 = grg_get_grid_index(bz_adrs2, bzgrid->D_diag);
        /* Negative value is the signal to initialize min_d2 later. */
        min_d2 = -1;
        for (bzgp[0] = gp_map[gp0]; bzgp[0] < gp_map[gp0 + 1]; bzgp[0]++) {
            for (bzgp[1] = gp_map[ir_q1_gps[i]];
                 bzgp[1] < gp_map[ir_q1_gps[i] + 1]; bzgp[1]++) {
                for (bzgp[2] = gp_map[gp2]; bzgp[2] < gp_map[gp2 + 1];
                     bzgp[2]++) {
                    for (j = 0; j < 3; j++) {
                        G[j] = bz_adrs[bzgp[0]][j] + bz_adrs[bzgp[1]][j] +
                               bz_adrs[bzgp[2]][j];
                    }
                    /* Exact momentum conservation: take it and stop. */
                    if (G[0] == 0 && G[1] == 0 && G[2] == 0) {
                        for (j = 0; j < 3; j++) {
                            triplets[i][j] = bzgp[j];
                        }
                        goto found;
                    }
                    /* Otherwise keep the shortest G (Cartesian metric). */
                    d2 = get_squared_distance(G, LQD_inv);
                    if (d2 < min_d2 - tolerance || min_d2 < 0) {
                        min_d2 = d2;
                        for (j = 0; j < 3; j++) {
                            triplets[i][j] = bzgp[j];
                        }
                    }
                }
            }
        }
    found:;
    }
}
/* Squared Cartesian length of integer reciprocal vector G after mapping
 * through the LQD^-1 matrix: |LQD_inv . G|^2. */
static double get_squared_distance(const long G[3],
                                   const double LQD_inv[3][3]) {
    double sum = 0;
    long row;

    for (row = 0; row < 3; row++) {
        const double dot = LQD_inv[row][0] * G[0] +
                           LQD_inv[row][1] * G[1] +
                           LQD_inv[row][2] * G[2];
        sum += dot * dot;
    }

    return sum;
}
/* Compute LQD_inv = reclat . Q with each column k divided by D_diag[k],
 * i.e. LQD_inv[i][k] = sum_j reclat[i][j] * Q[j][k] / D_diag[k].
 *
 * Fix: the matrix product was written with plain assignment inside the
 * summation loop, so only the j == 2 term survived; the product over j
 * must be accumulated. (Identical to the original when Q is diagonal.) */
static void get_LQD_inv(double LQD_inv[3][3], const ConstBZGrid *bzgrid) {
    long i, j, k;

    /* LQD^-1 */
    for (i = 0; i < 3; i++) {
        for (k = 0; k < 3; k++) {
            LQD_inv[i][k] = 0;
            for (j = 0; j < 3; j++) {
                LQD_inv[i][k] +=
                    bzgrid->reclat[i][j] * bzgrid->Q[j][k] / bzgrid->D_diag[k];
            }
        }
    }
}
/* Return NULL if failed */
/* Build the stabiliser of grid_point: the subset of rot_reciprocal whose
 * rotations map grid_point onto itself on the D_diag grid.
 * Caller owns the returned RotMats (free with bzg_free_RotMats).
 * Return NULL if failed. */
static RotMats *get_reciprocal_point_group_with_q(const RotMats *rot_reciprocal,
                                                  const long D_diag[3],
                                                  const long grid_point) {
    long i, num_rot, gp_rot;
    long *ir_rot;
    long adrs[3], adrs_rot[3];
    RotMats *rot_reciprocal_q;

    ir_rot = NULL;
    rot_reciprocal_q = NULL;
    num_rot = 0;

    grg_get_grid_address_from_index(adrs, grid_point, D_diag);

    if ((ir_rot = (long *)malloc(sizeof(long) * rot_reciprocal->size)) ==
        NULL) {
        warning_print("Memory of ir_rot could not be allocated.");
        return NULL;
    }

    for (i = 0; i < rot_reciprocal->size; i++) {
        ir_rot[i] = -1;  /* sentinel: slot not used */
    }
    /* Keep the indices of rotations that leave grid_point fixed. */
    for (i = 0; i < rot_reciprocal->size; i++) {
        lagmat_multiply_matrix_vector_l3(adrs_rot, rot_reciprocal->mat[i],
                                         adrs);
        gp_rot = grg_get_grid_index(adrs_rot, D_diag);

        if (gp_rot == grid_point) {
            ir_rot[num_rot] = i;
            num_rot++;
        }
    }

    /* Copy the selected rotations into a freshly allocated RotMats;
       NULL allocation falls through and NULL is returned. */
    if ((rot_reciprocal_q = bzg_alloc_RotMats(num_rot)) != NULL) {
        for (i = 0; i < num_rot; i++) {
            lagmat_copy_matrix_l3(rot_reciprocal_q->mat[i],
                                  rot_reciprocal->mat[ir_rot[i]]);
        }
    }

    free(ir_rot);
    ir_rot = NULL;

    return rot_reciprocal_q;
}
/* Build the reciprocal-space point group (at most 48 operations) from the
 * input rotations, optionally adding time reversal / transposing.
 * Caller owns the returned RotMats; returns NULL on failure.
 *
 * Fix: the result of bzg_alloc_RotMats() was dereferenced without a NULL
 * check; an allocation failure now returns NULL like the empty-group case. */
static RotMats *get_reciprocal_point_group(const long (*rec_rotations_in)[3][3],
                                           const long num_rot,
                                           const long is_time_reversal,
                                           const long is_transpose) {
    long i, num_rot_out;
    long rec_rotations_out[48][3][3];
    RotMats *rec_rotations;

    num_rot_out =
        grg_get_reciprocal_point_group(rec_rotations_out, rec_rotations_in,
                                       num_rot, is_time_reversal, is_transpose);
    if (num_rot_out == 0) {
        return NULL;
    }

    rec_rotations = bzg_alloc_RotMats(num_rot_out);
    if (rec_rotations == NULL) {
        return NULL;  /* allocation failed */
    }
    for (i = 0; i < num_rot_out; i++) {
        lagmat_copy_matrix_l3(rec_rotations->mat[i], rec_rotations_out[i]);
    }

    return rec_rotations;
}
|
pi_omp.c | /*
* OpenMP version of program to estimate pi using Monte Carlo Methods.
*
* Justin Ragatz
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
/* Monte Carlo estimate of pi: throw `trials` uniform points into the unit
 * square and count those inside the quarter circle x^2 + y^2 <= 1;
 * pi ~= 4 * hits / trials. Parallelised with an OpenMP reduction; each
 * thread keeps its own reentrant PRNG state (glibc drand48_r extension). */
int main (int argc, char *argv[]) {
    int i;          // loop counter
    int hits;       // points landing inside the quarter circle
    int trials;     // number of random points to generate
    int n_threads;  // OpenMP thread count from the command line
    int seed;       // per-thread PRNG seed
    double x, y;    // random point in [0,1) x [0,1)
    double start, end;              // wall-clock timing
    struct drand48_data buffer;     // per-thread PRNG state (glibc-specific)

    if (argc > 2) {
        /* NOTE(review): atoi() gives 0 on bad input; trials == 0 would make
           the final division produce inf — presumably acceptable for a demo. */
        trials = atoi(argv[1]);
        n_threads = atoi(argv[2]);
    } else {
        printf("Usage: ./pi_omp <trials> <threads>\n");
        return -1;
    }

    omp_set_num_threads (n_threads);

    printf("Trials : %7d\n", trials);
    printf("Threads : %7d\n", n_threads);

    start = omp_get_wtime();
    hits = 0;

#pragma omp parallel private(i, x, y, seed, buffer) shared(trials)
    {
        /* Thread-dependent seed so streams differ across threads/runs. */
        seed = 1202107158 + omp_get_thread_num() * time(NULL);
        srand48_r (seed, &buffer);

#pragma omp for reduction(+:hits)
        for (i = 0; i < trials; i++) {
            drand48_r (&buffer, &x);
            drand48_r (&buffer, &y);
            if (x*x + y*y <= 1.0) {
                hits++;
            }
        }
    }

    end = omp_get_wtime();

    printf("Time : %7.2fs\n\n", end - start);
    printf("Estimate of pi: %7.5f\n", 4.0 * hits / trials);

    return 0;
}
|
lbfgsbsolver.h | // CppNumericalSolver
#include <iostream>
#include <list>
#include <Eigen/LU>
#include "isolver.h"
#include "../boundedproblem.h"
#include "../linesearch/morethuente.h"
#ifndef LBFGSBSOLVER_H
#define LBFGSBSOLVER_H
namespace cppoptlib {
/**
 * L-BFGS-B: limited-memory BFGS for box-constrained minimisation
 * (Byrd, Lu, Nocedal, Zhu, "A limited memory algorithm for bound
 * constrained optimization"). Bounds come from the BoundedProblem
 * interface (problem.lowerBound()/upperBound()).
 */
template<typename TProblem>
class LbfgsbSolver : public ISolver<TProblem, 1> {
  public:
    using Superclass = ISolver<TProblem, 1>;
    using typename Superclass::Scalar;
    using typename Superclass::TVector;
    using MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using VariableTVector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
  protected:
    // last updates (iterate history; only pushed, kept for inspection)
    std::list<TVector> xHistory;
    // workspace matrices of the compact L-BFGS representation B = theta*I - W*M*W^T
    MatrixType W, M;
    Scalar theta;           // scaling of the identity part of B
    int DIM;                // problem dimension (set in minimize())
    int m_historySize = 5;  // number of (s, y) correction pairs kept
    /**
     * @brief sort pairs (k,v) according v ascending
     * @details [long description]
     *
     * @param v [description]
     * @return [description]
     */
    std::vector<int> sort_indexes(const std::vector< std::pair<int, Scalar> > &v) {
        std::vector<int> idx(v.size());
        for (size_t i = 0; i != idx.size(); ++i)
            idx[i] = v[i].first;
        sort(idx.begin(), idx.end(), [&v](size_t i1, size_t i2) {
            return v[i1].second < v[i2].second;
        });
        return idx;
    }
    /**
     * @brief Algorithm CP: Computation of the generalized Cauchy point
     * @details PAGE 8 of the Byrd/Lu/Nocedal/Zhu paper. Walks the piecewise
     * linear projected-gradient path, fixing one variable at a bound per
     * breakpoint, until the 1D minimum falls inside the current segment.
     *
     * @param c [out] accumulator W^T*(x_cauchy - x), reused by SubspaceMinimization
     */
    void getGeneralizedCauchyPoint(const TProblem &problem, TVector &x, TVector &g, TVector &x_cauchy, VariableTVector &c) {
        const int DIM = x.rows();
        // Given x,l,u,g, and B = \theta I-WMW
        // {all t_i} = { (idx,value), ... }
        // TODO: use "std::set" ?
        std::vector<std::pair<int, Scalar> > SetOfT;
        // the feasible set is implicitly given by "SetOfT - {t_i==0}"
        TVector d = -g;
        // n operations: breakpoint t_i at which variable i hits its bound
        for (int j = 0; j < DIM; j++) {
            if (g(j) == 0) {
                SetOfT.push_back(std::make_pair(j, std::numeric_limits<Scalar>::max()));
            } else {
                Scalar tmp = 0;
                if (g(j) < 0) {
                    tmp = (x(j) - problem.upperBound()(j)) / g(j);
                } else {
                    tmp = (x(j) - problem.lowerBound()(j)) / g(j);
                }
                SetOfT.push_back(std::make_pair(j, tmp));
            }
        }
        // sortedindices [1,0,2] means the minimal element is on the 1-st entry
        std::vector<int> sortedIndices = sort_indexes(SetOfT);
        x_cauchy = x;
        // Initialize
        // p := W^Scalar*p
        VariableTVector p = (W.transpose() * d); // (2mn operations)
        // c := 0
        c = VariableTVector::Zero(W.cols());
        // f' := g^Scalar*d = -d^Td
        Scalar f_prime = -d.dot(d); // (n operations)
        // f'' := \theta*d^Scalar*d-d^Scalar*W*M*W^Scalar*d = -\theta*f' - p^Scalar*M*p
        Scalar f_doubleprime = (Scalar)(-1.0 * theta) * f_prime - p.dot(M * p); // (O(m^2) operations)
        // \delta t_min := -f'/f''
        Scalar dt_min = -f_prime / f_doubleprime;
        // t_old := 0
        Scalar t_old = 0;
        // b := argmin {t_i , t_i >0} : first strictly positive breakpoint
        int i = 0;
        for (int j = 0; j < DIM; j++) {
            i = j;
            if (SetOfT[sortedIndices[j]].second > 0)
                break;
        }
        int b = sortedIndices[i];
        // see below
        // t := min{t_i : i in F}
        Scalar t = SetOfT[b].second;
        // \delta Scalar := t - 0
        Scalar dt = t ;
        // examination of subsequent segments
        while ((dt_min >= dt) && (i < DIM)) {
            // minimum lies beyond this breakpoint: clamp variable b to its bound
            if (d(b) > 0)
                x_cauchy(b) = problem.upperBound()(b);
            else if (d(b) < 0)
                x_cauchy(b) = problem.lowerBound()(b);
            // z_b = x_p^{cp} - x_b
            Scalar zb = x_cauchy(b) - x(b);
            // c := c +\delta t*p
            c += dt * p;
            // cache
            VariableTVector wbt = W.row(b);
            // update directional derivatives for the next segment (paper eqs.)
            f_prime += dt * f_doubleprime + (Scalar) g(b) * g(b) + (Scalar) theta * g(b) * zb - (Scalar) g(b) *
                       wbt.transpose() * (M * c);
            f_doubleprime += (Scalar) - 1.0 * theta * g(b) * g(b)
                             - (Scalar) 2.0 * (g(b) * (wbt.dot(M * p)))
                             - (Scalar) g(b) * g(b) * wbt.transpose() * (M * wbt);
            p += g(b) * wbt.transpose();
            d(b) = 0;
            dt_min = -f_prime / f_doubleprime;
            t_old = t;
            ++i;
            if (i < DIM) {
                b = sortedIndices[i];
                t = SetOfT[b].second;
                dt = t - t_old;
            }
        }
        dt_min = std::max(dt_min, (Scalar)0.0);
        t_old += dt_min;
        // remaining free variables move along d to the Cauchy point
#pragma omp parallel for
        for (int ii = i; ii < x_cauchy.rows(); ii++) {
            x_cauchy(sortedIndices[ii]) = x(sortedIndices[ii]) + t_old * d(sortedIndices[ii]);
        }
        c += dt_min * p;
    }
    /**
     * @brief find alpha* = max {a : a <= 1 and l_i-xc_i <= a*d_i <= u_i-xc_i}
     * @details largest step along du that keeps the free variables feasible
     *
     * @param FreeVariables indices of variables not at a bound
     * @return step length in (0, 1]
     */
    Scalar findAlpha(const TProblem &problem, TVector &x_cp, VariableTVector &du, std::vector<int> &FreeVariables) {
        Scalar alphastar = 1;
        const unsigned int n = FreeVariables.size();
        assert(du.rows() == n);
        for (unsigned int i = 0; i < n; i++) {
            if (du(i) > 0) {
                alphastar = std::min(alphastar, (problem.upperBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
            } else {
                alphastar = std::min(alphastar, (problem.lowerBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
            }
        }
        return alphastar;
    }
    /**
     * @brief solving unbounded probelm
     * @details direct primal method (paper, page 12): minimise the quadratic
     * model over the free variables, then clip the step to stay feasible
     *
     * @param SubspaceMin [out] subspace minimiser, starting point for line search
     */
    void SubspaceMinimization(const TProblem &problem, TVector &x_cauchy, TVector &x, VariableTVector &c, TVector &g,
                              TVector &SubspaceMin) {
        Scalar theta_inverse = 1 / theta;
        // free variables = those not fixed at a bound by the Cauchy point
        std::vector<int> FreeVariablesIndex;
        for (int i = 0; i < x_cauchy.rows(); i++) {
            if ((x_cauchy(i) != problem.upperBound()(i)) && (x_cauchy(i) != problem.lowerBound()(i))) {
                FreeVariablesIndex.push_back(i);
            }
        }
        const int FreeVarCount = FreeVariablesIndex.size();
        MatrixType WZ = MatrixType::Zero(W.cols(), FreeVarCount);
        for (int i = 0; i < FreeVarCount; i++)
            WZ.col(i) = W.row(FreeVariablesIndex[i]);
        // reduced gradient of the quadratic model at the Cauchy point
        TVector rr = (g + theta * (x_cauchy - x) - W * (M * c));
        // r=r(FreeVariables);
        MatrixType r = MatrixType::Zero(FreeVarCount, 1);
        for (int i = 0; i < FreeVarCount; i++)
            r.row(i) = rr.row(FreeVariablesIndex[i]);
        // STEP 2: "v = w^T*Z*r" and STEP 3: "v = M*v"
        VariableTVector v = M * (WZ * r);
        // STEP 4: N = 1/theta*W^T*Z*(W^T*Z)^T
        MatrixType N = theta_inverse * WZ * WZ.transpose();
        // N = I - MN
        N = MatrixType::Identity(N.rows(), N.rows()) - M * N;
        // STEP: 5
        // v = N^{-1}*v
        v = N.lu().solve(v);
        // STEP: 6
        // HERE IS A MISTAKE IN THE ORIGINAL PAPER!
        VariableTVector du = -theta_inverse * r - theta_inverse * theta_inverse * WZ.transpose() * v;
        // STEP: 7
        Scalar alpha_star = findAlpha(problem, x_cauchy, du, FreeVariablesIndex);
        // STEP: 8
        VariableTVector dStar = alpha_star * du;
        SubspaceMin = x_cauchy;
        for (int i = 0; i < FreeVarCount; i++) {
            SubspaceMin(FreeVariablesIndex[i]) = SubspaceMin(FreeVariablesIndex[i]) + dStar(i);
        }
    }
  public:
    // number of (s, y) correction pairs to keep
    void setHistorySize(const int hs) { m_historySize = hs; }
    /**
     * Main loop: Cauchy point -> subspace minimisation -> More-Thuente line
     * search -> limited-memory update of the compact representation (W, M,
     * theta). x0 is overwritten with the solution.
     */
    void minimize(TProblem &problem, TVector &x0) {
        DIM = x0.rows();
        theta = 1.0;
        W = MatrixType::Zero(DIM, 0);
        M = MatrixType::Zero(0, 0);
        xHistory.push_back(x0);
        MatrixType yHistory = MatrixType::Zero(DIM, 0);
        MatrixType sHistory = MatrixType::Zero(DIM, 0);
        TVector x = x0, g = x0;
        Scalar f = problem.value(x);
        problem.gradient(x, g);
        // conv. crit.: infinity norm of the projected gradient
        auto noConvergence =
        [&](TVector &x, TVector &g)->bool {
            return (((x - g).cwiseMax(problem.lowerBound()).cwiseMin(problem.upperBound()) - x).template lpNorm<Eigen::Infinity>() >= 1e-4);
        };
        this->m_current.reset();
        this->m_status = Status::Continue;
        while (problem.callback(this->m_current, x) && noConvergence(x, g) && (this->m_status == Status::Continue)) {
            Scalar f_old = f;
            TVector x_old = x;
            TVector g_old = g;
            // STEP 2: compute the cauchy point
            TVector CauchyPoint = TVector::Zero(DIM);
            VariableTVector c = VariableTVector::Zero(W.cols());
            getGeneralizedCauchyPoint(problem, x, g, CauchyPoint, c);
            // STEP 3: compute a search direction d_k by the primal method for the sub-problem
            TVector SubspaceMin;
            SubspaceMinimization(problem, CauchyPoint, x, c, g, SubspaceMin);
            // STEP 4: perform linesearch and STEP 5: compute gradient
            Scalar alpha_init = 1.0;
            const Scalar rate = MoreThuente<TProblem, 1>::linesearch(x, SubspaceMin-x , problem, alpha_init);
            // update current guess and function information
            x = x - rate*(x-SubspaceMin);
            f = problem.value(x);
            problem.gradient(x, g);
            xHistory.push_back(x);
            // prepare for next iteration
            TVector newY = g - g_old;
            TVector newS = x - x_old;
            // STEP 6: curvature condition — skip the update if s^T y is too small
            Scalar test = newS.dot(newY);
            test = (test < 0) ? -1.0 * test : test;
            if (test > 1e-7 * newY.squaredNorm()) {
                if (yHistory.cols() < m_historySize) {
                    yHistory.conservativeResize(DIM, this->m_current.iterations + 1);
                    sHistory.conservativeResize(DIM, this->m_current.iterations + 1);
                } else {
                    // history full: drop the oldest column, shift left
                    yHistory.leftCols(m_historySize - 1) = yHistory.rightCols(m_historySize - 1).eval();
                    sHistory.leftCols(m_historySize - 1) = sHistory.rightCols(m_historySize - 1).eval();
                }
                yHistory.rightCols(1) = newY;
                sHistory.rightCols(1) = newS;
                // STEP 7: rebuild the compact representation B = theta*I - W*M*W^T
                theta = (Scalar)(newY.transpose() * newY) / (newY.transpose() * newS);
                W = MatrixType::Zero(yHistory.rows(), yHistory.cols() + sHistory.cols());
                W << yHistory, (theta * sHistory);
                MatrixType A = sHistory.transpose() * yHistory;
                MatrixType L = A.template triangularView<Eigen::StrictlyLower>();
                MatrixType MM(A.rows() + L.rows(), A.rows() + L.cols());
                MatrixType D = -1 * A.diagonal().asDiagonal();
                MM << D, L.transpose(), L, ((sHistory.transpose() * sHistory) * theta);
                M = MM.inverse();
            }
            if (fabs(f_old - f) < 1e-8) {
                // successive function values too similar
                break;
            }
            ++this->m_current.iterations;
            this->m_current.gradNorm = g.norm();
            this->m_status = checkConvergence(this->m_stop, this->m_current);
        }
        x0 = x;
        if (this->m_debug > DebugLevel::None) {
            std::cout << "Stop status was: " << this->m_status << std::endl;
            std::cout << "Stop criteria were: " << std::endl << this->m_stop << std::endl;
            std::cout << "Current values are: " << std::endl << this->m_current << std::endl;
        }
    }
};
}
/* namespace cppoptlib */
#endif /* LBFGSBSOLVER_H_ */
|
libimagequant.c | /* pngquant.c - quantize the colors in an alphamap down to a specified number
**
** Copyright (C) 1989, 1991 by Jef Poskanzer.
** Copyright (C) 1997, 2000, 2002 by Greg Roelofs; based on an idea by
** Stefan Schneider.
** © 2009-2013 by Kornel Lesinski.
**
** Permission to use, copy, modify, and distribute this software and its
** documentation for any purpose and without fee is hereby granted, provided
** that the above copyright notice appear in all copies and that both that
** copyright notice and this permission notice appear in supporting
** documentation. This software is provided "as is" without express or
** implied warranty.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800)
#error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher."
#error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version."
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#include "nearest.h"
#include "blur.h"
#include "viter.h"
#define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */
// each structure has a pointer as a unique identifier that allows type checking at run time
static const char *const liq_attr_magic = "liq_attr", *const liq_image_magic = "liq_image",
*const liq_result_magic = "liq_result", *const liq_remapping_result_magic = "liq_remapping_result",
*const liq_freed_magic = "free";
#define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic)
#define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr)
// Settings handle created by liq_attr_create(); magic_header enables run-time
// type checking of user-supplied pointers.
struct liq_attr {
    const char *magic_header;
    void* (*malloc)(size_t);  // allocator pair used for everything owned by this attr
    void (*free)(void*);
    double target_mse, max_mse, voronoi_iteration_limit;  // quality targets, expressed as mean square error
    float min_opaque_val;  // IE6 alpha workaround threshold; 1.0 disables it
    unsigned int max_colors, max_histogram_entries;
    unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */;
    unsigned int voronoi_iterations, feedback_loop_trials;
    bool last_index_transparent, use_contrast_maps, use_dither_map, fast_palette;
    unsigned int speed;  // 1 (best) .. 10 (fastest); liq_set_speed derives the tunables above from it
    liq_log_callback_function *log_callback;
    void *log_callback_user_info;
    liq_log_flush_callback_function *log_flush_callback;
    void *log_flush_callback_user_info;
};
// An input image: either caller-supplied row pointers, or rows produced
// lazily via row_callback. Converted float pixels are cached when memory allows.
struct liq_image {
    const char *magic_header;  // run-time type tag (liq_image_magic)
    void* (*malloc)(size_t);
    void (*free)(void*);
    f_pixel *f_pixels;  // cached linear-light conversion of the whole image (NULL until built)
    rgba_pixel **rows;  // caller-supplied row pointers (NULL when using the callback)
    double gamma;
    unsigned int width, height;
    unsigned char *noise, *edges, *dither_map;  // optional analysis maps built by contrast_maps()
    rgba_pixel *pixels, *temp_row;  // pixels: contiguous bitmap (ownership optional); temp_row: per-thread scratch
    f_pixel *temp_f_row;  // per-thread scratch for the low-memory conversion path
    liq_image_get_rgba_row_callback *row_callback;
    void *row_callback_user_info;
    float min_opaque_val;
    bool free_pixels, free_rows, free_rows_internal;  // ownership flags; see liq_image_set_memory_ownership
};
// Per-remap state (dithered output buffer plus a private palette copy);
// owned by a liq_result and invalidated when its settings change.
typedef struct liq_remapping_result {
    const char *magic_header;
    void* (*malloc)(size_t);
    void (*free)(void*);
    unsigned char *pixels;
    colormap *palette;  // private copy of the result's palette
    liq_palette int_palette;  // lazily-built 8-bit palette handed out publicly
    double gamma, palette_error;  // palette_error < 0 means "not computed"
    float dither_level;
    bool use_dither_map;
} liq_remapping_result;
// Quantization result: the chosen palette plus optional cached remapping state.
struct liq_result {
    const char *magic_header;
    void* (*malloc)(size_t);
    void (*free)(void*);
    liq_remapping_result *remapping;  // created on demand; destroyed when gamma/dithering changes
    colormap *palette;
    liq_palette int_palette;  // lazily-built public palette (see liq_get_palette)
    float dither_level;
    double gamma, palette_error;  // palette_error < 0 means "not computed"
    int min_posterization_output;
    bool use_dither_map, fast_palette;
};
// Forward declarations for internal functions defined later in this file.
static liq_result *pngquant_quantize(histogram *hist, const liq_attr *options, double gamma);
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels);
static void contrast_maps(liq_image *image);
static histogram *get_histogram(liq_image *input_image, const liq_attr *options);
static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row);
static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row);
static void liq_remapping_result_destroy(liq_remapping_result *result);
// Formats a message and delivers it to the user-supplied log callback, if any.
// Fix: vsnprintf may return a negative value on encoding error; sizing the VLA
// with a non-positive length would be undefined behavior, so bail out instead.
static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...)
{
    if (context->log_callback) {
        va_list va;
        va_start(va, fmt);
        int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
        va_end(va);
        if (required_space <= 0) {
            return; // formatting failed — nothing sane to log
        }
        char buf[required_space];
        va_start(va, fmt);
        vsnprintf(buf, required_space, fmt, va);
        va_end(va);
        context->log_callback(context, buf, context->log_callback_user_info);
    }
}
// Sends a preformatted message to the log callback, when one is installed.
inline static void verbose_print(const liq_attr *attr, const char *msg)
{
    if (!attr->log_callback) {
        return;
    }
    attr->log_callback(attr, msg, attr->log_callback_user_info);
}
// Asks the user to flush buffered log output, when a flush callback is set.
static void liq_verbose_printf_flush(liq_attr *attr)
{
    if (!attr->log_flush_callback) {
        return;
    }
    attr->log_flush_callback(attr, attr->log_flush_callback_user_info);
}
#if USE_SSE
// Returns true when the CPU supports SSE. On x86-64 SSE is architecturally
// guaranteed; on 32-bit x86 the CPUID feature bits must be queried.
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64))
    return true;
#else
    int a,b,c,d;
    cpuid(1, a, b, c, d);
    return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif
/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
    if (!user_supplied_pointer) {
        return false;
    }
    const char *header = user_supplied_pointer->magic_header;
    if (header == liq_freed_magic) {
        fprintf(stderr, "%s used after being freed", expected_magic_header);
        // this is not normal error handling, this is programmer error that should crash the program.
        // program cannot safely continue if memory has been used after it's been freed.
        // abort() is nasty, but security vulnerability may be worse.
        abort();
    }
    return header == expected_magic_header;
}
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(void *pointer)
{
    if (!pointer) {
        return false;
    }
    // Probe the first byte through a volatile read so the access cannot be
    // optimized away; an invalid pointer will fault here, near the cause.
    volatile char *probe = pointer;
    char first_byte = *probe;
    (void)first_byte; // value is irrelevant — only the access matters
    return true;
}
// Reports an error message through the attr's logging machinery.
static void liq_log_error(const liq_attr *attr, const char *msg) {
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        liq_verbose_printf(attr, " error: %s", msg);
    }
}
// Maps a 0-100 quality setting to the corresponding mean-square-error budget.
static double quality_to_mse(long quality)
{
    // Endpoints are pinned exactly; everything else follows the fitted curve.
    switch (quality) {
        case 0:   return MAX_DIFF;
        case 100: return 0;
    }
    // curve fudged to be roughly similar to quality of libjpeg
    // except lowest 10 for really low number of colors
    const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
    return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}
// Inverse of quality_to_mse: the highest quality whose MSE budget covers `mse`.
static unsigned int mse_to_quality(double mse)
{
    int quality = 100;
    while (quality > 0) {
        if (mse <= quality_to_mse(quality) + 0.000001) { // + epsilon for floating point errors
            return quality;
        }
        quality--;
    }
    return 0;
}
// Sets the acceptable quality window; both bounds are converted to MSE budgets.
LIQ_EXPORT liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    // Both ends must lie in 0..100 and minimum must not exceed target.
    if (minimum < 0 || target < 0 || target > 100 || target < minimum) return LIQ_VALUE_OUT_OF_RANGE;
    attr->target_mse = quality_to_mse(target);
    attr->max_mse = quality_to_mse(minimum);
    return LIQ_OK;
}
// Returns the minimum acceptable quality (0-100), or -1 for an invalid handle.
LIQ_EXPORT int liq_get_min_quality(const liq_attr *attr)
{
    return CHECK_STRUCT_TYPE(attr, liq_attr) ? (int)mse_to_quality(attr->max_mse) : -1;
}
// Returns the target quality (0-100), or -1 for an invalid handle.
LIQ_EXPORT int liq_get_max_quality(const liq_attr *attr)
{
    return CHECK_STRUCT_TYPE(attr, liq_attr) ? (int)mse_to_quality(attr->target_mse) : -1;
}
// Caps the palette size. A paletted image needs 2..256 entries.
LIQ_EXPORT liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = colors >= 2 && colors <= 256;
    if (!in_range) return LIQ_VALUE_OUT_OF_RANGE;
    attr->max_colors = colors;
    return LIQ_OK;
}
// Returns the configured maximum palette size, or -1 for an invalid handle.
LIQ_EXPORT int liq_get_max_colors(const liq_attr *attr)
{
    return CHECK_STRUCT_TYPE(attr, liq_attr) ? (int)attr->max_colors : -1;
}
// Requests that the lowest `bits` bits of each channel be truncated (0-4).
LIQ_EXPORT liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = bits >= 0 && bits <= 4;
    if (!in_range) return LIQ_VALUE_OUT_OF_RANGE;
    attr->min_posterization_output = bits;
    return LIQ_OK;
}
// Returns the user-requested posterization level, or -1 for an invalid handle.
LIQ_EXPORT int liq_get_min_posterization(const liq_attr *attr)
{
    return CHECK_STRUCT_TYPE(attr, liq_attr) ? (int)attr->min_posterization_output : -1;
}
// Translates the single speed knob (1 = best quality .. 10 = fastest) into
// the internal tuning parameters. The constants below are empirical tuning;
// see quality_to_mse for the matching quality model.
LIQ_EXPORT liq_error liq_set_speed(liq_attr* attr, int speed)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE;
    // quadratic growth: much more refinement work at low (high-quality) speeds
    int iterations = MAX(8-speed,0); iterations += iterations * iterations/2;
    attr->voronoi_iterations = iterations;
    attr->voronoi_iteration_limit = 1.0/(double)(1<<(23-speed));
    attr->feedback_loop_trials = MAX(56-9*speed, 0);
    attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
    attr->min_posterization_input = (speed >= 8) ? 1 : 0;
    attr->fast_palette = (speed >= 7);
    attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping
    attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
    attr->speed = speed;
    return LIQ_OK;
}
// Returns the configured speed (1-10), or -1 for an invalid handle.
LIQ_EXPORT int liq_get_speed(const liq_attr *attr)
{
    return CHECK_STRUCT_TYPE(attr, liq_attr) ? (int)attr->speed : -1;
}
// Sets the gamma of the output palette. A cached remapping computed with the
// old gamma becomes stale, so it is discarded.
LIQ_EXPORT liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
    liq_remapping_result *stale = res->remapping;
    if (stale) {
        liq_remapping_result_destroy(stale);
        res->remapping = NULL;
    }
    res->gamma = gamma;
    return LIQ_OK;
}
// Sets the minimum alpha (0-255) preserved for the IE6 workaround;
// stored internally as a 0..1 fraction of full opacity.
LIQ_EXPORT liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = min >= 0 && min <= 255;
    if (!in_range) return LIQ_VALUE_OUT_OF_RANGE;
    attr->min_opaque_val = (double)min/255.0;
    return LIQ_OK;
}
// Inverse of liq_set_min_opacity, clamped to the valid byte range.
LIQ_EXPORT int liq_get_min_opacity(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return -1;
    }
    return MIN(255, 256.0 * attr->min_opaque_val);
}
// Puts the transparent color at the last palette index (Blu-ray subtitle quirk).
LIQ_EXPORT void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        attr->last_index_transparent = (is_last != 0); // normalize to bool
    }
}
// Installs (or clears, with NULL) the verbose-logging callback.
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    // Flush pending output to the previous callback before swapping it out.
    liq_verbose_printf_flush(attr);
    attr->log_callback = callback;
    attr->log_callback_user_info = user_info;
}
// Installs (or clears, with NULL) the log-flush callback.
LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->log_flush_callback = callback;
    attr->log_flush_callback_user_info = user_info;
}
// Creates a settings handle with default options and the built-in
// 16-byte-aligned allocator.
LIQ_EXPORT liq_attr* liq_attr_create()
{
    return liq_attr_create_with_allocator(NULL, NULL);
}
// Flushes pending log output, poisons the handle, and releases it.
LIQ_EXPORT void liq_attr_destroy(liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return;
    }
    liq_verbose_printf_flush(attr);
    attr->magic_header = liq_freed_magic; // mark freed to catch use-after-free
    attr->free(attr);
}
// Duplicates a settings handle; a shallow copy suffices because liq_attr
// owns no heap-allocated members.
LIQ_EXPORT liq_attr* liq_attr_copy(liq_attr *orig)
{
    if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
        return NULL;
    }
    liq_attr *copy = orig->malloc(sizeof(*copy));
    if (copy) {
        *copy = *orig;
    }
    return copy;
}
// Allocates memory aligned to 16 bytes by over-allocating and rounding up;
// the byte just before the returned pointer records the (obfuscated) shift
// so liq_aligned_free() can recover the original malloc pointer.
// Fix: guard against size + 16 overflowing size_t.
static void *liq_aligned_malloc(size_t size)
{
    if (size > SIZE_MAX - 16) {
        return NULL; // padded size would overflow
    }
    unsigned char *ptr = malloc(size + 16);
    if (!ptr) {
        return NULL;
    }
    uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
    ptr += offset;
    assert(0 == (((uintptr_t)ptr) & 15));
    ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()
    return ptr;
}
// Frees memory obtained from liq_aligned_malloc(): ptr[-1] holds the
// obfuscated shift that was applied past the address malloc returned.
static void liq_aligned_free(void *inptr)
{
    unsigned char *aligned = inptr;
    const size_t shift = aligned[-1] ^ 0x59;
    assert(shift > 0 && shift <= 16);
    free(aligned - shift);
}
// Creates a settings handle using an optional custom allocator pair.
// Custom allocators must be supplied as a pair (both or neither); the
// defaults guarantee the 16-byte alignment that SSE code paths rely on.
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
    if (!is_sse_available()) {
        return NULL; // this build requires SSE support at run time
    }
#endif
    if (!custom_malloc && !custom_free) {
        custom_malloc = liq_aligned_malloc;
        custom_free = liq_aligned_free;
    } else if (!custom_malloc != !custom_free) {
        return NULL; // either specify both or none
    }
    liq_attr *attr = custom_malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = (liq_attr) {
        .magic_header = liq_attr_magic,
        .malloc = custom_malloc,
        .free = custom_free,
        .max_colors = 256,
        .min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha)
        .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles.
        .target_mse = 0,
        .max_mse = MAX_DIFF,
    };
    liq_set_speed(attr, 3); // derive the remaining tunables from the default speed
    return attr;
}
// Switches to the low-memory path: one scratch f_pixel row per OpenMP thread
// instead of caching the whole converted image.
static bool liq_image_use_low_memory(liq_image *img)
{
    const size_t row_bytes = sizeof(img->f_pixels[0]) * img->width;
    img->temp_f_row = img->malloc(row_bytes * omp_get_max_threads());
    return img->temp_f_row != NULL;
}
// Decides whether the image is too large to cache the float conversion.
// Fix: compute width*height in size_t so the product cannot overflow
// unsigned int arithmetic on very large images (the original comment warned
// about this but did not guard against it).
static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
    const size_t pixel_count = (size_t)img->width * img->height;
    const size_t budget = (low_memory_hint ? LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel);
    return pixel_count > budget;
}
// Builds a liq_image from either caller-owned row pointers or a row callback.
// Fix: the original leaked `img` (and the temp row) when a later allocation
// failed; all failure paths now release what was already allocated.
static liq_image *liq_image_create_internal(liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
    if (gamma < 0 || gamma > 1.0) {
        liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
        return NULL;
    }
    if (!rows && !row_callback) {
        liq_log_error(attr, "missing row data");
        return NULL;
    }
    liq_image *img = attr->malloc(sizeof(liq_image));
    if (!img) return NULL;
    *img = (liq_image){
        .magic_header = liq_image_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        .width = width, .height = height,
        .gamma = gamma ? gamma : 0.45455, // 0 means "unknown": assume ~1/2.2 (sRGB-like)
        .rows = rows,
        .row_callback = row_callback,
        .row_callback_user_info = row_callback_user_info,
        .min_opaque_val = attr->min_opaque_val,
    };
    // A per-thread temp row is needed when rows come from the callback or
    // must be modified for the IE6 opacity workaround.
    if (!rows || attr->min_opaque_val < 1.f) {
        img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * width * omp_get_max_threads());
        if (!img->temp_row) {
            attr->free(img); // was leaked here in the original
            return NULL;
        }
    }
    // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels
    if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
        verbose_print(attr, " conserving memory");
        if (!liq_image_use_low_memory(img)) {
            if (img->temp_row) attr->free(img->temp_row);
            attr->free(img); // was leaked here in the original
            return NULL;
        }
    }
    if (img->min_opaque_val < 1.f) {
        verbose_print(attr, " Working around IE6 bug by making image less transparent...");
    }
    return img;
}
// Transfers ownership of the caller-supplied rows and/or pixel buffer to the
// library, which will then free them in liq_image_destroy(). Only valid for
// images created from row pointers (not the callback variant).
LIQ_EXPORT liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    if (ownership_flags & LIQ_OWN_ROWS) {
        // rows allocated internally (liq_image_create_rgba) are already owned
        if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
        img->free_rows = true;
    }
    if (ownership_flags & LIQ_OWN_PIXELS) {
        img->free_pixels = true;
        if (!img->pixels) {
            // for simplicity of this API there's no explicit bitmap argument,
            // so the row with the lowest address is assumed to be at the start of the bitmap
            img->pixels = img->rows[0];
            for(unsigned int i=1; i < img->height; i++) {
                img->pixels = MIN(img->pixels, img->rows[i]);
            }
        }
    }
    return LIQ_OK;
}
// Validates image dimensions: positive, and with a pixel count that fits in int.
static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return false;
    }
    const bool dims_positive = width > 0 && height > 0;
    if (!dims_positive) {
        liq_log_error(attr, "width and height must be > 0");
        return false;
    }
    // Reject images whose width*height would overflow int arithmetic.
    if (width > INT_MAX/height) {
        liq_log_error(attr, "image too large");
        return false;
    }
    return true;
}
// Creates an image whose rows are produced lazily by the given callback.
LIQ_EXPORT liq_image *liq_image_create_custom(liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
    return check_image_size(attr, width, height)
        ? liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma)
        : NULL;
}
// Creates an image from an array of caller-owned row pointers.
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(liq_attr *attr, void* rows[], int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    // Probe every row pointer up front so bad input faults here, not mid-quantization.
    for(int row = 0; row < height; row++) {
        const bool row_ok = CHECK_USER_POINTER(rows+row) && CHECK_USER_POINTER(rows[row]);
        if (!row_ok) {
            liq_log_error(attr, "invalid row pointers");
            return NULL;
        }
    }
    return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}
// Creates an image from a contiguous RGBA bitmap by synthesizing an internal
// row-pointer array. Fix: the original dereferenced a NULL `image` (and
// leaked `rows`) when liq_image_create_internal failed.
LIQ_EXPORT liq_image *liq_image_create_rgba(liq_attr *attr, void* bitmap, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    if (!CHECK_USER_POINTER(bitmap)) {
        liq_log_error(attr, "invalid bitmap pointer");
        return NULL;
    }
    rgba_pixel *pixels = bitmap;
    rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
    if (!rows) return NULL;
    for(int i=0; i < height; i++) {
        rows[i] = pixels + width * i;
    }
    liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
    if (!image) {
        attr->free(rows); // avoid NULL deref below and don't leak the row array
        return NULL;
    }
    image->free_rows = true;
    image->free_rows_internal = true;
    return image;
}
// Deliberately non-inlined wrapper around the user's row callback, so that a
// crash inside user code shows a clearly-named frame in the backtrace.
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info)
{
    assert(callback);
    assert(temp_row);
    callback(temp_row, row, width, user_info);
}
// True when caller-supplied rows can be read directly: rows exist and no
// IE6 alpha workaround (which requires modifying pixels) is needed.
inline static bool liq_image_can_use_rows(liq_image *img)
{
    const bool needs_alpha_fix = img->min_opaque_val < 1.f;
    return img->rows && !needs_alpha_fix;
}
// Returns one row of 8-bit RGBA pixels. Uses the caller's row directly when
// possible; otherwise copies/generates it into this thread's scratch row,
// applying the IE6 alpha workaround when enabled.
static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
    if (liq_image_can_use_rows(img)) {
        return img->rows[row];
    }
    assert(img->temp_row);
    // each OpenMP thread gets its own slice of the scratch buffer
    rgba_pixel *temp_row = img->temp_row + img->width * omp_get_thread_num();
    if (img->rows) {
        memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
    } else {
        liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
    }
    if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row);
    return temp_row;
}
// Converts one row of 8-bit RGBA into linear-light floats via the gamma LUT.
static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
    assert(row_f_pixels);
    assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15));
    const rgba_pixel *const src = liq_image_get_row_rgba(img, row);
    const unsigned int width = img->width;
    for(unsigned int x = 0; x < width; x++) {
        row_f_pixels[x] = to_f(gamma_lut, src[x]);
    }
}
// Returns one row of linear-light float pixels. Prefers the whole-image cache;
// falls back to a per-thread scratch row in low-memory mode. The cache is
// built lazily on first use (single-threaded callers only). May return NULL
// when all allocation strategies fail.
static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row)
{
    if (!img->f_pixels) {
        // low-memory mode: convert just this row into the thread's scratch slot
        if (img->temp_f_row) {
            float gamma_lut[256];
            to_f_set_gamma(gamma_lut, img->gamma);
            f_pixel *row_for_thread = img->temp_f_row + img->width * omp_get_thread_num();
            convert_row_to_f(img, row_for_thread, row, gamma_lut);
            return row_for_thread;
        }
        // building the full cache is not thread-safe — must happen before parallel sections
        assert(omp_get_thread_num() == 0);
        if (!liq_image_should_use_low_memory(img, false)) {
            img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
        }
        if (!img->f_pixels) {
            // full-image allocation failed (or was skipped): retry via low-memory path
            if (!liq_image_use_low_memory(img)) return NULL;
            return liq_image_get_row_f(img, row);
        }
        float gamma_lut[256];
        to_f_set_gamma(gamma_lut, img->gamma);
        for(unsigned int i=0; i < img->height; i++) {
            convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut);
        }
    }
    return img->f_pixels + img->width * row;
}
// Returns the image width in pixels, or -1 for an invalid handle.
LIQ_EXPORT int liq_image_get_width(const liq_image *input_image)
{
    return CHECK_STRUCT_TYPE(input_image, liq_image) ? (int)input_image->width : -1;
}
// Returns the image height in pixels, or -1 for an invalid handle.
LIQ_EXPORT int liq_image_get_height(const liq_image *input_image)
{
    return CHECK_STRUCT_TYPE(input_image, liq_image) ? (int)input_image->height : -1;
}
typedef void free_func(void*);
// Picks the deallocator for user-supplied buffers: when the library's default
// (aligned) allocator is active, user buffers were allocated with plain
// malloc and must be released with plain free().
free_func *get_default_free_func(liq_image *img)
{
    const bool default_allocator_in_use = (img->free == liq_aligned_free);
    if (default_allocator_in_use && !img->free_rows_internal) {
        return free;
    }
    return img->free;
}
// Releases caller-supplied pixel/row buffers the library was given ownership of.
static void liq_image_free_rgba_source(liq_image *input_image)
{
    free_func *const release = get_default_free_func(input_image);
    if (input_image->free_pixels && input_image->pixels) {
        release(input_image->pixels);
        input_image->pixels = NULL;
    }
    if (input_image->free_rows && input_image->rows) {
        release(input_image->rows);
        input_image->rows = NULL;
    }
}
// Releases the image, its internally-allocated buffers, and (when ownership
// was transferred) the caller-supplied pixel data.
LIQ_EXPORT void liq_image_destroy(liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;
    liq_image_free_rgba_source(input_image);
    // internally-owned buffers; each may legitimately be NULL
    void *const owned[] = {
        input_image->noise,
        input_image->edges,
        input_image->dither_map,
        input_image->f_pixels,
        input_image->temp_row,
    };
    for(unsigned int i=0; i < sizeof(owned)/sizeof(owned[0]); i++) {
        if (owned[i]) {
            input_image->free(owned[i]);
        }
    }
    input_image->magic_header = liq_freed_magic; // poison against use-after-free
    input_image->free(input_image);
}
// Quantizes the image: builds a weighted color histogram, then selects the
// palette from it. The histogram is temporary and freed before returning.
LIQ_EXPORT liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return NULL;
    if (!CHECK_STRUCT_TYPE(img, liq_image)) {
        liq_log_error(attr, "invalid image pointer");
        return NULL;
    }
    liq_result *result = NULL;
    histogram *hist = get_histogram(img, attr);
    if (hist) {
        result = pngquant_quantize(hist, attr, img->gamma);
        pam_freeacolorhist(hist);
    }
    return result;
}
// Sets the Floyd-Steinberg dithering strength (0..1).
// Fix: the original validated the *old* res->dither_level instead of the new
// `dither_level` argument, so out-of-range values were accepted — and it
// destroyed the cached remapping before validating, mutating state even when
// returning an error. Validate the parameter first.
LIQ_EXPORT liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE;
    if (res->remapping) {
        // cached remap was computed with the old level — discard it
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }
    res->dither_level = dither_level;
    return LIQ_OK;
}
// Creates per-remap state, snapshotting the result's current settings and a
// private copy of its palette. Returns NULL on invalid handle or OOM.
static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return NULL;
    }
    liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
    if (!res) return NULL;
    *res = (liq_remapping_result) {
        .magic_header = liq_remapping_result_magic,
        .malloc = result->malloc,
        .free = result->free,
        .dither_level = result->dither_level,
        .use_dither_map = result->use_dither_map,
        .palette_error = result->palette_error,
        .gamma = result->gamma,
        // private copy, so the remap can adjust the palette without touching the result's
        .palette = pam_duplicate_colormap(result->palette),
    };
    return res;
}
// Returns the output gamma, or -1 for an invalid handle.
LIQ_EXPORT double liq_get_output_gamma(const liq_result *result)
{
    return CHECK_STRUCT_TYPE(result, liq_result) ? result->gamma : -1;
}
// Releases per-remap state: palette copy, pixel buffer, and the struct itself.
static void liq_remapping_result_destroy(liq_remapping_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;
    if (result->palette) {
        pam_freecolormap(result->palette);
    }
    if (result->pixels) {
        result->free(result->pixels);
    }
    result->magic_header = liq_freed_magic; // poison against use-after-free
    result->free(result);
}
// Releases a quantization result, wiping cached public palettes first.
LIQ_EXPORT void liq_result_destroy(liq_result *res)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return;
    memset(&res->int_palette, 0, sizeof(liq_palette));
    liq_remapping_result *remap = res->remapping;
    if (remap) {
        memset(&remap->int_palette, 0, sizeof(liq_palette));
        liq_remapping_result_destroy(remap);
    }
    pam_freecolormap(res->palette);
    res->magic_header = liq_freed_magic; // poison against use-after-free
    res->free(res);
}
// Returns the MSE of the result (scaled to public API units), preferring the
// remap's error when the base error was never computed. -1 for a bad handle.
LIQ_EXPORT double liq_get_quantization_error(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->palette_error >= 0) {
        return result->palette_error*65536.0/6.0;
    }
    const liq_remapping_result *remap = result->remapping;
    if (remap && remap->palette_error >= 0) {
        return remap->palette_error*65536.0/6.0;
    }
    return result->palette_error; // negative: error was never measured
}
// Returns achieved quality on the 0-100 scale, preferring the remap's error
// when the base error was never computed. -1 for a bad handle.
LIQ_EXPORT int liq_get_quantization_quality(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (result->palette_error >= 0) {
        return mse_to_quality(result->palette_error);
    }
    const liq_remapping_result *remap = result->remapping;
    if (remap && remap->palette_error >= 0) {
        return mse_to_quality(remap->palette_error);
    }
    return result->palette_error; // negative: error was never measured
}
// qsort comparator: orders palette entries by descending popularity.
// Fix: the original never returned 0, so two equal keys compared as both
// "less than" each other — an inconsistent ordering, which is undefined
// behavior for qsort comparison functions (C11 7.22.5).
static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    if (v1 > v2) return -1;
    if (v1 < v2) return 1;
    return 0;
}
// Sorts `nelem` palette entries starting at index `start` by descending popularity.
static void sort_palette_qsort(colormap *map, int start, int nelem)
{
    qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}
// Swaps two palette entries in place.
#define SWAP_PALETTE(map, a,b) { \
    const colormap_item tmp = (map)->palette[(a)]; \
    (map)->palette[(a)] = (map)->palette[(b)]; \
    (map)->palette[(b)] = tmp; }
// Orders palette entries so PNG output compresses well and the tRNS chunk
// (which lists alpha for a prefix of the palette) can be as short as possible.
static void sort_palette(colormap *map, const liq_attr *options)
{
    /*
    ** Step 3.5 [GRR]: remap the palette colors so that all entries with
    ** the maximal alpha value (i.e., fully opaque) are at the end and can
    ** therefore be omitted from the tRNS chunk.
    */
    if (options->last_index_transparent) {
        // Blu-ray-subtitle mode: put one fully-transparent color at the last index.
        for(unsigned int i=0; i < map->colors; i++) {
            if (map->palette[i].acolor.a < 1.0/256.0) {
                const unsigned int old = i, transparent_dest = map->colors-1;
                SWAP_PALETTE(map, transparent_dest, old);
                /* colors sorted by popularity make pngs slightly more compressible */
                sort_palette_qsort(map, 0, map->colors-1);
                return;
            }
        }
    }
    /* move transparent colors to the beginning to shrink trns chunk */
    unsigned int num_transparent=0;
    for(unsigned int i=0; i < map->colors; i++) {
        if (map->palette[i].acolor.a < 255.0/256.0) {
            // current transparent color is swapped with earlier opaque one
            if (i != num_transparent) {
                SWAP_PALETTE(map, num_transparent, i);
                i--; // re-examine the entry just swapped into position i
            }
            num_transparent++;
        }
    }
    liq_verbose_printf(options, "  eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");
    /* colors sorted by popularity make pngs slightly more compressible
     * opaque and transparent are sorted separately
     */
    sort_palette_qsort(map, 0, num_transparent);
    sort_palette_qsort(map, num_transparent, map->colors-num_transparent);
    if (map->colors > 16) {
        SWAP_PALETTE(map, 7, 1); // slightly improves compression
        SWAP_PALETTE(map, 8, 2);
        SWAP_PALETTE(map, 9, 3);
    }
}
// Posterizes one 8-bit channel: zero the lowest `bits` bits, then replicate
// the top bits into them so the result still spans the full 0-255 range.
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
    const unsigned int low_mask = (1u << bits) - 1u;
    return (color & ~low_mask) | (color >> (8u - bits));
}
// Converts the internal float palette to the public 8-bit palette, applying
// posterization, and writes the rounded values back into the float palette so
// that later remapping/dithering accounts for the rounding error.
static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma);
    dest->count = map->colors;
    for(unsigned int x = 0; x < map->colors; ++x) {
        rgba_pixel px = to_rgb(gamma, map->palette[x].acolor);
        px.r = posterize_channel(px.r, posterize);
        px.g = posterize_channel(px.g, posterize);
        px.b = posterize_channel(px.b, posterize);
        px.a = posterize_channel(px.a, posterize);
        map->palette[x].acolor = to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */
        if (!px.a) {
            // fully-transparent entry: RGB is irrelevant, so brand it "Liq"
            px.r = 'L'; px.g = 'i'; px.b = 'q';
        }
        dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
    }
}
// Returns the public 8-bit palette, building it lazily on first request.
// Prefers the remapped variant when a remapping has produced one.
LIQ_EXPORT const liq_palette *liq_get_palette(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;
    liq_remapping_result *remap = result->remapping;
    if (remap && remap->int_palette.count) {
        return &remap->int_palette;
    }
    if (!result->int_palette.count) {
        // lazily round the float palette down to 8 bits per channel
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
    }
    return &result->int_palette;
}
// Remaps the image to the palette (no dithering) while accumulating per-entry
// averages ("viter" statistics) used to recenter palette colors on the pixels
// actually mapped to them. Returns the mean square error of the remap, or -1
// if the float conversion could not be allocated.
static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map, const bool fast)
{
    const int rows = input_image->height;
    const unsigned int cols = input_image->width;
    const float min_opaque_val = input_image->min_opaque_val;
    double remapping_error=0;
    if (!liq_image_get_row_f(input_image, 0)) { // trigger lazy conversion
        return -1;
    }
    struct nearest_map *const n = nearest_init(map, fast);
    // per-thread accumulators, padded to keep threads off each other's cache lines
    const unsigned int max_threads = omp_get_max_threads();
    viter_state average_color[(VITER_CACHE_LINE_GAP+map->colors) * max_threads];
    viter_init(map, max_threads, average_color);
    #pragma omp parallel for if (rows*cols > 3000) \
        schedule(static) default(none) shared(average_color) reduction(+:remapping_error)
    for(int row = 0; row < rows; ++row) {
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        unsigned int last_match=0;
        for(unsigned int col = 0; col < cols; ++col) {
            f_pixel px = row_pixels[col];
            float diff;
            // seeding the search with the previous match helps: neighbors are usually similar
            output_pixels[row][col] = last_match = nearest_search(n, px, last_match, min_opaque_val, &diff);
            remapping_error += diff;
            viter_update_color(px, 1.0, map, last_match, omp_get_thread_num(), average_color);
        }
    }
    viter_finalize(map, max_threads, average_color);
    nearest_free(n);
    return remapping_error / (input_image->width * input_image->height);
}
// Applies accumulated Floyd-Steinberg error (scaled by dither_level) to a
// pixel, scaling the correction down when it would push channels far out of
// range or when the error itself is suspiciously large.
inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
    /* Use Floyd-Steinberg errors to adjust actual color. */
    const float sr = thiserr.r * dither_level,
                sg = thiserr.g * dither_level,
                sb = thiserr.b * dither_level,
                sa = thiserr.a * dither_level;
    float ratio = 1.0;
    // allowing some overflow prevents undithered bands caused by clamping of all channels
    if (px.r + sr > 1.03) ratio = MIN(ratio, (1.03-px.r)/sr);
    else if (px.r + sr < 0) ratio = MIN(ratio, px.r/-sr);
    if (px.g + sg > 1.03) ratio = MIN(ratio, (1.03-px.g)/sg);
    else if (px.g + sg < 0) ratio = MIN(ratio, px.g/-sg);
    if (px.b + sb > 1.03) ratio = MIN(ratio, (1.03-px.b)/sb);
    else if (px.b + sb < 0) ratio = MIN(ratio, px.b/-sb);
    // alpha is clamped hard — overshooting transparency is visually worse
    float a = px.a + sa;
    if (a > 1.0) { a = 1.0; }
    else if (a < 0) { a = 0; }
    // If dithering error is crazy high, don't propagate it that much
    // This prevents crazy green pixels popping out of the blue (or red or black! ;)
    const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
    if (dither_error > max_dither_error) {
        ratio *= 0.8;
    } else if (dither_error < 2.f/256.f/256.f) {
        // don't dither areas that don't have noticeable error — makes file smaller
        return px;
    }
    return (f_pixel){
        .r=px.r + sr * ratio,
        .g=px.g + sg * ratio,
        .b=px.b + sb * ratio,
        .a=a,
    };
}
/**
 Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.
 If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image.
 */
static void remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], const colormap *map, const float max_dither_error, const bool use_dither_map, const bool output_image_is_remapped, float base_dithering_level)
{
    const unsigned int rows = input_image->height, cols = input_image->width;
    const unsigned char *dither_map = use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;
    const float min_opaque_val = input_image->min_opaque_val;
    const colormap_item *acolormap = map->palette;
    struct nearest_map *const n = nearest_init(map, false);
    /* Initialize Floyd-Steinberg error vectors. */
    f_pixel *restrict thiserr, *restrict nexterr;
    thiserr = input_image->malloc((cols + 2) * sizeof(*thiserr) * 2); // +2 saves from checking out of bounds access
    nexterr = thiserr + (cols + 2);
    srand(12345); /* deterministic dithering is better for comparing results */
    if (!thiserr) return;
    // seed the first error row with small random noise to avoid startup artifacts
    for (unsigned int col = 0; col < cols + 2; ++col) {
        const double rand_max = RAND_MAX;
        thiserr[col].r = ((double)rand() - rand_max/2.0)/rand_max/255.0;
        thiserr[col].g = ((double)rand() - rand_max/2.0)/rand_max/255.0;
        thiserr[col].b = ((double)rand() - rand_max/2.0)/rand_max/255.0;
        thiserr[col].a = ((double)rand() - rand_max/2.0)/rand_max/255.0;
    }
    // response to this value is non-linear and without it any value < 0.8 would give almost no dithering
    base_dithering_level = 1.0 - (1.0-base_dithering_level)*(1.0-base_dithering_level)*(1.0-base_dithering_level);
    if (dither_map) {
        base_dithering_level *= 1.0/255.0; // convert byte to float
    }
    base_dithering_level *= 15.0/16.0; // prevent small errors from accumulating
    bool fs_direction = true;
    unsigned int last_match=0;
    for (unsigned int row = 0; row < rows; ++row) {
        memset(nexterr, 0, (cols + 2) * sizeof(*nexterr));
        // serpentine scan: alternate left-to-right and right-to-left rows
        unsigned int col = (fs_direction) ? 0 : (cols - 1);
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        do {
            float dither_level = base_dithering_level;
            if (dither_map) {
                dither_level *= dither_map[row*cols + col]; // per-pixel strength from the map
            }
            const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);
            const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
            output_pixels[row][col] = last_match = nearest_search(n, spx, guessed_match, min_opaque_val, NULL);
            const f_pixel xp = acolormap[last_match].acolor;
            f_pixel err = {
                .r = (spx.r - xp.r),
                .g = (spx.g - xp.g),
                .b = (spx.b - xp.b),
                .a = (spx.a - xp.a),
            };
            // If dithering error is crazy high, don't propagate it that much
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
            if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
                dither_level *= 0.75;
            }
            // weight RGB error by opacity — error on transparent pixels matters less
            const float colorimp = (3.0f + acolormap[last_match].acolor.a)/4.0f * dither_level;
            err.r *= colorimp;
            err.g *= colorimp;
            err.b *= colorimp;
            err.a *= dither_level;
            /* Propagate Floyd-Steinberg error terms (7/16 ahead, 3-5-1/16 below),
               mirrored according to the current scan direction. */
            if (fs_direction) {
                thiserr[col + 2].a += err.a * (7.f/16.f);
                thiserr[col + 2].r += err.r * (7.f/16.f);
                thiserr[col + 2].g += err.g * (7.f/16.f);
                thiserr[col + 2].b += err.b * (7.f/16.f);
                nexterr[col + 2].a = err.a * (1.f/16.f);
                nexterr[col + 2].r = err.r * (1.f/16.f);
                nexterr[col + 2].g = err.g * (1.f/16.f);
                nexterr[col + 2].b = err.b * (1.f/16.f);
                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);
                nexterr[col    ].a += err.a * (3.f/16.f);
                nexterr[col    ].r += err.r * (3.f/16.f);
                nexterr[col    ].g += err.g * (3.f/16.f);
                nexterr[col    ].b += err.b * (3.f/16.f);
            } else {
                thiserr[col    ].a += err.a * (7.f/16.f);
                thiserr[col    ].r += err.r * (7.f/16.f);
                thiserr[col    ].g += err.g * (7.f/16.f);
                thiserr[col    ].b += err.b * (7.f/16.f);
                nexterr[col    ].a = err.a * (1.f/16.f);
                nexterr[col    ].r = err.r * (1.f/16.f);
                nexterr[col    ].g = err.g * (1.f/16.f);
                nexterr[col    ].b = err.b * (1.f/16.f);
                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);
                nexterr[col + 2].a += err.a * (3.f/16.f);
                nexterr[col + 2].r += err.r * (3.f/16.f);
                nexterr[col + 2].g += err.g * (3.f/16.f);
                nexterr[col + 2].b += err.b * (3.f/16.f);
            }
            // remapping is done in zig-zag
            if (fs_direction) {
                ++col;
                if (col >= cols) break;
            } else {
                if (col <= 0) break;
                --col;
            }
        } while(1);
        f_pixel *const temperr = thiserr;
        thiserr = nexterr;
        nexterr = temperr;
        fs_direction = !fs_direction;
    }
    input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped
    nearest_free(n);
}
/* histogram contains information how many times each color is present in the image, weighted by importance_map */
static histogram *get_histogram(liq_image *input_image, const liq_attr *options)
{
    // Posterization level: drop this many low bits of each channel when hashing colors.
    unsigned int ignorebits=MAX(options->min_posterization_output, options->min_posterization_input);
    const unsigned int cols = input_image->width, rows = input_image->height;
    // Build the noise/edges maps once; the noise map weights histogram entries below.
    if (!input_image->noise && options->use_contrast_maps) {
        contrast_maps(input_image);
    }
    /*
    ** Step 2: attempt to make a histogram of the colors, unclustered.
    ** If at first we don't succeed, increase ignorebits to increase color
    ** coherence and try again.
    */
    unsigned int maxcolors = options->max_histogram_entries;
    struct acolorhash_table *acht;
    const bool all_rows_at_once = liq_image_can_use_rows(input_image);
    do {
        acht = pam_allocacolorhash(maxcolors, rows*cols, ignorebits, options->malloc, options->free);
        if (!acht) return NULL;
        // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
        // noise map does not include edges to avoid ruining anti-aliasing
        for(unsigned int row=0; row < rows; row++) {
            bool added_ok;
            if (all_rows_at_once) {
                // Hash the entire image in one call; success exits the row loop immediately.
                added_ok = pam_computeacolorhash(acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->noise);
                if (added_ok) break;
            } else {
                // Row-by-row path for images that can't expose all rows at once.
                const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
                added_ok = pam_computeacolorhash(acht, rows_p, cols, 1, input_image->noise ? &input_image->noise[row * cols] : NULL);
            }
            if (!added_ok) {
                // Too many distinct colors for the table: coarsen colors and restart the outer loop.
                ignorebits++;
                liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", ignorebits);
                pam_freeacolorhash(acht);
                acht = NULL;
                break;
            }
        }
    } while(!acht);
    // Noise map was only needed for histogram weighting; release it now.
    if (input_image->noise) {
        input_image->free(input_image->noise);
        input_image->noise = NULL;
    }
    if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now we can free the RGBA source, since a copy has been made in f_pixels
    }
    histogram *hist = pam_acolorhashtoacolorhist(acht, input_image->gamma, options->malloc, options->free);
    pam_freeacolorhash(acht);
    if (hist) {
        liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
    }
    return hist;
}
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 renders colors with even the slightest transparency as completely
       transparent. To improve the situation in IE, colors that are less than
       ~10% transparent are forced completely opaque. */
    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;

    for(unsigned int x = 0; x < input_image->width; x++) {
        const rgba_pixel pixel = row_pixels[x];
        if (pixel.a < almost_opaque_val_int) {
            continue; // transparent enough to leave untouched
        }
        /* ie bug: to avoid a visible step caused by the forced opaqueness,
           linearly raise opaqueness of almost-opaque colors */
        float al = pixel.a / 255.f;
        al = almost_opaque_val + (al-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
        al *= 256.f;
        row_pixels[x].a = al >= 255.f ? 255 : al;
    }
}
/**
 Builds two maps:
    noise - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
    edges - noise map including all edges
 Allocates image->noise and image->edges (cols*rows bytes each); silently does
 nothing for tiny images or when the scratch memory would exceed LIQ_HIGH_MEMORY_LIMIT.
 */
static void contrast_maps(liq_image *image)
{
    const int cols = image->width, rows = image->height;
    if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
        return;
    }
    unsigned char *restrict noise = image->malloc(cols*rows);
    unsigned char *restrict edges = image->malloc(cols*rows);
    unsigned char *restrict tmp = image->malloc(cols*rows);
    if (!noise || !edges || !tmp) {
        // Bug fix: the original leaked whichever buffers *did* allocate.
        // The custom free callback is not guaranteed to accept NULL, so guard each call.
        if (noise) image->free(noise);
        if (edges) image->free(edges);
        if (tmp) image->free(tmp);
        return;
    }
    const f_pixel *curr_row, *prev_row, *next_row;
    curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);
    for (int j=0; j < rows; j++) {
        prev_row = curr_row;
        curr_row = next_row;
        next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); // clamped at the bottom edge
        f_pixel prev, curr = curr_row[0], next=curr;
        for (int i=0; i < cols; i++) {
            prev=curr;
            curr=next;
            next = curr_row[MIN(cols-1,i+1)]; // clamped at the right edge
            // contrast is difference between pixels neighbouring horizontally and vertically
            const float a = fabsf(prev.a+next.a - curr.a*2.f),
                        r = fabsf(prev.r+next.r - curr.r*2.f),
                        g = fabsf(prev.g+next.g - curr.g*2.f),
                        b = fabsf(prev.b+next.b - curr.b*2.f);
            const f_pixel prevl = prev_row[i];
            const f_pixel nextl = next_row[i];
            const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
                        r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
                        g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
                        b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);
            const float horiz = MAX(MAX(a,r),MAX(g,b));
            const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
            const float edge = MAX(horiz,vert);
            float z = edge - fabsf(horiz-vert)*.5f;
            z = 1.f - MAX(z,MIN(horiz,vert));
            z *= z; // noise is amplified
            z *= z;
            z *= 256.f;
            noise[j*cols+i] = z < 256 ? z : 255;
            z = (1.f-edge)*256.f;
            edges[j*cols+i] = z < 256 ? z : 255;
        }
    }
    // noise areas are shrunk and then expanded to remove thin edges from the map
    liq_max3(noise, tmp, cols, rows);
    liq_max3(tmp, noise, cols, rows);
    liq_blur(noise, tmp, noise, cols, rows, 3);
    liq_max3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);
    liq_min3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);
    liq_min3(edges, tmp, cols, rows);
    liq_max3(tmp, edges, cols, rows);
    for(int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);
    image->free(tmp);
    // Ownership of noise/edges transfers to the image; freed later by callers.
    image->noise = noise;
    image->edges = edges;
}
/**
 * Builds map of neighbor pixels mapped to the same palette entry
 *
 * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
 * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
 * Correct flood fill doesn't have visually good properties.
 */
static void update_dither_map(unsigned char *const *const row_pointers, liq_image *input_image)
{
    const unsigned int width = input_image->width;
    const unsigned int height = input_image->height;
    unsigned char *const edges = input_image->edges;
    for(unsigned int row=0; row < height; row++) {
        unsigned char lastpixel = row_pointers[row][0];
        unsigned int lastcol=0;
        for(unsigned int col=1; col < width; col++) {
            const unsigned char px = row_pointers[row][col];
            if (px != lastpixel || col == width-1) {
                // A horizontal run of identical palette indices just ended.
                // neighbor_count estimates how many same-index neighbors the run has;
                // the 2.5 bias keeps short runs from getting a zero/negative scale below.
                float neighbor_count = 2.5f + col-lastcol;
                unsigned int i=lastcol;
                while(i < col) {
                    // Peek one pixel above/below each pixel of the run.
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 1.f;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 1.f;
                    }
                    i++;
                }
                // Attenuate edge strength over the run: large flat areas
                // (many same-index neighbors) get less dithering.
                while(lastcol <= col) {
                    float e = edges[row*width + lastcol] / 255.f;
                    e *= 1.f - 2.5f/neighbor_count;
                    edges[row*width + lastcol++] = e * 255.f;
                }
                lastpixel = px;
            }
        }
    }
    // The edges buffer is rewritten in place and repurposed as the dither map;
    // ownership moves from edges to dither_map.
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}
/* Weight-adjustment callback for viter_do_iteration: colors with larger
   remapping error (diff) get proportionally more weight on the next trial. */
static void adjust_histogram_callback(hist_item *item, float diff)
{
    const float base_weight = item->perceptual_weight + item->adjusted_weight;
    item->adjusted_weight = base_weight * sqrtf(1.f + diff);
}
/**
 Repeats mediancut with different histogram weights to find palette with minimum error.
 feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
 Returns the best colormap found (caller frees) or NULL on allocation failure;
 writes the best total error into *palette_error_p.
 */
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, double *palette_error_p)
{
    unsigned int max_colors = options->max_colors;
    // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
    // at this point actual gamma is not set, so very conservative posterization estimate is used
    const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));
    int feedback_loop_trials = options->feedback_loop_trials;
    colormap *acolormap = NULL;
    double least_error = MAX_DIFF;
    double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
    // percent of trials per 1% of progress, for the verbose progress message
    const double percent = (double)(feedback_loop_trials>0?feedback_loop_trials:1)/100.0;
    do {
        colormap *newmap = mediancut(hist, options->min_opaque_val, max_colors,
            target_mse * target_mse_overshoot, MAX(MAX(90.0/65536.0, target_mse), least_error)*1.2,
            options->malloc, options->free);
        if (!newmap) {
            return NULL;
        }
        if (feedback_loop_trials <= 0) {
            // No feedback loop requested: first mediancut result wins.
            return newmap;
        }
        // after palette has been created, total error (MSE) is calculated to keep the best palette
        // at the same time Voronoi iteration is done to improve the palette
        // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors
        const bool first_run_of_target_mse = !acolormap && target_mse > 0;
        double total_error = viter_do_iteration(hist, newmap, options->min_opaque_val, first_run_of_target_mse ? NULL : adjust_histogram_callback, !acolormap || options->fast_palette);
        // goal is to increase quality or to reduce number of colors used if quality is good enough
        if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
            if (acolormap) pam_freecolormap(acolormap);
            acolormap = newmap;
            if (total_error < target_mse && total_error > 0) {
                // voronoi iteration improves quality above what mediancut aims for
                // this compensates for it, making mediancut aim for worse
                target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
            }
            least_error = total_error;
            // if number of colors could be reduced, try to keep it that way
            // but allow extra color as a bit of wiggle room in case quality can be improved too
            max_colors = MIN(newmap->colors+1, max_colors);
            feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
        } else {
            // New palette was worse: soften the adjusted weights back towards
            // the perceptual weights and burn trials faster.
            for(unsigned int j=0; j < hist->size; j++) {
                hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
            }
            target_mse_overshoot = 1.0;
            feedback_loop_trials -= 6;
            // if error is really bad, it's unlikely to improve, so end sooner
            if (total_error > least_error*4) feedback_loop_trials -= 3;
            pam_freecolormap(newmap);
        }
        liq_verbose_printf(options, " selecting colors...%d%%",100-MAX(0,(int)(feedback_loop_trials/percent)));
    }
    while(feedback_loop_trials > 0);
    // likely_colormap_index (used and set in viter_do_iteration) can't point to index outside colormap
    if (acolormap->colors < 256) {
        for(unsigned int j=0; j < hist->size; j++) {
            if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
                hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
            }
        }
    }
    *palette_error_p = least_error;
    return acolormap;
}
/**
 * Turns a histogram into a quantization result: either copies the histogram
 * directly as a palette (few input colors, no quality target) or runs the
 * mediancut feedback loop plus Voronoi refinement.
 * Returns a newly allocated liq_result (caller frees) or NULL on failure /
 * when the quality (MSE) limit cannot be met.
 */
static liq_result *pngquant_quantize(histogram *hist, const liq_attr *options, const double gamma)
{
    colormap *acolormap;
    double palette_error = -1;
    // no point having perfect match with imperfect colors (ignorebits > 0)
    const bool fast_palette = options->fast_palette || hist->ignorebits > 0;
    const bool few_input_colors = hist->size <= options->max_colors;
    // If image has few colors to begin with (and no quality degradation is required)
    // then it's possible to skip quantization entirely
    if (few_input_colors && options->target_mse == 0) {
        acolormap = pam_colormap(hist->size, options->malloc, options->free);
        if (!acolormap) {
            // Bug fix: the original dereferenced an unchecked allocation here.
            return NULL;
        }
        for(unsigned int i=0; i < hist->size; i++) {
            acolormap->palette[i].acolor = hist->achv[i].acolor;
            acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
        }
        palette_error = 0;
    } else {
        const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
        acolormap = find_best_palette(hist, options, max_mse, &palette_error);
        if (!acolormap) {
            return NULL;
        }
        // Voronoi iteration approaches local minimum for the palette
        const double iteration_limit = options->voronoi_iteration_limit;
        unsigned int iterations = options->voronoi_iterations;
        if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work
        if (iterations) {
            verbose_print(options, " moving colormap towards local minimum");
            double previous_palette_error = MAX_DIFF;
            for(unsigned int i=0; i < iterations; i++) {
                palette_error = viter_do_iteration(hist, acolormap, options->min_opaque_val, NULL, i==0 || options->fast_palette);
                // Stop when the error has converged.
                if (fabs(previous_palette_error-palette_error) < iteration_limit) {
                    break;
                }
                if (palette_error > max_mse*1.5) { // probably hopeless
                    if (palette_error > max_mse*3.0) break; // definitely hopeless
                    i++; // burn iterations faster when far from the target
                }
                previous_palette_error = palette_error;
            }
        }
        if (palette_error > max_mse) {
            liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
                               palette_error*65536.0/6.0, mse_to_quality(palette_error),
                               max_mse*65536.0/6.0, mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return NULL;
        }
    }
    sort_palette(acolormap, options);
    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) {
        // Bug fix: the original leaked acolormap on this failure path.
        pam_freecolormap(acolormap);
        return NULL;
    }
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .fast_palette = fast_palette,
        .use_dither_map = options->use_dither_map,
        .gamma = gamma,
        .min_posterization_output = options->min_posterization_output,
    };
    return result;
}
/* Remaps the image into a caller-supplied contiguous buffer (1 byte per pixel)
   by building row pointers into it and delegating to liq_write_remapped_image_rows. */
LIQ_EXPORT liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }
    // NOTE(review): width*height could overflow size_t for pathological
    // dimensions — confirm upstream validation limits image size.
    const size_t required_size = input_image->width * input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }
    // VLA of row pointers into the caller's buffer.
    // NOTE(review): stack usage grows with image height — confirm height is bounded.
    unsigned char *rows[input_image->height];
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}
/* Remaps the image to the quantized palette, writing one palette index per pixel
   into row_pointers. Performs dithering (and optional dither-map generation)
   according to the result's settings. */
LIQ_EXPORT liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }
    // Only one remapping result is kept per liq_result; discard any previous one.
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }
    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;
    // The dither map requires edge data; build it lazily if needed.
    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }
    /*
    ** Step 4: map the colors in the image to their closest match in the
    ** new colormap, and write 'em out.
    */
    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        // Non-dithered path: round the palette, then a single nearest-color pass.
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette, quant->fast_palette);
    } else {
        const bool generate_dither_map = result->use_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette, quant->fast_palette);
            update_dither_map(row_pointers, input_image);
        }
        // remapping above was the last chance to do voronoi iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remap_to_palette_floyd(input_image, row_pointers, result->palette,
                               MAX(remapping_error*2.4, 16.f/256.f), result->use_dither_map, generate_dither_map, result->dither_level);
    }
    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }
    return LIQ_OK;
}
/** Returns the library version number (LIQ_VERSION) as an integer. */
LIQ_EXPORT int liq_version(void) {
    // `(void)` makes this a real prototype; empty parentheses declare a
    // function with unspecified parameters in pre-C23 C.
    return LIQ_VERSION;
}
|
StreamTriad_par3.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "timer.h"
/* Stream-triad benchmark (c = a + scalar*b) offloaded with OpenMP target
   directives; reports the average per-iteration runtime. */
int main(int argc, char *argv[]){
    (void)argc; (void)argv; // command-line arguments are unused
    int nsize = 20000000, ntimes=16;
    double* restrict a = malloc(nsize * sizeof(double));
    double* restrict b = malloc(nsize * sizeof(double));
    double* restrict c = malloc(nsize * sizeof(double));
    // Bug fix: the original dereferenced these pointers without checking;
    // ~480 MB of allocations can plausibly fail.
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Error: failed to allocate %d-element arrays\n", nsize);
        free(a); free(b); free(c); // free(NULL) is a no-op
        return 1;
    }
    struct timespec tstart;
    // initializing data and arrays
    double scalar = 3.0, time_sum = 0.0;
#pragma omp target data map(to:a[0:nsize], b[0:nsize], c[0:nsize])
    {
        // Initialize on the device so first-touch happens there.
#pragma omp target teams distribute parallel for simd
        for (int i=0; i<nsize; i++) {
            a[i] = 1.0;
            b[i] = 2.0;
        }
        for (int k=0; k<ntimes; k++){
            cpu_timer_start(&tstart);
            // stream triad loop
#pragma omp target teams distribute parallel for simd
            for (int i=0; i<nsize; i++){
                c[i] = a[i] + scalar*b[i];
            }
            time_sum += cpu_timer_stop(tstart);
        }
    } // NOTE(review): c is mapped `to` only and never copied back; fine since
      // the host never reads c, but the trailing comment mentioning map(from:c)
      // does not match the actual clause — confirm intent.
    // NOTE(review): units depend on cpu_timer_stop (defined in timer.h) —
    // confirm it returns milliseconds as the message claims.
    printf("Average runtime for stream triad loop is %lf msecs\n", time_sum/ntimes);
    free(a);
    free(b);
    free(c);
    return(0);
}
|
convert.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include "../sptensor.h"
#include "hicoo.h"
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
/**
 * Compare two specified coordinates.
 * @param[in] item1 first tuple
 * @param[in] item2 second tuple
 * @param[in] nmodes tuple length (number of tensor modes)
 * @return 1, item1 == item2; otherwise, 0.
 */
static int spt_EqualWithTwoCoordinates(
    const sptIndex * item1,
    const sptIndex * item2,
    const sptIndex nmodes)
{
    // Cleanup: removed the unreachable `break` after `return 0` and the
    // redundant i1/i2 temporaries from the original.
    for(sptIndex m=0; m<nmodes; ++m) {
        if(item1[m] != item2[m]) {
            return 0; // first differing mode decides
        }
    }
    return 1;
}
/**
 * Compute the end of this block
 * @param tsr a pointer to a sparse tensor
 * @return out_item the end indices of this block
 */
static int spt_BlockEnd(
    sptIndex * out_item,
    sptSparseTensor *tsr,
    const sptIndex * in_item,
    const sptElementIndex sb)
{
    const sptIndex nmodes = tsr->nmodes;
    for(sptIndex m=0; m<nmodes; ++m) {
        sptAssert(in_item[m] < tsr->ndims[m]);
        // Exclusive end: block start + block size, clamped to the mode's dimension.
        const sptIndex candidate = in_item[m] + sb;
        out_item[m] = (candidate < tsr->ndims[m]) ? candidate : tsr->ndims[m];
    }
    return 0;
}
/**
 * Locate the beginning of the block/kernel containing the coordinates
 * @param tsr a pointer to a sparse tensor
 * @return out_item the beginning indices of this block
 */
static int spt_LocateBeginCoord(
    sptIndex * out_item,
    sptSparseTensor *tsr,
    const sptIndex * in_item,
    const sptElementIndex bits)
{
    /* A block/kernel index is the coordinate with its low `bits` stripped. */
    sptIndex m = 0;
    while(m < tsr->nmodes) {
        out_item[m] = in_item[m] >> bits;
        ++m;
    }
    return 0;
}
/**
 * Set scheduler for kernels.
 * @param[out] kschr nmodes kernel schedulers
 * @param[out] nkiters the number of columns for nmodes schedulers
 * @param[in] kptr a vector of kernel pointers
 * @param[in] tsr a pointer to a sparse tensor
 * @param[in] sk_bits the bits of superblock size (sk)
 * @return zero on success, an error code otherwise
 */
int spt_SetKernelScheduler(
    sptIndexVector **kschr,
    sptIndex *nkiters,
    sptNnzIndexVector * const kptr,
    sptSparseTensor *tsr,
    const sptElementIndex sk_bits)
{
    sptIndex nmodes = tsr->nmodes;
    sptIndex * ndims = tsr->ndims;
    int result = 0;

    /* Scratch coordinate buffers.
       Bug fix: the original never checked these allocations. */
    sptIndex * coord = (sptIndex *)malloc(nmodes * sizeof(*coord));
    spt_CheckOSError(!coord, "HiSpTns Convert");
    sptIndex * kernel_coord = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord));
    if(!kernel_coord) {
        free(coord); // avoid leaking the first buffer
        spt_CheckOSError(!kernel_coord, "HiSpTns Convert");
    }

    /* Append each kernel k to the scheduler row of its kernel coordinate in every mode. */
    for(sptNnzIndex k=0; k<kptr->len - 1; ++k) {
        sptNnzIndex z = kptr->data[k]; // first nonzero of kernel k
        for(sptIndex m=0; m<nmodes; ++m)
            coord[m] = tsr->inds[m].data[z];
        result = spt_LocateBeginCoord(kernel_coord, tsr, coord, sk_bits);
        spt_CheckError(result, "HiSpTns Convert", NULL);
        for(sptIndex m=0; m<nmodes; ++m) {
            result = sptAppendIndexVector(&(kschr[m][kernel_coord[m]]), k);
            spt_CheckError(result, "HiSpTns Convert", NULL);
        }
    }
    free(coord);
    free(kernel_coord);

    /* nkiters[m] = length of the longest scheduler row in mode m. */
    sptIndex sk = (sptIndex)1 << sk_bits; // exact; replaces pow(2, sk_bits) double round-trip
    sptIndex tmp;
    for(sptIndex m=0; m<nmodes; ++m) {
        tmp = 0;
        sptIndex kernel_ndim = (ndims[m] + sk - 1) / sk; // ceil(ndims[m] / sk)
        for(sptIndex i=0; i<kernel_ndim; ++i) {
            if(tmp < kschr[m][i].len)
                tmp = kschr[m][i].len;
        }
        nkiters[m] = tmp;
    }
    return 0;
}
/**
 * Pre-process COO sparse tensor by permuting, sorting, and record pointers to blocked rows. Kernels in Row-major order, blocks and elements are in Z-Morton order.
 * @param[out] kptr a vector of kernel pointers
 * @param[out] kschr nmodes kernel schedulers
 * @param[out] nkiters the number of columns for nmodes schedulers
 * @param[in] tsr a pointer to a sparse tensor
 * @param[in] sb_bits the bits of block size (sb)
 * @param[in] sk_bits the bits of superblock size (sk)
 * @param[in] tk the number of threads
 */
int spt_PreprocessSparseTensor(
    sptNnzIndexVector * kptr,
    sptIndexVector **kschr,
    sptIndex *nkiters,
    sptSparseTensor *tsr,
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    int const tk)
{
    sptNnzIndex nnz = tsr->nnz;
    int result;
    // TODO: possible permute modes to improve parallelism
    /* Sort tsr in a Row-major Block order to get all kernels. Not use Morton-order for kernels: 1. better support for higher-order tensors by limiting kernel size, because Morton key bit <= 128; */
    sptTimer rowblock_sort_timer;
    sptNewTimer(&rowblock_sort_timer, 0);
    sptStartTimer(rowblock_sort_timer);
    sptSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk); // Parallelized inside
    sptStopTimer(rowblock_sort_timer);
    sptPrintElapsedTime(rowblock_sort_timer, "rowblock sorting");
    sptFreeTimer(rowblock_sort_timer);
#if PARTI_DEBUG == 3
    printf("Sorted by sptSparseTensorSortIndexRowBlock.\n");
    sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0);
#endif
    sptTimer set_kernel_timer;
    sptNewTimer(&set_kernel_timer, 0);
    sptStartTimer(set_kernel_timer);
    result = sptSetKernelPointers(kptr, tsr, sk_bits);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);
    result = spt_SetKernelScheduler(kschr, nkiters, kptr, tsr, sk_bits);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);
    sptStopTimer(set_kernel_timer);
    sptPrintElapsedTime(set_kernel_timer, "Set Kernel Ptrs");
    sptFreeTimer(set_kernel_timer);
    sptTimer morton_sort_timer;
    sptNewTimer(&morton_sort_timer, 0);
    sptStartTimer(morton_sort_timer);
    /* Sort blocks in each kernel in Morton-order */
    /* Loop for all kernels, 0-kptr.len for OMP code */
#pragma omp parallel for num_threads(tk)
    for(sptNnzIndex k=0; k<kptr->len - 1; ++k) {
        // Bug fix: k_begin/k_end were declared OUTSIDE the parallel loop in the
        // original, making them shared across OMP threads — a data race that
        // could make threads sort the wrong ranges. Declaring them inside the
        // loop body makes them private per iteration.
        const sptNnzIndex k_begin = kptr->data[k];
        const sptNnzIndex k_end = kptr->data[k+1]; // exclusive
        /* Sort blocks in each kernel in Morton-order */
        sptSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk);
#if PARTI_DEBUG == 3
        printf("Kernel %"PARTI_PRI_NNZ_INDEX ": Sorted by sptSparseTensorSortIndexMorton.\n", k);
        sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0);
#endif
    }
    sptStopTimer(morton_sort_timer);
    sptPrintElapsedTime(morton_sort_timer, "Morton sorting");
    sptFreeTimer(morton_sort_timer);
    return 0;
}
/*************************************************
* PUBLIC FUNCTIONS
*************************************************/
/**
 * Record mode pointers for kernel rows, from a sorted tensor.
 * @param[out] kptr a vector of kernel pointers: begin offsets of each kernel's
 *             nonzeros, with a final sentinel entry equal to nnz
 * @param[in] tsr a pointer to a sparse tensor (must already be sorted in
 *            kernel row-block order)
 * @param[in] sk_bits the bits of superblock size (sk)
 */
int sptSetKernelPointers(
    sptNnzIndexVector *kptr,
    sptSparseTensor *tsr,
    const sptElementIndex sk_bits)
{
    sptIndex nmodes = tsr->nmodes;
    sptNnzIndex nnz = tsr->nnz;
    sptNnzIndex k = 0; // count kernels
    sptNnzIndex knnz = 0; // #Nonzeros per kernel
    int result = 0;
    result = sptAppendNnzIndexVector(kptr, 0); // kernel 0 starts at nonzero 0
    spt_CheckError(result, "HiSpTns Convert", NULL);
    // Scratch coordinate buffers.
    // NOTE(review): these mallocs are unchecked; a failed allocation would be
    // dereferenced below — consider adding spt_CheckOSError checks.
    sptIndex * coord = (sptIndex *)malloc(nmodes * sizeof(*coord));
    sptIndex * kernel_coord = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord));
    sptIndex * kernel_coord_prior = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord_prior));
    /* Process first nnz to get the first kernel_coord_prior */
    for(sptIndex m=0; m<nmodes; ++m)
        coord[m] = tsr->inds[m].data[0]; // first nonzero indices
    result = spt_LocateBeginCoord(kernel_coord_prior, tsr, coord, sk_bits);
    spt_CheckError(result, "HiSpTns Convert", NULL);
    // Scan all nonzeros in sorted order; each time the kernel coordinate
    // changes, close the previous kernel by appending its end offset.
    for(sptNnzIndex z=0; z<nnz; ++z) {
        for(sptIndex m=0; m<nmodes; ++m)
            coord[m] = tsr->inds[m].data[z];
        result = spt_LocateBeginCoord(kernel_coord, tsr, coord, sk_bits);
        spt_CheckError(result, "HiSpTns Convert", NULL);
        if(spt_EqualWithTwoCoordinates(kernel_coord, kernel_coord_prior, nmodes) == 1) {
            ++ knnz; // still inside the same kernel
        } else {
            ++ k;
            // kptr->data[k-1] is the begin offset of the kernel just closed
            result = sptAppendNnzIndexVector(kptr, knnz + kptr->data[k-1]);
            spt_CheckError(result, "HiSpTns Convert", NULL);
            for(sptIndex m=0; m<nmodes; ++m)
                kernel_coord_prior[m] = kernel_coord[m];
            knnz = 1; // current nonzero opens the new kernel
        }
    }
    sptAssert(k < kptr->len);
    sptAssert(kptr->data[kptr->len-1] + knnz == nnz);
    /* Set the last element for kptr */
    sptAppendNnzIndexVector(kptr, nnz);
    free(coord);
    free(kernel_coord);
    free(kernel_coord_prior);
    return 0;
}
/**
 * Convert a COO tensor to a HiCOO tensor.
 * @param[out] hitsr the sparse tensor in HiCOO format
 * @param[out] max_nnzb the maximum number of nonzeros per tensor block
 * @param[in] tsr a pointer to a sparse tensor (sorted in place as a side effect)
 * @param[in] sb_bits the bits of block size (sb)
 * @param[in] sk_bits the bits of superblock size (sk)
 * @param[in] tk the number of threads
 */
int sptSparseTensorToHiCOO(
    sptSparseTensorHiCOO *hitsr,
    sptNnzIndex *max_nnzb,
    sptSparseTensor *tsr,
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    int const tk)
{
    const sptElementIndex sc_bits = 14; // It is kept for the future use.
    sptAssert(sk_bits >= sb_bits);
    sptAssert(sc_bits >= sb_bits);
    sptIndex i;
    int result;
    sptIndex nmodes = tsr->nmodes;
    sptNnzIndex nnz = tsr->nnz;
    sptElementIndex sb = pow(2, sb_bits); // block edge length
    sptIndex sc = pow(2, sc_bits); // chunk size (in nonzeros)
    /* Set HiCOO parameters. ndims for type conversion, size_t -> sptIndex */
    sptIndex * ndims = malloc(nmodes * sizeof *ndims);
    spt_CheckOSError(!ndims, "HiSpTns Convert");
    for(i = 0; i < nmodes; ++i) {
        ndims[i] = (sptIndex)tsr->ndims[i];
    }
    result = sptNewSparseTensorHiCOO(hitsr, (sptIndex)tsr->nmodes, ndims, (sptNnzIndex)tsr->nnz, sb_bits, sk_bits, sc_bits);
    spt_CheckError(result, "HiSpTns Convert", NULL);
    /* Pre-process tensor to get hitsr->kptr, values are nonzero locations. */
    sptTimer sort_timer;
    sptNewTimer(&sort_timer, 0);
    sptStartTimer(sort_timer);
    spt_PreprocessSparseTensor(&hitsr->kptr, hitsr->kschr, hitsr->nkiters, tsr, sb_bits, sk_bits, tk);
    sptStopTimer(sort_timer);
    sptPrintElapsedTime(sort_timer, "HiCOO sorting (rowblock + morton)");
    sptFreeTimer(sort_timer);
#if PARTI_DEBUG >= 2
    printf("Kernels: Row-major, blocks: Morton-order sorted:\n");
    sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0);
    printf("hitsr->kptr:\n");
    sptDumpNnzIndexVector(&hitsr->kptr, stdout);
#endif
    sptTimer gen_timer;
    sptNewTimer(&gen_timer, 0);
    sptStartTimer(gen_timer);
    /* Temporary storage */
    // NOTE(review): these four mallocs are unchecked — a failure would be
    // dereferenced below; consider spt_CheckOSError as done for ndims above.
    sptIndex * block_begin = (sptIndex *)malloc(nmodes * sizeof(*block_begin));
    sptIndex * block_end = (sptIndex *)malloc(nmodes * sizeof(*block_end));
    sptIndex * block_begin_prior = (sptIndex *)malloc(nmodes * sizeof(*block_begin_prior));
    sptIndex * block_coord = (sptIndex *)malloc(nmodes * sizeof(*block_coord));
    sptNnzIndex k_begin, k_end; // #Nonzeros locations
    sptNnzIndex nk = 0; // #Kernels
    sptNnzIndex nc = 0; // #Chunks
    sptNnzIndex nb = 1; // #Blocks // counting from the first nnz
    sptNnzIndex nb_tmp = 0;
    sptNnzIndex ne = 0; // #Nonzeros per block
    sptIndex eindex = 0;
    sptBlockIndex chunk_size = 0;
    /* different appending methods:
     * elements: append every nonzero entry
     * blocks: append when seeing a new block.
     * chunks: appending when seeting a new chunk. Notice the boundary of kernels and the last chunk of the whole tensor may be larger than the sc.
     * kernels: append when seeing a new kernel. Not appending a vector, just write data into an allocated array.
     */
    /* Process first nnz */
    for(sptIndex m=0; m<nmodes; ++m)
        block_coord[m] = tsr->inds[m].data[0]; // first nonzero indices
    result = spt_LocateBeginCoord(block_begin_prior, tsr, block_coord, sb_bits);
    spt_CheckError(result, "HiSpTns Convert", NULL);
    // The first block is registered up-front; the loop below only appends on changes.
    for(sptIndex m=0; m<nmodes; ++m)
        sptAppendBlockIndexVector(&hitsr->binds[m], (sptBlockIndex)block_begin_prior[m]);
    sptAppendNnzIndexVector(&hitsr->bptr, 0);
    /* Loop for all kernels, 0 - hitsr->kptr.len - 1 for OMP code */
    for(sptNnzIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        k_begin = hitsr->kptr.data[k];
        k_end = hitsr->kptr.data[k+1]; // exclusive
        nb_tmp = k == 0 ? 0: nb;
        /* Modify kptr pointing to block locations */
        // kptr entries are rewritten in place: nonzero offsets become block offsets.
        hitsr->kptr.data[k] = nb_tmp;
        ++ nk;
        /* Only append a chunk for the new kernel, the last chunk in the old kernel may be larger than sc */
        sptAppendNnzIndexVector(&hitsr->cptr, nb_tmp);
        ++ nc;
        chunk_size = 0;
        /* Loop nonzeros in each kernel */
        for(sptNnzIndex z = k_begin; z < k_end; ++z) {
#if PARTI_DEBUG == 5
            printf("z: %"PARTI_PRI_NNZ_INDEX "\n", z);
#endif
            for(sptIndex m=0; m<nmodes; ++m)
                block_coord[m] = tsr->inds[m].data[z]; // first nonzero indices
#if PARTI_DEBUG == 5
            printf("block_coord:\n");
            sptAssert(sptDumpIndexArray(block_coord, nmodes, stdout) == 0);
#endif
            result = spt_LocateBeginCoord(block_begin, tsr, block_coord, sb_bits);
            spt_CheckError(result, "HiSpTns Convert", NULL);
#if PARTI_DEBUG == 5
            printf("block_begin_prior:\n");
            sptAssert(sptDumpIndexArray(block_begin_prior, nmodes, stdout) == 0);
            printf("block_begin:\n");
            sptAssert(sptDumpIndexArray(block_begin, nmodes, stdout) == 0);
#endif
            result = spt_BlockEnd(block_end, tsr, block_begin, sb); // exclusive
            spt_CheckError(result, "HiSpTns Convert", NULL);
            /* Append einds and values */
            for(sptIndex m=0; m<nmodes; ++m) {
                // Element index = offset within its block.
                // NOTE(review): the `<` branch of this ternary looks unreachable,
                // since block_begin[m] = data[z] >> sb_bits implies
                // data[z] >= block_begin[m] << sb_bits — confirm.
                eindex = tsr->inds[m].data[z] < (block_begin[m] << sb_bits) ? tsr->inds[m].data[z] : tsr->inds[m].data[z] - (block_begin[m] << sb_bits);
                sptAssert(eindex < sb);
                sptAppendElementIndexVector(&hitsr->einds[m], (sptElementIndex)eindex);
            }
            sptAppendValueVector(&hitsr->values, tsr->values.data[z]);
            /* z in the same block with last z */
            if (spt_EqualWithTwoCoordinates(block_begin, block_begin_prior, nmodes) == 1)
            {
                /* ne: #Elements in current block */
                ++ ne;
            } else { /* New block */
                /* ne: #Elements in the last block */
                /* Append block bptr and bidx */
                // NOTE(review): z is an sptNnzIndex cast to sptBlockIndex before
                // being appended to an NnzIndexVector — confirm this narrowing
                // cast is intentional and safe for large tensors.
                sptAppendNnzIndexVector(&hitsr->bptr, (sptBlockIndex)z);
                for(sptIndex m=0; m<nmodes; ++m)
                    sptAppendBlockIndexVector(&hitsr->binds[m], (sptBlockIndex)block_begin[m]);
                for(sptIndex m=0; m<nmodes; ++m)
                    block_begin_prior[m] = block_begin[m];
                /* ne: old block's number of nonzeros */
                if(chunk_size + ne >= sc) { // calculate the prior block
                    /* Append a chunk ending by the old block */
                    sptAppendNnzIndexVector(&hitsr->cptr, nb);
                    ++ nc;
                    chunk_size = 0;
                } else {
                    chunk_size += ne;
                }
                ++ nb;
                ne = 1;
            } // End new block
#if PARTI_DEBUG == 5
            printf("nk: %u, nc: %u, nb: %u, ne: %u, chunk_size: %lu\n\n", nk, nc, nb, ne, chunk_size);
#endif
        } // End z loop
    } // End k loop
    sptAssert(nb <= nnz);
    sptAssert(nb == hitsr->binds[0].len);
    // sptAssert(nc <= nb);
    sptAssert(nk == hitsr->kptr.len - 1);
    /* Last element for kptr, cptr, bptr */
    hitsr->kptr.data[hitsr->kptr.len - 1] = hitsr->bptr.len;
    sptAppendNnzIndexVector(&hitsr->cptr, hitsr->bptr.len);
    sptAppendNnzIndexVector(&hitsr->bptr, nnz);
    // Compute the largest block population for the caller (used to size buffers).
    *max_nnzb = hitsr->bptr.data[1] - hitsr->bptr.data[0];
    sptNnzIndex sum_nnzb = 0;
    for(sptIndex i=0; i < hitsr->bptr.len - 1; ++i) {
        sptNnzIndex nnzb = hitsr->bptr.data[i+1] - hitsr->bptr.data[i];
        sum_nnzb += nnzb;
        if(*max_nnzb < nnzb) {
            *max_nnzb = nnzb;
        }
    }
    sptAssert(sum_nnzb == hitsr->nnz);
    sptStopTimer(gen_timer);
    sptPrintElapsedTime(gen_timer, "Generate HiCOO");
    sptFreeTimer(gen_timer);
    free(block_begin);
    free(block_end);
    free(block_begin_prior);
    free(block_coord);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.