source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
params.c | /*
*
* (c) Ivo Hofacker
*
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/io.h"
#include "ViennaRNA/params/basic.h"
/**
*** \file ViennaRNA/params/basic.c
*** <P>
*** This file provides functions that return temperature scaled energy parameters and
*** Boltzmann weights packed in datastructures
*** </P>
***/
/*------------------------------------------------------------------------*/
#define SCALE 10
/**
*** dangling ends should never be destabilizing, i.e. expdangle>=1<BR>
*** specific heat needs smooth function (2nd derivative)<BR>
*** we use a*(sin(x+b)+1)^2, with a=2/(3*sqrt(3)), b=Pi/6-sqrt(3)/2,
*** in the interval b<x<sqrt(3)/2
*/
#define SMOOTH(X) ((X) / SCALE < -1.2283697) ? 0 : (((X) / SCALE > 0.8660254) ? (X) : \
SCALE *0.38490018 *(sin((X) / SCALE - \
0.34242663) + 1) * \
(sin((X) / SCALE - 0.34242663) + 1))
/* #define SMOOTH(X) ((X)<0 ? 0 : (X)) */
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/* cached parameter set served by the deprecated copy_parameters()/set_parameters() API */
PRIVATE vrna_param_t p;
/* id of the global model state `p` was built from; -1 = cache never initialized */
PRIVATE int id = -1;
/* variables for partition function */
PRIVATE vrna_exp_param_t pf;
/* id of the global model state `pf` was built from; -1 = cache never initialized */
PRIVATE int pf_id = -1;
#ifdef _OPENMP
/* each OpenMP thread keeps its own cache-validity markers */
#pragma omp threadprivate(id, pf_id)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE vrna_param_t *get_scaled_params(vrna_md_t *md);
PRIVATE vrna_exp_param_t *get_scaled_exp_params(vrna_md_t *md,
double pfs);
PRIVATE vrna_exp_param_t *get_exp_params_ali(vrna_md_t *md,
unsigned int n_seq,
double pfs);
PRIVATE void rescale_params(vrna_fold_compound_t *vc);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PUBLIC vrna_param_t *
vrna_params(vrna_md_t *md)
{
  vrna_md_t md_default;

  /*
   * Produce a temperature-scaled energy parameter set for the given model
   * details; when no model is supplied, fall back to the library defaults.
   * The caller owns (and must free) the returned structure.
   */
  if (!md) {
    vrna_md_set_default(&md_default);
    md = &md_default;
  }

  return get_scaled_params(md);
}
PUBLIC vrna_exp_param_t *
vrna_exp_params(vrna_md_t *md)
{
  vrna_md_t md_default;

  /*
   * Produce Boltzmann-weighted (partition function) parameters for the given
   * model details; default model details are used when md is NULL. The
   * pf_scale argument -1. requests the scaling factor to be set later.
   */
  if (!md) {
    vrna_md_set_default(&md_default);
    md = &md_default;
  }

  return get_scaled_exp_params(md, -1.);
}
PUBLIC vrna_exp_param_t *
vrna_exp_params_comparative(unsigned int n_seq,
                            vrna_md_t    *md)
{
  vrna_md_t md_default;

  /*
   * Boltzmann-weighted parameters for comparative (alignment) folding of
   * n_seq sequences; default model details are used when md is NULL.
   */
  if (!md) {
    vrna_md_set_default(&md_default);
    md = &md_default;
  }

  return get_exp_params_ali(md, n_seq, -1.);
}
PUBLIC vrna_param_t *
vrna_params_copy(vrna_param_t *par)
{
  vrna_param_t *dup;

  /* Return a heap-allocated shallow copy of par, or NULL for NULL input. */
  if (!par)
    return NULL;

  dup = (vrna_param_t *)vrna_alloc(sizeof(vrna_param_t));
  memcpy(dup, par, sizeof(vrna_param_t));

  return dup;
}
PUBLIC vrna_exp_param_t *
vrna_exp_params_copy(vrna_exp_param_t *par)
{
  vrna_exp_param_t *dup;

  /* Return a heap-allocated shallow copy of par, or NULL for NULL input. */
  if (!par)
    return NULL;

  dup = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));
  memcpy(dup, par, sizeof(vrna_exp_param_t));

  return dup;
}
PUBLIC void
vrna_params_subst(vrna_fold_compound_t *vc,
                  vrna_param_t         *parameters)
{
  /*
   * Replace the energy parameter set of vc by a copy of `parameters`, or by
   * a freshly computed default set when `parameters` is NULL.
   *
   * FIX: reset vc->params to NULL right after free(). Previously, if the
   * default switch branch assigned nothing (unknown vc->type), vc->params
   * was left dangling on the freed memory.
   */
  if (vc) {
    if (vc->params) {
      free(vc->params);
      vc->params = NULL;  /* avoid a dangling pointer if nothing is assigned below */
    }

    if (parameters) {
      vc->params = vrna_params_copy(parameters);
    } else {
      switch (vc->type) {
        case VRNA_FC_TYPE_SINGLE: /* fall through */
        case VRNA_FC_TYPE_COMPARATIVE:
          vc->params = vrna_params(NULL);
          break;

        default:
          break;
      }
    }
  }
}
PUBLIC void
vrna_params_reset(vrna_fold_compound_t *vc,
                  vrna_md_t            *md_p)
{
  /*
   * Rebuild the energy parameter set of vc from the model details md_p
   * (or from defaults when md_p is NULL), discarding any previous set.
   */
  if (!vc)
    return;

  switch (vc->type) {
    case VRNA_FC_TYPE_SINGLE:     /* fall through */
    case VRNA_FC_TYPE_COMPARATIVE:
      free(vc->params);           /* free(NULL) is a no-op */
      vc->params = vrna_params(md_p);
      break;

    default:
      break;
  }
}
PUBLIC void
vrna_exp_params_reset(vrna_fold_compound_t *vc,
                      vrna_md_t            *md_p)
{
  /*
   * Rebuild the Boltzmann factor set of vc from the model details md_p
   * (or from defaults when md_p is NULL), discarding any previous set.
   */
  if (!vc)
    return;

  switch (vc->type) {
    case VRNA_FC_TYPE_SINGLE:     /* fall through */
    case VRNA_FC_TYPE_COMPARATIVE:
      free(vc->exp_params);       /* free(NULL) is a no-op */
      vc->exp_params = vrna_exp_params(md_p);
      break;

    default:
      break;
  }
}
PUBLIC void
vrna_exp_params_subst(vrna_fold_compound_t *vc,
                      vrna_exp_param_t     *params)
{
  /*
   * Replace the Boltzmann factor set of vc by a copy of `params`, or by a
   * freshly computed default set when `params` is NULL, then refresh the
   * scaling helper arrays.
   *
   * FIX: reset vc->exp_params to NULL right after free(). Previously, the
   * default switch branch left vc->exp_params dangling, and the subsequent
   * vrna_exp_params_rescale() call would dereference the freed memory.
   */
  if (vc) {
    if (vc->exp_params) {
      free(vc->exp_params);
      vc->exp_params = NULL;  /* must not dangle: rescale below tests this pointer */
    }

    if (params) {
      vc->exp_params = vrna_exp_params_copy(params);
    } else {
      switch (vc->type) {
        case VRNA_FC_TYPE_SINGLE:
          vc->exp_params = vrna_exp_params(NULL);
          /* for cofolding (cutpoint set), hairpin-like loops across the cut
           * must not be penalized; lift the minimal loop size restriction */
          if (vc->cutpoint > 0)
            vc->exp_params->model_details.min_loop_size = 0;

          break;

        case VRNA_FC_TYPE_COMPARATIVE:
          vc->exp_params = vrna_exp_params_comparative(vc->n_seq, NULL);
          break;

        default:
          break;
      }
    }

    /* fill additional helper arrays for scaling etc. */
    vrna_exp_params_rescale(vc, NULL);
  }
}
PUBLIC void
vrna_exp_params_rescale(vrna_fold_compound_t *vc,
                        double               *mfe)
{
  /*
   * (Re-)compute the partition function scaling factor pf_scale and refresh
   * the per-length scaling arrays. If `mfe` is given, it is used as the
   * largest known Boltzmann factor; otherwise an average energy per
   * nucleotide for random sequences is assumed.
   */
  vrna_exp_param_t *pf;
  double e_per_nt, kT;
  vrna_md_t *md;

  if (vc) {
    /* make sure Boltzmann factors exist and match the current model */
    if (!vc->exp_params) {
      switch (vc->type) {
        case VRNA_FC_TYPE_SINGLE:
          vc->exp_params = vrna_exp_params(&(vc->params->model_details));
          break;
        case VRNA_FC_TYPE_COMPARATIVE:
          vc->exp_params = vrna_exp_params_comparative(vc->n_seq, &(vc->params->model_details));
          break;
      }
    } else if (memcmp(&(vc->params->model_details),
                      &(vc->exp_params->model_details),
                      sizeof(vrna_md_t)) != 0) {
      /* make sure that model details are matching */
      (void)vrna_md_copy(&(vc->exp_params->model_details), &(vc->params->model_details));
      /* we probably need some mechanism to check whether DP matrices still match the new model settings! */
    }

    pf = vc->exp_params;
    if (pf) {
      kT = pf->kT;
      md = &(pf->model_details);
      /* comparative folding stores kT scaled by the number of sequences */
      if (vc->type == VRNA_FC_TYPE_COMPARATIVE)
        kT /= vc->n_seq;

      /* re-compute scaling factor if necessary */
      if ((mfe) || (pf->pf_scale < 1.)) {
        if (mfe) /* use largest known Boltzmann factor for scaling */
          e_per_nt = *mfe * 1000. / vc->length;
        else /* use mean energy for random sequences: 184.3*length cal for scaling */
          e_per_nt = -185 + (pf->temperature - 37.) * 7.27;

        /* apply user-defined scaling factor to allow scaling for unusually stable/unstable structure ensembles */
        pf->pf_scale = exp(-(md->sfact * e_per_nt) / kT);
      }

      /* scale must never shrink partial partition functions below 1 */
      if (pf->pf_scale < 1.)
        pf->pf_scale = 1.;

      rescale_params(vc);
    }
  }
}
PUBLIC void
vrna_params_prepare(vrna_fold_compound_t *fc,
                    unsigned int         options)
{
  vrna_md_t *md_p;

  /*
   * Ensure fc carries the parameter sets required by `options`. Currently
   * this only prepares Boltzmann factors when VRNA_OPTION_PF is requested.
   */
  if (!fc)
    return;

  /*
   * every vrna_fold_compound_t must have a vrna_param_t structure attached
   * to it that holds the current model details; use it as the reference
   * model below
   */
  md_p = &(fc->params->model_details);

  if (options & VRNA_OPTION_PF) {
    /* drop stale Boltzmann factors whose model differs from the reference */
    if ((fc->exp_params) &&
        (memcmp(md_p, &(fc->exp_params->model_details), sizeof(vrna_md_t)) != 0)) {
      free(fc->exp_params);
      fc->exp_params = NULL;
    }

    if (!fc->exp_params) {
      if (fc->type == VRNA_FC_TYPE_SINGLE)
        fc->exp_params = vrna_exp_params(md_p);
      else
        fc->exp_params = vrna_exp_params_comparative(fc->n_seq, md_p);
    }
  }
}
/*
#####################################
# BEGIN OF STATIC HELPER FUNCTIONS #
#####################################
*/
PRIVATE vrna_param_t *
get_scaled_params(vrna_md_t *md)
{
  /*
   * Build a complete set of free energy parameters rescaled to the
   * temperature given in md. All tabulated values follow
   *   G(T) = H - (H - G(37C)) * (T / T0)
   * where H is the enthalpy (dH tables) and G(37C) the free energy at the
   * measurement temperature Tmeasure (see the "stacks" comment below).
   */
  unsigned int i, j, k, l;
  double tempf;                 /* temperature scaling factor (T+K0)/Tmeasure */
  vrna_param_t *params;

  params = (vrna_param_t *)vrna_alloc(sizeof(vrna_param_t));
  /* remember which parameter file (if any) these values came from */
  memset(params->param_file, '\0', 256);
  if (last_parameter_file() != NULL)
    strncpy(params->param_file, last_parameter_file(), 255);

  params->model_details = *md; /* copy over the model details */
  params->temperature = md->temperature;
  tempf = ((params->temperature + K0) / Tmeasure);

  /* G-quadruplex energies: linear in stack size, logarithmic in linker length */
  for (i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
    for (j = 3 * VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3 * VRNA_GQUAD_MAX_LINKER_LENGTH; j++) {
      double GQuadAlpha_T = (double)GQuadAlphadH - (double)(GQuadAlphadH - GQuadAlpha37) * tempf;
      double GQuadBeta_T = (double)GQuadBetadH - (double)(GQuadBetadH - GQuadBeta37) * tempf;
      params->gquad[i][j] = (int)GQuadAlpha_T * (i - 1) + (int)(((double)GQuadBeta_T) * log(j - 2));
    }
  params->gquadLayerMismatch = (int)((double)GQuadLayerMismatchH - (double)(GQuadLayerMismatchH - GQuadLayerMismatch37) * tempf);
  params->gquadLayerMismatchMax = GQuadLayerMismatchMax;

  /* hairpin loop initiation energies, sizes 0..30 */
  for (i = 0; i < 31; i++)
    params->hairpin[i] = hairpindH[i] - (hairpindH[i] - hairpin37[i]) * tempf;
  /* bulge and internal loop initiation energies, tabulated sizes */
  for (i = 0; i <= MIN2(30, MAXLOOP); i++) {
    params->bulge[i] = bulgedH[i] - (bulgedH[i] - bulge37[i]) * tempf;
    params->internal_loop[i] = internal_loopdH[i] - (internal_loopdH[i] - internal_loop37[i]) *
                               tempf;
  }
  params->lxc = lxc37 * tempf;
  /* sizes beyond the table are extrapolated logarithmically (i continues from
   * the previous loop) */
  for (; i <= MAXLOOP; i++) {
    params->bulge[i] = params->bulge[30] + (int)(params->lxc * log((double)(i) / 30.));
    params->internal_loop[i] = params->internal_loop[30] +
                               (int)(params->lxc * log((double)(i) / 30.));
  }

  params->ninio[2] = niniodH - (niniodH - ninio37) * tempf;
  params->TripleC = TripleCdH - (TripleCdH - TripleC37) * tempf;
  params->MultipleCA = MultipleCAdH - (MultipleCAdH - MultipleCA37) * tempf;
  params->MultipleCB = MultipleCBdH - (MultipleCBdH - MultipleCB37) * tempf;

  /* special tetra-/tri-/hexa-loop bonuses; the string tables hold one entry
   * per 7/5/9 characters (loop sequence + separator) */
  for (i = 0; (i * 7) < strlen(Tetraloops); i++)
    params->Tetraloop_E[i] = TetraloopdH[i] - (TetraloopdH[i] - Tetraloop37[i]) * tempf;
  for (i = 0; (i * 5) < strlen(Triloops); i++)
    params->Triloop_E[i] = TriloopdH[i] - (TriloopdH[i] - Triloop37[i]) * tempf;
  for (i = 0; (i * 9) < strlen(Hexaloops); i++)
    params->Hexaloop_E[i] = HexaloopdH[i] - (HexaloopdH[i] - Hexaloop37[i]) * tempf;

  params->TerminalAU = TerminalAUdH - (TerminalAUdH - TerminalAU37) * tempf;
  params->DuplexInit = DuplexInitdH - (DuplexInitdH - DuplexInit37) * tempf;
  params->MLbase = ML_BASEdH - (ML_BASEdH - ML_BASE37) * tempf;
  for (i = 0; i <= NBPAIRS; i++)
    params->MLintern[i] = ML_interndH - (ML_interndH - ML_intern37) * tempf;
  params->MLclosing = ML_closingdH - (ML_closingdH - ML_closing37) * tempf;

  /* stacks G(T) = H - [H - G(T0)]*T/T0 */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      params->stack[i][j] = stackdH[i][j] - (stackdH[i][j] - stack37[i][j]) * tempf;

  /* mismatches */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j < 5; j++)
      for (k = 0; k < 5; k++) {
        int mm;
        params->mismatchI[i][j][k] = mismatchIdH[i][j][k] -
                                     (mismatchIdH[i][j][k] - mismatchI37[i][j][k]) * tempf;
        params->mismatchH[i][j][k] = mismatchHdH[i][j][k] -
                                     (mismatchHdH[i][j][k] - mismatchH37[i][j][k]) * tempf;
        params->mismatch1nI[i][j][k] = mismatch1nIdH[i][j][k] -
                                       (mismatch1nIdH[i][j][k] - mismatch1nI37[i][j][k]) * tempf; /* interior nx1 loops */
        params->mismatch23I[i][j][k] = mismatch23IdH[i][j][k] -
                                       (mismatch23IdH[i][j][k] - mismatch23I37[i][j][k]) * tempf; /* interior 2x3 loops */
        if (md->dangles) {
          /* multi-loop and exterior mismatches act like dangles: clamp to <= 0 */
          mm = mismatchMdH[i][j][k] -
               (mismatchMdH[i][j][k] - mismatchM37[i][j][k]) * tempf;
          params->mismatchM[i][j][k] = (mm > 0) ? 0 : mm;
          mm = mismatchExtdH[i][j][k] -
               (mismatchExtdH[i][j][k] - mismatchExt37[i][j][k]) * tempf;
          params->mismatchExt[i][j][k] = (mm > 0) ? 0 : mm;
        } else {
          params->mismatchM[i][j][k] = params->mismatchExt[i][j][k] = 0;
        }
      }

  /* dangles */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j < 5; j++) {
      int dd;
      dd = dangle5_dH[i][j] - (dangle5_dH[i][j] - dangle5_37[i][j]) * tempf;
      params->dangle5[i][j] = (dd > 0) ? 0 : dd; /* must be <= 0 */
      dd = dangle3_dH[i][j] - (dangle3_dH[i][j] - dangle3_37[i][j]) * tempf;
      params->dangle3[i][j] = (dd > 0) ? 0 : dd; /* must be <= 0 */
    }

  /* interior 1x1 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++)
          params->int11[i][j][k][l] = int11_dH[i][j][k][l] -
                                      (int11_dH[i][j][k][l] - int11_37[i][j][k][l]) * tempf;

  /* interior 2x1 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m;
          for (m = 0; m < 5; m++)
            params->int21[i][j][k][l][m] = int21_dH[i][j][k][l][m] -
                                           (int21_dH[i][j][k][l][m] - int21_37[i][j][k][l][m]) *
                                           tempf;
        }

  /* interior 2x2 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m, n;
          for (m = 0; m < 5; m++)
            for (n = 0; n < 5; n++)
              params->int22[i][j][k][l][m][n] = int22_dH[i][j][k][l][m][n] -
                                                (int22_dH[i][j][k][l][m][n] -
                                                 int22_37[i][j][k][l][m][n]) * tempf;
        }

  /* copy the special loop sequence tables verbatim */
  strncpy(params->Tetraloops, Tetraloops, 281);
  strncpy(params->Triloops, Triloops, 241);
  strncpy(params->Hexaloops, Hexaloops, 361);

  params->id = ++id;
  return params;
}
PRIVATE vrna_exp_param_t *
get_scaled_exp_params(vrna_md_t *md,
                      double    pfs)
{
  /*
   * Build a complete set of Boltzmann factors for the model md.
   * Each free energy is first rescaled to the requested temperature via
   * G(T) = H - (H - G(37C)) * T/T0 and then converted to a Boltzmann weight
   * exp(-G * 10 / kT); the factor 10 matches SCALE above (energies are kept
   * in units of 10 cal/mol). `pfs` presets the partition function scaling
   * factor (-1. means "determine later").
   */
  unsigned int i, j, k, l;
  double kT, TT;        /* kT in cal/mol, TT = temperature scaling factor */
  double GT;            /* temperature-rescaled free energy (scratch) */
  vrna_exp_param_t *pf;

  pf = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));
  memset(pf->param_file, '\0', 256);
  if (last_parameter_file() != NULL)
    strncpy(pf->param_file, last_parameter_file(), 255);

  pf->model_details = *md;
  pf->temperature = md->temperature;
  pf->alpha = md->betaScale;
  pf->kT = kT = md->betaScale * (md->temperature + K0) * GASCONST; /* kT in cal/mol */
  pf->pf_scale = pfs;
  TT = (md->temperature + K0) / (Tmeasure);

  /* G-quadruplexes: linear in stack size, logarithmic in linker length */
  for (i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
    for (j = 3 * VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3 * VRNA_GQUAD_MAX_LINKER_LENGTH; j++) {
      double GQuadAlpha_T = (double)GQuadAlphadH - (double)(GQuadAlphadH - GQuadAlpha37) * TT;
      double GQuadBeta_T = (double)GQuadBetadH - (double)(GQuadBetadH - GQuadBeta37) * TT;
      GT = ((double)GQuadAlpha_T) * ((double)(i - 1)) + ((double)GQuadBeta_T) *
           log(((double)j) - 2.);
      pf->expgquad[i][j] = exp(-GT * 10. / kT);
    }
  GT = (double)GQuadLayerMismatchH - (double)(GQuadLayerMismatchH - GQuadLayerMismatch37) * TT;
  pf->expgquadLayerMismatch = exp(-GT * 10. / kT);
  pf->gquadLayerMismatchMax = GQuadLayerMismatchMax;

  /* loop energies: hairpins, bulges, interior, multi-loops */
  for (i = 0; i < 31; i++) {
    GT = hairpindH[i] - (hairpindH[i] - hairpin37[i]) * TT;
    pf->exphairpin[i] = exp(-GT * 10. / kT);
  }
  for (i = 0; i <= MIN2(30, MAXLOOP); i++) {
    GT = bulgedH[i] - (bulgedH[i] - bulge37[i]) * TT;
    pf->expbulge[i] = exp(-GT * 10. / kT);
    GT = internal_loopdH[i] - (internal_loopdH[i] - internal_loop37[i]) * TT;
    pf->expinternal[i] = exp(-GT * 10. / kT);
  }
  /* special case of size 2 interior loops (single mismatch) */
  if (james_rule)
    pf->expinternal[2] = exp(-80 * 10. / kT);

  pf->lxc = lxc37 * TT;
  GT = DuplexInitdH - (DuplexInitdH - DuplexInit37) * TT;
  pf->expDuplexInit = exp(-GT * 10. / kT);
  /* loop sizes beyond the table: logarithmic extrapolation */
  for (i = 31; i <= MAXLOOP; i++) {
    GT = bulge37[30] * TT + (pf->lxc * log(i / 30.));
    pf->expbulge[i] = exp(-GT * 10. / kT);
    GT = internal_loop37[30] * TT + (pf->lxc * log(i / 30.));
    pf->expinternal[i] = exp(-GT * 10. / kT);
  }

  /* asymmetry penalty for interior loops (Ninio), capped at MAX_NINIO */
  GT = niniodH - (niniodH - ninio37) * TT;
  for (j = 0; j <= MAXLOOP; j++)
    pf->expninio[2][j] = exp(-MIN2(MAX_NINIO, j * GT) * 10. / kT);

  /* special tetra-/tri-/hexa-loop bonuses (7/5/9 chars per table entry) */
  for (i = 0; (i * 7) < strlen(Tetraloops); i++) {
    GT = TetraloopdH[i] - (TetraloopdH[i] - Tetraloop37[i]) * TT;
    pf->exptetra[i] = exp(-GT * 10. / kT);
  }
  for (i = 0; (i * 5) < strlen(Triloops); i++) {
    GT = TriloopdH[i] - (TriloopdH[i] - Triloop37[i]) * TT;
    pf->exptri[i] = exp(-GT * 10. / kT);
  }
  for (i = 0; (i * 9) < strlen(Hexaloops); i++) {
    GT = HexaloopdH[i] - (HexaloopdH[i] - Hexaloop37[i]) * TT;
    pf->exphex[i] = exp(-GT * 10. / kT);
  }

  GT = ML_closingdH - (ML_closingdH - ML_closing37) * TT;
  pf->expMLclosing = exp(-GT * 10. / kT);
  for (i = 0; i <= NBPAIRS; i++) {
    GT = ML_interndH - (ML_interndH - ML_intern37) * TT;
    /* if (i>2) GT += TerminalAU; */
    pf->expMLintern[i] = exp(-GT * 10. / kT);
  }
  GT = TerminalAUdH - (TerminalAUdH - TerminalAU37) * TT;
  pf->expTermAU = exp(-GT * 10. / kT);
  GT = ML_BASEdH - (ML_BASEdH - ML_BASE37) * TT;
  pf->expMLbase = exp(-10. * GT / kT);

  /* if dangles==0 just set their energy to 0,
   * don't let dangle energies become > 0 (at large temps),
   * but make sure go smoothly to 0 (SMOOTH above) */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= 4; j++) {
      if (md->dangles) {
        GT = dangle5_dH[i][j] - (dangle5_dH[i][j] - dangle5_37[i][j]) * TT;
        pf->expdangle5[i][j] = exp(SMOOTH(-GT) * 10. / kT);
        GT = dangle3_dH[i][j] - (dangle3_dH[i][j] - dangle3_37[i][j]) * TT;
        pf->expdangle3[i][j] = exp(SMOOTH(-GT) * 10. / kT);
      } else {
        pf->expdangle3[i][j] = pf->expdangle5[i][j] = 1;
      }
    }

  /* stacking energies */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++) {
      GT = stackdH[i][j] - (stackdH[i][j] - stack37[i][j]) * TT;
      pf->expstack[i][j] = exp(-GT * 10. / kT);
    }

  /* mismatch energies */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j < 5; j++)
      for (k = 0; k < 5; k++) {
        GT = mismatchIdH[i][j][k] -
             (mismatchIdH[i][j][k] - mismatchI37[i][j][k]) * TT;
        pf->expmismatchI[i][j][k] = exp(-GT * 10.0 / kT);
        GT = mismatch1nIdH[i][j][k] -
             (mismatch1nIdH[i][j][k] - mismatch1nI37[i][j][k]) * TT;
        pf->expmismatch1nI[i][j][k] = exp(-GT * 10.0 / kT);
        GT = mismatchHdH[i][j][k] -
             (mismatchHdH[i][j][k] - mismatchH37[i][j][k]) * TT;
        pf->expmismatchH[i][j][k] = exp(-GT * 10.0 / kT);
        if (md->dangles) {
          /* dangle-like terms are smoothed towards 0 like the dangles above */
          GT = mismatchMdH[i][j][k] -
               (mismatchMdH[i][j][k] - mismatchM37[i][j][k]) * TT;
          pf->expmismatchM[i][j][k] = exp(SMOOTH(-GT) * 10.0 / kT);
          GT = mismatchExtdH[i][j][k] -
               (mismatchExtdH[i][j][k] - mismatchExt37[i][j][k]) * TT;
          pf->expmismatchExt[i][j][k] = exp(SMOOTH(-GT) * 10.0 / kT);
        } else {
          pf->expmismatchM[i][j][k] = pf->expmismatchExt[i][j][k] = 1.;
        }

        GT = mismatch23IdH[i][j][k] -
             (mismatch23IdH[i][j][k] - mismatch23I37[i][j][k]) * TT;
        pf->expmismatch23I[i][j][k] = exp(-GT * 10.0 / kT);
      }

  /* interior loops of length 2 (1x1) */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          GT = int11_dH[i][j][k][l] -
               (int11_dH[i][j][k][l] - int11_37[i][j][k][l]) * TT;
          pf->expint11[i][j][k][l] = exp(-GT * 10. / kT);
        }

  /* interior 2x1 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m;
          for (m = 0; m < 5; m++) {
            GT = int21_dH[i][j][k][l][m] -
                 (int21_dH[i][j][k][l][m] - int21_37[i][j][k][l][m]) * TT;
            pf->expint21[i][j][k][l][m] = exp(-GT * 10. / kT);
          }
        }

  /* interior 2x2 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m, n;
          for (m = 0; m < 5; m++)
            for (n = 0; n < 5; n++) {
              GT = int22_dH[i][j][k][l][m][n] -
                   (int22_dH[i][j][k][l][m][n] - int22_37[i][j][k][l][m][n]) * TT;
              pf->expint22[i][j][k][l][m][n] = exp(-GT * 10. / kT);
            }
        }

  /* copy the special loop sequence tables verbatim */
  strncpy(pf->Tetraloops, Tetraloops, 281);
  strncpy(pf->Triloops, Triloops, 241);
  strncpy(pf->Hexaloops, Hexaloops, 361);

  return pf;
}
PRIVATE vrna_exp_param_t *
get_exp_params_ali(vrna_md_t    *md,
                   unsigned int n_seq,
                   double       pfs)
{
  /*
   * Comparative (alignment) variant of get_scaled_exp_params(): energies of
   * the n_seq sequences are summed, so kT is multiplied by n_seq (kTn) and
   * the per-column terms are weighted accordingly. NOTE(review): unlike the
   * single-sequence variant, param_file is not initialized here.
   */
  /* scale energy parameters and pre-calculate Boltzmann weights */
  unsigned int i, j, k, l;
  double kTn, TT;       /* kTn = n_seq * kT; TT = temperature scaling factor */
  double GT;            /* temperature-rescaled free energy (scratch) */
  vrna_exp_param_t *pf;

  pf = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));
  pf->model_details = *md;
  pf->alpha = md->betaScale;
  pf->temperature = md->temperature;
  pf->pf_scale = pfs;
  pf->kT = kTn = ((double)n_seq) * md->betaScale * (md->temperature + K0) * GASCONST; /* kT in cal/mol */
  TT = (md->temperature + K0) / (Tmeasure);

  /* G-quadruplexes: linear in stack size, logarithmic in linker length */
  for (i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
    for (j = 3 * VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3 * VRNA_GQUAD_MAX_LINKER_LENGTH; j++) {
      double GQuadAlpha_T = (double)GQuadAlphadH - (double)(GQuadAlphadH - GQuadAlpha37) * TT;
      double GQuadBeta_T = (double)GQuadBetadH - (double)(GQuadBetadH - GQuadBeta37) * TT;
      GT = ((double)GQuadAlpha_T) * ((double)(i - 1)) + ((double)GQuadBeta_T) *
           log(((double)j) - 2.);
      pf->expgquad[i][j] = exp(-GT * 10. / kTn);
    }
  GT = (double)GQuadLayerMismatchH - (double)(GQuadLayerMismatchH - GQuadLayerMismatch37) * TT;
  pf->expgquadLayerMismatch = exp(-GT * 10. / kTn);
  pf->gquadLayerMismatchMax = GQuadLayerMismatchMax;

  /* loop energies: hairpins, bulges, interior, multi-loops */
  for (i = 0; i < 31; i++) {
    GT = hairpindH[i] - (hairpindH[i] - hairpin37[i]) * TT;
    pf->exphairpin[i] = exp(-GT * 10. / kTn);
  }
  /* add penalty for too short hairpins */
  for (i = 0; i < 3; i++) {
    GT = 600 /*Penalty*/ * TT;
    pf->exphairpin[i] = exp(-GT * 10. / kTn);
  }

  for (i = 0; i <= MIN2(30, MAXLOOP); i++) {
    GT = bulgedH[i] - (bulgedH[i] - bulge37[i]) * TT;
    pf->expbulge[i] = exp(-GT * 10. / kTn);
    GT = internal_loopdH[i] - (internal_loopdH[i] - internal_loop37[i]) * TT;
    pf->expinternal[i] = exp(-GT * 10. / kTn);
  }
  /* special case of size 2 interior loops (single mismatch) */
  if (james_rule)
    pf->expinternal[2] = exp(-80 * 10. / kTn);

  pf->lxc = lxc37 * TT;
  GT = DuplexInitdH - (DuplexInitdH - DuplexInit37) * TT;
  pf->expDuplexInit = exp(-GT * 10. / kTn);
  /* loop sizes beyond the table: logarithmic extrapolation */
  for (i = 31; i <= MAXLOOP; i++) {
    GT = bulge37[30] * TT + (pf->lxc * log(i / 30.));
    pf->expbulge[i] = exp(-GT * 10. / kTn);
    GT = internal_loop37[30] * TT + (pf->lxc * log(i / 30.));
    pf->expinternal[i] = exp(-GT * 10. / kTn);
  }

  /* asymmetry penalty for interior loops (Ninio), capped at MAX_NINIO */
  GT = niniodH - (niniodH - ninio37) * TT;
  for (j = 0; j <= MAXLOOP; j++)
    pf->expninio[2][j] = exp(-MIN2(MAX_NINIO, j * GT) * 10. / kTn);

  /* special tetra-/tri-/hexa-loop bonuses (7/5/9 chars per table entry) */
  for (i = 0; (i * 7) < strlen(Tetraloops); i++) {
    GT = TetraloopdH[i] - (TetraloopdH[i] - Tetraloop37[i]) * TT;
    pf->exptetra[i] = exp(-GT * 10. / kTn);
  }
  for (i = 0; (i * 5) < strlen(Triloops); i++) {
    GT = TriloopdH[i] - (TriloopdH[i] - Triloop37[i]) * TT;
    pf->exptri[i] = exp(-GT * 10. / kTn);
  }
  for (i = 0; (i * 9) < strlen(Hexaloops); i++) {
    GT = HexaloopdH[i] - (HexaloopdH[i] - Hexaloop37[i]) * TT;
    pf->exphex[i] = exp(-GT * 10. / kTn);
  }

  GT = ML_closingdH - (ML_closingdH - ML_closing37) * TT;
  pf->expMLclosing = exp(-GT * 10. / kTn);
  for (i = 0; i <= NBPAIRS; i++) {
    /* includes AU penalty */
    GT = ML_interndH - (ML_interndH - ML_intern37) * TT;
    /* if (i>2) GT += TerminalAU; */
    pf->expMLintern[i] = exp(-GT * 10. / kTn);
  }
  GT = TerminalAUdH - (TerminalAUdH - TerminalAU37) * TT;
  pf->expTermAU = exp(-GT * 10. / kTn);
  GT = ML_BASEdH - (ML_BASEdH - ML_BASE37) * TT;
  /* the multi-loop base penalty is applied per alignment column, not per
   * sequence, hence the division by n_seq */
  pf->expMLbase = exp(-10. * GT / (kTn / n_seq));

  /* if dangle_model==0 just set their energy to 0,
   * don't let dangle energies become > 0 (at large temps),
   * but make sure go smoothly to 0 (SMOOTH above) */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= 4; j++) {
      if (md->dangles) {
        GT = dangle5_dH[i][j] - (dangle5_dH[i][j] - dangle5_37[i][j]) * TT;
        pf->expdangle5[i][j] = exp(SMOOTH(-GT) * 10. / kTn);
        GT = dangle3_dH[i][j] - (dangle3_dH[i][j] - dangle3_37[i][j]) * TT;
        pf->expdangle3[i][j] = exp(SMOOTH(-GT) * 10. / kTn);
      } else {
        pf->expdangle3[i][j] = pf->expdangle5[i][j] = 1;
      }
    }

  /* stacking energies */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++) {
      GT = stackdH[i][j] - (stackdH[i][j] - stack37[i][j]) * TT;
      pf->expstack[i][j] = exp(-GT * 10. / kTn);
    }

  /* mismatch energies */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j < 5; j++)
      for (k = 0; k < 5; k++) {
        GT = mismatchIdH[i][j][k] -
             (mismatchIdH[i][j][k] - mismatchI37[i][j][k]) * TT;
        pf->expmismatchI[i][j][k] = exp(-GT * 10.0 / kTn);
        GT = mismatch1nIdH[i][j][k] -
             (mismatch1nIdH[i][j][k] - mismatch1nI37[i][j][k]) * TT;
        pf->expmismatch1nI[i][j][k] = exp(-GT * 10.0 / kTn);
        GT = mismatchHdH[i][j][k] -
             (mismatchHdH[i][j][k] - mismatchH37[i][j][k]) * TT;
        pf->expmismatchH[i][j][k] = exp(-GT * 10.0 / kTn);
        if (md->dangles) {
          /* dangle-like terms are smoothed towards 0 like the dangles above */
          GT = mismatchMdH[i][j][k] -
               (mismatchMdH[i][j][k] - mismatchM37[i][j][k]) * TT;
          pf->expmismatchM[i][j][k] = exp(SMOOTH(-GT) * 10.0 / kTn);
          GT = mismatchExtdH[i][j][k] -
               (mismatchExtdH[i][j][k] - mismatchExt37[i][j][k]) * TT;
          pf->expmismatchExt[i][j][k] = exp(SMOOTH(-GT) * 10.0 / kTn);
        } else {
          pf->expmismatchM[i][j][k] = pf->expmismatchExt[i][j][k] = 1.;
        }

        GT = mismatch23IdH[i][j][k] -
             (mismatch23IdH[i][j][k] - mismatch23I37[i][j][k]) * TT;
        pf->expmismatch23I[i][j][k] = exp(-GT * 10.0 / kTn);
      }

  /* interior loops of length 2 (1x1) */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          GT = int11_dH[i][j][k][l] -
               (int11_dH[i][j][k][l] - int11_37[i][j][k][l]) * TT;
          pf->expint11[i][j][k][l] = exp(-GT * 10. / kTn);
        }

  /* interior 2x1 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m;
          for (m = 0; m < 5; m++) {
            GT = int21_dH[i][j][k][l][m] -
                 (int21_dH[i][j][k][l][m] - int21_37[i][j][k][l][m]) * TT;
            pf->expint21[i][j][k][l][m] = exp(-GT * 10. / kTn);
          }
        }

  /* interior 2x2 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m, n;
          for (m = 0; m < 5; m++)
            for (n = 0; n < 5; n++) {
              GT = int22_dH[i][j][k][l][m][n] -
                   (int22_dH[i][j][k][l][m][n] - int22_37[i][j][k][l][m][n]) * TT;
              pf->expint22[i][j][k][l][m][n] = exp(-GT * 10. / kTn);
            }
        }

  /* copy the special loop sequence tables verbatim */
  strncpy(pf->Tetraloops, Tetraloops, 281);
  strncpy(pf->Triloops, Triloops, 241);
  strncpy(pf->Hexaloops, Hexaloops, 361);

  return pf;
}
PRIVATE void
rescale_params(vrna_fold_compound_t *vc)
{
  /*
   * Fill the per-length helper arrays scale[] (powers of 1/pf_scale) and
   * expMLbase[] (scaled multi-loop base contributions) up to vc->length.
   *
   * FIX: the loop counter was a signed int compared against the unsigned
   * vc->length; use unsigned int to avoid the signed/unsigned comparison.
   */
  unsigned int     i;
  vrna_exp_param_t *pf = vc->exp_params;
  vrna_mx_pf_t     *m  = vc->exp_matrices;

  if (m && pf) {
    m->scale[0]     = 1.;
    m->scale[1]     = (FLT_OR_DBL)(1. / pf->pf_scale);
    m->expMLbase[0] = 1;
    m->expMLbase[1] = (FLT_OR_DBL)(pf->expMLbase / pf->pf_scale);
    for (i = 2; i <= vc->length; i++) {
      /* scale[i] = pf_scale^{-i}, built by halving to limit rounding error */
      m->scale[i]     = m->scale[i / 2] * m->scale[i - (i / 2)];
      m->expMLbase[i] = (FLT_OR_DBL)pow(pf->expMLbase, (double)i) * m->scale[i];
    }
  }
}
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
PUBLIC vrna_param_t *
scale_parameters(void)
{
  /* deprecated: build scaled parameters from the current global model settings */
  vrna_md_t    md;
  vrna_param_t *result;

  set_model_details(&md);
  result = vrna_params(&md);

  return result;
}
PUBLIC vrna_param_t *
get_scaled_parameters(double    temp,
                      vrna_md_t md)
{
  /* deprecated: scale parameters for model md at temperature temp;
   * md is passed by value, so modifying the local copy is safe */
  vrna_md_t md_tmp = md;

  md_tmp.temperature = temp;

  return get_scaled_params(&md_tmp);
}
PUBLIC vrna_exp_param_t *
get_boltzmann_factors(double    temp,
                      double    betaScale,
                      vrna_md_t md,
                      double    pfs)
{
  /* deprecated: Boltzmann factors for model md at the given temperature
   * and beta scaling; also updates the global pf_scale as a side effect */
  vrna_md_t md_tmp = md;

  md_tmp.temperature = temp;
  md_tmp.betaScale   = betaScale;
  pf_scale           = pfs;

  return get_scaled_exp_params(&md_tmp, pfs);
}
PUBLIC vrna_exp_param_t *
get_scaled_pf_parameters(void)
{
  /* deprecated: Boltzmann factors from the current global model settings,
   * with the global pf_scale copied into the result */
  vrna_md_t        md;
  vrna_exp_param_t *params;

  set_model_details(&md);
  params           = vrna_exp_params(&md);
  params->pf_scale = pf_scale;

  return params;
}
PUBLIC vrna_exp_param_t *
get_boltzmann_factors_ali(unsigned int n_seq,
                          double       temp,
                          double       betaScale,
                          vrna_md_t    md,
                          double       pfs)
{
  /* deprecated: comparative Boltzmann factors for n_seq sequences;
   * also updates the global pf_scale as a side effect */
  vrna_md_t md_tmp = md;

  md_tmp.temperature = temp;
  md_tmp.betaScale   = betaScale;
  pf_scale           = pfs;

  return get_exp_params_ali(&md_tmp, n_seq, pfs);
}
PUBLIC vrna_exp_param_t *
get_scaled_alipf_parameters(unsigned int n_seq)
{
  /* deprecated: comparative Boltzmann factors from the current global
   * model settings and the global pf_scale */
  vrna_md_t model;

  set_model_details(&model);

  return get_exp_params_ali(&model, n_seq, pf_scale);
}
PUBLIC vrna_exp_param_t *
get_boltzmann_factor_copy(vrna_exp_param_t *par)
{
  /* deprecated alias for vrna_exp_params_copy() */
  vrna_exp_param_t *dup = vrna_exp_params_copy(par);

  return dup;
}
PUBLIC vrna_param_t *
get_parameter_copy(vrna_param_t *par)
{
  /* deprecated alias for vrna_params_copy() */
  vrna_param_t *dup = vrna_params_copy(par);

  return dup;
}
PUBLIC vrna_param_t *
copy_parameters(void)
{
  /* deprecated: hand out a copy of the cached global parameter set; if the
   * cache is stale (id mismatch), build fresh parameters instead */
  vrna_param_t *dup;

  if (p.id != id) {
    vrna_md_t md;
    set_model_details(&md);
    return vrna_params(&md);
  }

  dup = (vrna_param_t *)vrna_alloc(sizeof(vrna_param_t));
  memcpy(dup, &p, sizeof(vrna_param_t));

  return dup;
}
PUBLIC vrna_param_t *
set_parameters(vrna_param_t *dest)
{
  /* deprecated: overwrite the cached global parameter set with *dest and
   * return a pointer to the cache.
   * NOTE(review): dest must be non-NULL — no guard is performed here */
  memcpy(&p, dest, sizeof(p));

  return &p;
}
PUBLIC vrna_exp_param_t *
copy_pf_param(void)
{
  /* deprecated: hand out a copy of the cached global Boltzmann factors; if
   * the cache is stale (id mismatch), build fresh ones instead */
  vrna_exp_param_t *dup;

  if (pf.id != pf_id) {
    vrna_md_t md;
    set_model_details(&md);
    dup           = vrna_exp_params(&md);
    dup->pf_scale = pf_scale;
    return dup;
  }

  dup = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));
  memcpy(dup, &pf, sizeof(vrna_exp_param_t));

  return dup;
}
PUBLIC vrna_exp_param_t *
set_pf_param(vrna_param_t *dest)
{
  /*
   * Deprecated: overwrite the cached global Boltzmann factor set with the
   * contents of *dest and return a pointer to the cache.
   *
   * NOTE(review): the parameter is declared vrna_param_t * but
   * sizeof(vrna_exp_param_t) bytes are copied from it — callers apparently
   * must actually pass a vrna_exp_param_t *. The signature is public API
   * and kept as-is for backward compatibility; confirm before changing.
   */
  memcpy(&pf, dest, sizeof(vrna_exp_param_t));
  return &pf;
}
PUBLIC vrna_exp_param_t *
scale_pf_parameters(void)
{
  /* deprecated: Boltzmann factors from the current global model settings,
   * with the global pf_scale copied into the result */
  vrna_md_t        model;
  vrna_exp_param_t *result;

  set_model_details(&model);
  result           = vrna_exp_params(&model);
  result->pf_scale = pf_scale;

  return result;
}
#endif
|
GB_unop__atanh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atanh_fc32_fc32)
// op(A') function: GB (_unop_tran__atanh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = catanhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = catanhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = catanhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATANH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = catanhf (Ax [p]) for all entries; auto-generated — logic kept verbatim */
GrB_Info GB (_unop_apply__atanh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = catanhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap flag marks no entry
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = catanhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = catanhf (A'): the transpose loop body comes from the shared template
 * GB_unop_transpose.c, parameterized by the GB_* macros defined above */
GrB_Info GB (_unop_tran__atanh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
par_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
/*--------------------------------------------------------------------------
* OLD NOTES:
* Sketch of John's code to build RAP
*
* Uses two integer arrays icg and ifg as marker arrays
*
* icg needs to be of size n_fine; size of ia.
 * A negative value of icg(i) indicates i is an f-point; otherwise
 * icg(i) converts from fine to coarse grid orderings.
 * Note that I believe the code assumes that if i<j and both are
 * c-points, then icg(i) < icg(j).
* ifg needs to be of size n_coarse; size of irap
* I don't think it has meaning as either input or output.
*
* In the code, both the interpolation and restriction operator
 * are stored row-wise in the array b. If i is an f-point,
 * ib(i) points to the row of the interpolation operator for point
 * i. If i is a c-point, ib(i) points to the row of the restriction
 * operator for point i.
*
 * In the CSR storage for rap, it is guaranteed that the rows will
 * be ordered ( i.e. ic<jc -> irap(ic) < irap(jc)) but I don't
 * think there is a guarantee that the entries within a row will
 * be ordered in any way except that the diagonal entry comes first.
*
* As structured now, the code requires that the size of rap be
* predicted up front. To avoid this, one could execute the code
* twice, the first time would only keep track of icg ,ifg and ka.
* Then you would know how much memory to allocate for rap and jrap.
 * The second time would fill in these arrays. Actually you might
 * be able to include the filling in of jrap into the first pass;
 * just overestimate its size (it is an integer array) and cut it
 * back before the second time through. This would avoid some if tests
* in the second pass.
*
* Questions
* 1) parallel (PetSc) version?
* 2) what if we don't store R row-wise and don't
* even want to store a copy of it in this form
* temporarily?
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_ExchangeRAPData
 *
 * Exchanges boundary rows of R*A*P among neighboring processors.
 * RAP_int holds the rows this processor computed for coarse points owned
 * by other processors; each such row is shipped to its owner, and the
 * returned matrix RAP_ext collects the rows other processors computed
 * for coarse points owned locally.
 *
 * RT's communication package is reused with send/recv roles swapped:
 * its recv pattern says where RAP_int rows must go, and its send pattern
 * describes what arrives in RAP_ext.
 *
 * RAP_int is only dereferenced when num_recvs > 0, so it may be NULL
 * when this processor has nothing to send.
 * Returns a newly created CSR matrix; ownership passes to the caller.
 *--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ExchangeRAPData( hypre_CSRMatrix *RAP_int,
hypre_ParCSRCommPkg *comm_pkg_RT)
{
HYPRE_Int *RAP_int_i;
HYPRE_Int *RAP_int_j = NULL;
HYPRE_Real *RAP_int_data = NULL;
HYPRE_Int num_cols = 0;
MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_RT);
HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_RT);
HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_RT);
HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_RT);
HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
hypre_CSRMatrix *RAP_ext;
HYPRE_Int *RAP_ext_i;
HYPRE_Int *RAP_ext_j = NULL;
HYPRE_Real *RAP_ext_data = NULL;
hypre_ParCSRCommHandle *comm_handle = NULL;
hypre_ParCSRCommPkg *tmp_comm_pkg;
HYPRE_Int *jdata_recv_vec_starts;
HYPRE_Int *jdata_send_map_starts;
HYPRE_Int num_rows;
HYPRE_Int num_nonzeros;
HYPRE_Int i, j;
HYPRE_Int num_procs;
hypre_MPI_Comm_size(comm,&num_procs);
/* one received row per entry of RT's send map (roles are swapped here);
 * CTAlloc zero-initializes, so unreceived entries stay 0 */
RAP_ext_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1);
jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs+1);
jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1);
/*--------------------------------------------------------------------------
 * recompute RAP_int_i so that RAP_int_i[j+1] contains the number of
 * elements of row j (to be determined through send_map_elmnts on the
 * receiving end)
 *--------------------------------------------------------------------------*/
if (num_recvs)
{
RAP_int_i = hypre_CSRMatrixI(RAP_int);
RAP_int_j = hypre_CSRMatrixJ(RAP_int);
RAP_int_data = hypre_CSRMatrixData(RAP_int);
num_cols = hypre_CSRMatrixNumCols(RAP_int);
}
/* record where each recv-processor's j/data segment ends in RAP_int */
jdata_recv_vec_starts[0] = 0;
for (i=0; i < num_recvs; i++)
{
jdata_recv_vec_starts[i+1] = RAP_int_i[recv_vec_starts[i+1]];
}
/* convert RAP_int_i in place -- back to front so earlier entries are
 * still row pointers when read -- from CSR row pointers to per-row
 * nonzero counts, which is what gets communicated */
for (i=num_recvs; i > 0; i--)
for (j = recv_vec_starts[i]; j > recv_vec_starts[i-1]; j--)
RAP_int_i[j] -= RAP_int_i[j-1];
/*--------------------------------------------------------------------------
 * initialize communication of the row counts; job type 12 presumably
 * means "HYPRE_Int data, reversed direction" (recv procs send) --
 * confirm against hypre_ParCSRCommHandleCreate
 *--------------------------------------------------------------------------*/
if (num_recvs && num_sends)
comm_handle = hypre_ParCSRCommHandleCreate(12,comm_pkg_RT,
&RAP_int_i[1], &RAP_ext_i[1]);
else if (num_recvs)
comm_handle = hypre_ParCSRCommHandleCreate(12,comm_pkg_RT,
&RAP_int_i[1], NULL);
else if (num_sends)
comm_handle = hypre_ParCSRCommHandleCreate(12,comm_pkg_RT,
NULL, &RAP_ext_i[1]);
/* temporary comm package: RT's package with send/recv roles swapped,
 * used below to ship the j and data arrays in the forward direction */
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1);
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = recv_procs;
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = send_procs;
/* destroying the handle waits for the count exchange to complete */
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
/*--------------------------------------------------------------------------
 * compute num_nonzeros for RAP_ext: prefix-sum the received per-row
 * counts into CSR row pointers
 *--------------------------------------------------------------------------*/
for (i=0; i < num_sends; i++)
for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
RAP_ext_i[j+1] += RAP_ext_i[j];
num_rows = send_map_starts[num_sends];
num_nonzeros = RAP_ext_i[num_rows];
if (num_nonzeros)
{
RAP_ext_j = hypre_TAlloc(HYPRE_Int, num_nonzeros);
RAP_ext_data = hypre_TAlloc(HYPRE_Real, num_nonzeros);
}
/* per-processor starts into the j/data arrays on the receiving side */
for (i=0; i < num_sends+1; i++)
{
jdata_send_map_starts[i] = RAP_ext_i[send_map_starts[i]];
}
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_send_map_starts;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_recv_vec_starts;
/* job 1: HYPRE_Real data through the swapped package */
comm_handle = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,RAP_int_data,
RAP_ext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
/* job 11: HYPRE_Int data (column indices) through the swapped package */
comm_handle = hypre_ParCSRCommHandleCreate(11,tmp_comm_pkg,RAP_int_j,
RAP_ext_j);
/* assemble the result; RAP_ext takes ownership of i/j/data arrays */
RAP_ext = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros);
hypre_CSRMatrixI(RAP_ext) = RAP_ext_i;
if (num_nonzeros)
{
hypre_CSRMatrixJ(RAP_ext) = RAP_ext_j;
hypre_CSRMatrixData(RAP_ext) = RAP_ext_data;
}
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
hypre_TFree(jdata_recv_vec_starts);
hypre_TFree(jdata_send_map_starts);
hypre_TFree(tmp_comm_pkg);
return RAP_ext;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBuildCoarseOperator
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperator( hypre_ParCSRMatrix  *RT,
                                    hypre_ParCSRMatrix  *A,
                                    hypre_ParCSRMatrix  *P,
                                    hypre_ParCSRMatrix **RAP_ptr )
{
   /* Build the Galerkin coarse-grid operator RAP by delegating to the
    * KT variant; keepTranspose = 0 means the transpose of R is not
    * retained after the triple product. */
   hypre_BoomerAMGBuildCoarseOperatorKT(RT, A, P, 0, RAP_ptr);

   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperatorKT( hypre_ParCSRMatrix *RT,
hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *P,
HYPRE_Int keepTranspose,
hypre_ParCSRMatrix **RAP_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *RT_diag = hypre_ParCSRMatrixDiag(RT);
hypre_CSRMatrix *RT_offd = hypre_ParCSRMatrixOffd(RT);
HYPRE_Int num_cols_diag_RT = hypre_CSRMatrixNumCols(RT_diag);
HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols(RT_offd);
HYPRE_Int num_rows_offd_RT = hypre_CSRMatrixNumRows(RT_offd);
hypre_ParCSRCommPkg *comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
HYPRE_Int num_recvs_RT = 0;
HYPRE_Int num_sends_RT = 0;
HYPRE_Int *send_map_starts_RT;
HYPRE_Int *send_map_elmts_RT;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
HYPRE_Int *col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_Int first_col_diag_P = hypre_ParCSRMatrixFirstColDiag(P);
HYPRE_Int last_col_diag_P;
HYPRE_Int num_cols_diag_P = hypre_CSRMatrixNumCols(P_diag);
HYPRE_Int num_cols_offd_P = hypre_CSRMatrixNumCols(P_offd);
HYPRE_Int *coarse_partitioning = hypre_ParCSRMatrixColStarts(P);
HYPRE_Int *RT_partitioning = hypre_ParCSRMatrixColStarts(RT);
hypre_ParCSRMatrix *RAP;
HYPRE_Int *col_map_offd_RAP = NULL;
HYPRE_Int *new_col_map_offd_RAP = NULL;
hypre_CSRMatrix *RAP_int = NULL;
HYPRE_Real *RAP_int_data;
HYPRE_Int *RAP_int_i;
HYPRE_Int *RAP_int_j;
hypre_CSRMatrix *RAP_ext;
HYPRE_Real *RAP_ext_data = NULL;
HYPRE_Int *RAP_ext_i = NULL;
HYPRE_Int *RAP_ext_j = NULL;
hypre_CSRMatrix *RAP_diag;
HYPRE_Real *RAP_diag_data;
HYPRE_Int *RAP_diag_i;
HYPRE_Int *RAP_diag_j;
hypre_CSRMatrix *RAP_offd;
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_i = NULL;
HYPRE_Int *RAP_offd_j = NULL;
HYPRE_Int RAP_size;
HYPRE_Int RAP_ext_size;
HYPRE_Int RAP_diag_size;
HYPRE_Int RAP_offd_size;
HYPRE_Int P_ext_diag_size;
HYPRE_Int P_ext_offd_size;
HYPRE_Int first_col_diag_RAP;
HYPRE_Int last_col_diag_RAP;
HYPRE_Int num_cols_offd_RAP = 0;
hypre_CSRMatrix *R_diag;
HYPRE_Real *R_diag_data;
HYPRE_Int *R_diag_i;
HYPRE_Int *R_diag_j;
hypre_CSRMatrix *R_offd;
HYPRE_Real *R_offd_data;
HYPRE_Int *R_offd_i;
HYPRE_Int *R_offd_j;
HYPRE_Real *RA_diag_data_array = NULL;
HYPRE_Int *RA_diag_j_array = NULL;
HYPRE_Real *RA_offd_data_array = NULL;
HYPRE_Int *RA_offd_j_array = NULL;
hypre_CSRMatrix *Ps_ext;
HYPRE_Real *Ps_ext_data;
HYPRE_Int *Ps_ext_i;
HYPRE_Int *Ps_ext_j;
HYPRE_Real *P_ext_diag_data = NULL;
HYPRE_Int *P_ext_diag_i = NULL;
HYPRE_Int *P_ext_diag_j = NULL;
HYPRE_Real *P_ext_offd_data = NULL;
HYPRE_Int *P_ext_offd_i = NULL;
HYPRE_Int *P_ext_offd_j = NULL;
HYPRE_Int *col_map_offd_Pext;
HYPRE_Int *map_P_to_Pext = NULL;
HYPRE_Int *map_P_to_RAP = NULL;
HYPRE_Int *map_Pext_to_RAP = NULL;
HYPRE_Int *P_marker;
HYPRE_Int **P_mark_array;
HYPRE_Int **A_mark_array;
HYPRE_Int *A_marker;
HYPRE_Int *temp;
HYPRE_Int n_coarse, n_coarse_RT;
HYPRE_Int square = 1;
HYPRE_Int num_cols_offd_Pext = 0;
HYPRE_Int ic, i, j, k;
HYPRE_Int i1, i2, i3, ii, ns, ne, size, rest;
HYPRE_Int cnt = 0; /*value; */
HYPRE_Int jj1, jj2, jj3, jcol;
HYPRE_Int *jj_count, *jj_cnt_diag, *jj_cnt_offd;
HYPRE_Int jj_counter, jj_count_diag, jj_count_offd;
HYPRE_Int jj_row_begining, jj_row_begin_diag, jj_row_begin_offd;
HYPRE_Int start_indexing = 0; /* start indexing for RAP_data at 0 */
HYPRE_Int num_nz_cols_A;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Real r_entry;
HYPRE_Real r_a_product;
HYPRE_Real r_a_p_product;
HYPRE_Real zero = 0.0;
HYPRE_Int *prefix_sum_workspace;
/*-----------------------------------------------------------------------
* Copy ParCSRMatrix RT into CSRMatrix R so that we have row-wise access
* to restriction .
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
num_threads = hypre_NumThreads();
if (comm_pkg_RT)
{
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
else if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(RT);
comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
hypre_CSRMatrixTranspose(RT_diag,&R_diag,1);
if (num_cols_offd_RT)
{
hypre_CSRMatrixTranspose(RT_offd,&R_offd,1);
R_offd_data = hypre_CSRMatrixData(R_offd);
R_offd_i = hypre_CSRMatrixI(R_offd);
R_offd_j = hypre_CSRMatrixJ(R_offd);
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for R. Also get sizes of fine and
* coarse grids.
*-----------------------------------------------------------------------*/
R_diag_data = hypre_CSRMatrixData(R_diag);
R_diag_i = hypre_CSRMatrixI(R_diag);
R_diag_j = hypre_CSRMatrixJ(R_diag);
n_coarse = hypre_ParCSRMatrixGlobalNumCols(P);
num_nz_cols_A = num_cols_diag_A + num_cols_offd_A;
n_coarse_RT = hypre_ParCSRMatrixGlobalNumCols(RT);
if (n_coarse != n_coarse_RT)
square = 0;
/*-----------------------------------------------------------------------
* Generate Ps_ext, i.e. portion of P that is stored on neighbor procs
* and needed locally for triple matrix product
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedIntMap send_map_elmts_RT_inverse_map;
HYPRE_Int *send_map_elmts_starts_RT_aggregated = NULL;
HYPRE_Int *send_map_elmts_RT_aggregated = NULL;
HYPRE_Int send_map_elmts_RT_inverse_map_initialized =
num_sends_RT > 0 && send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0] > 0;
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntSet send_map_elmts_set;
hypre_UnorderedIntSetCreate(&send_map_elmts_set, 2*(send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int key = send_map_elmts_RT[i];
hypre_UnorderedIntSetPut(&send_map_elmts_set, key);
}
HYPRE_Int send_map_elmts_unique_size;
HYPRE_Int *send_map_elmts_unique = hypre_UnorderedIntSetCopyToArray(&send_map_elmts_set, &send_map_elmts_unique_size);
hypre_UnorderedIntSetDestroy(&send_map_elmts_set);
hypre_UnorderedIntMapCreate(&send_map_elmts_RT_inverse_map, 2*send_map_elmts_unique_size, 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, send_map_elmts_unique[i], i);
}
hypre_TFree(send_map_elmts_unique);
send_map_elmts_starts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_elmts_unique_size + 1);
send_map_elmts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT]);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
send_map_elmts_starts_RT_aggregated[i] = 0;
}
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
#pragma omp atomic
send_map_elmts_starts_RT_aggregated[idx]++;
}
for (i = 0; i < send_map_elmts_unique_size - 1; i++)
{
send_map_elmts_starts_RT_aggregated[i + 1] += send_map_elmts_starts_RT_aggregated[i];
}
send_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT];
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1;
send_map_elmts_RT_aggregated[offset] = i;
}
}
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
if (num_procs > 1)
{
Ps_ext = hypre_ParCSRMatrixExtractBExt(P,A,1);
Ps_ext_data = hypre_CSRMatrixData(Ps_ext);
Ps_ext_i = hypre_CSRMatrixI(Ps_ext);
Ps_ext_j = hypre_CSRMatrixJ(Ps_ext);
}
P_ext_diag_i = hypre_TAlloc(HYPRE_Int,num_cols_offd_A+1);
P_ext_offd_i = hypre_TAlloc(HYPRE_Int,num_cols_offd_A+1);
P_ext_diag_i[0] = 0;
P_ext_offd_i[0] = 0;
P_ext_diag_size = 0;
P_ext_offd_size = 0;
last_col_diag_P = first_col_diag_P + num_cols_diag_P - 1;
/*HYPRE_Int prefix_sum_workspace[2*(num_threads + 1)];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(num_threads + 1));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j)
#endif
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_A);
HYPRE_Int P_ext_diag_size_private = 0;
HYPRE_Int P_ext_offd_size_private = 0;
for (i = i_begin; i < i_end; i++)
{
for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P)
P_ext_offd_size_private++;
else
P_ext_diag_size_private++;
}
hypre_prefix_sum_pair(&P_ext_diag_size_private, &P_ext_diag_size, &P_ext_offd_size_private, &P_ext_offd_size, prefix_sum_workspace);
#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
{
if (P_ext_diag_size)
{
P_ext_diag_j = hypre_CTAlloc(HYPRE_Int, P_ext_diag_size);
P_ext_diag_data = hypre_CTAlloc(HYPRE_Real, P_ext_diag_size);
}
if (P_ext_offd_size)
{
P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size);
P_ext_offd_data = hypre_CTAlloc(HYPRE_Real, P_ext_offd_size);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = i_begin; i < i_end; i++)
{
for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
{
if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P)
{
P_ext_offd_j[P_ext_offd_size_private] = Ps_ext_j[j];
P_ext_offd_data[P_ext_offd_size_private++] = Ps_ext_data[j];
}
else
{
P_ext_diag_j[P_ext_diag_size_private] = Ps_ext_j[j] - first_col_diag_P;
P_ext_diag_data[P_ext_diag_size_private++] = Ps_ext_data[j];
}
}
P_ext_diag_i[i+1] = P_ext_diag_size_private;
P_ext_offd_i[i+1] = P_ext_offd_size_private;
}
} /* omp parallel */
hypre_TFree(prefix_sum_workspace);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Ps_ext);
Ps_ext = NULL;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (P_ext_offd_size || num_cols_offd_P)
{
hypre_UnorderedIntSet found_set;
hypre_UnorderedIntSetCreate(&found_set, P_ext_offd_size + num_cols_offd_P, 16*hypre_NumThreads());
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < P_ext_offd_size; i++)
{
hypre_UnorderedIntSetPut(&found_set, P_ext_offd_j[i]);
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_P; i++)
{
hypre_UnorderedIntSetPut(&found_set, col_map_offd_P[i]);
}
} /* omp parallel */
temp = hypre_UnorderedIntSetCopyToArray(&found_set, &num_cols_offd_Pext);
hypre_UnorderedIntSetDestroy(&found_set);
hypre_UnorderedIntMap col_map_offd_Pext_inverse;
hypre_sort_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext, &col_map_offd_Pext_inverse);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i=0 ; i < P_ext_offd_size; i++)
P_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_Pext_inverse, P_ext_offd_j[i]);
if (num_cols_offd_Pext) hypre_UnorderedIntMapDestroy(&col_map_offd_Pext_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (P_ext_offd_size || num_cols_offd_P)
{
temp = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size+num_cols_offd_P);
for (i=0; i < P_ext_offd_size; i++)
temp[i] = P_ext_offd_j[i];
cnt = P_ext_offd_size;
for (i=0; i < num_cols_offd_P; i++)
temp[cnt++] = col_map_offd_P[i];
}
if (cnt)
{
HYPRE_Int value;
hypre_qsort0(temp, 0, cnt-1);
num_cols_offd_Pext = 1;
value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Pext++] = value;
}
}
}
if (num_cols_offd_Pext)
col_map_offd_Pext = hypre_CTAlloc(HYPRE_Int,num_cols_offd_Pext);
for (i=0; i < num_cols_offd_Pext; i++)
col_map_offd_Pext[i] = temp[i];
if (P_ext_offd_size || num_cols_offd_P)
hypre_TFree(temp);
for (i=0 ; i < P_ext_offd_size; i++)
P_ext_offd_j[i] = hypre_BinarySearch(col_map_offd_Pext,
P_ext_offd_j[i],
num_cols_offd_Pext);
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (num_cols_offd_P)
{
map_P_to_Pext = hypre_CTAlloc(HYPRE_Int,num_cols_offd_P);
cnt = 0;
for (i=0; i < num_cols_offd_Pext; i++)
if (col_map_offd_Pext[i] == col_map_offd_P[cnt])
{
map_P_to_Pext[cnt++] = i;
if (cnt == num_cols_offd_P) break;
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/*-----------------------------------------------------------------------
* First Pass: Determine size of RAP_int and set up RAP_int_i if there
* are more than one processor and nonzero elements in R_offd
*-----------------------------------------------------------------------*/
P_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads);
A_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads);
if (num_cols_offd_RT)
{
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT/num_threads;
rest = num_cols_offd_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Allocate marker arrays.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
{
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_Pext);
P_marker = P_mark_array[ii];
}
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A);
A_marker = A_mark_array[ii];
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++)
{
i1 = R_offd_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
}
}
jj_count[ii] = jj_counter;
}
/*-----------------------------------------------------------------------
* Allocate RAP_int_data and RAP_int_j arrays.
*-----------------------------------------------------------------------*/
for (i = 0; i < num_threads-1; i++)
jj_count[i+1] += jj_count[i];
RAP_size = jj_count[num_threads-1];
RAP_int_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RT+1);
RAP_int_data = hypre_CTAlloc(HYPRE_Real, RAP_size);
RAP_int_j = hypre_CTAlloc(HYPRE_Int, RAP_size);
RAP_int_i[num_cols_offd_RT] = RAP_size;
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_int_data and RAP_int_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT/num_threads;
rest = num_cols_offd_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
jj_counter = start_indexing;
if (ii > 0) jj_counter = jj_count[ii-1];
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
RAP_int_i[ic] = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++)
{
i1 = R_offd_j[jj1];
r_entry = R_offd_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
r_a_product = r_entry * A_offd_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter]
= col_map_offd_Pext[i3-num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[12]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
r_a_product = r_entry * A_diag_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] =
col_map_offd_Pext[i3-num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[12]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
}
}
if (num_cols_offd_Pext || num_cols_diag_P)
hypre_TFree(P_mark_array[ii]);
hypre_TFree(A_mark_array[ii]);
}
RAP_int = hypre_CSRMatrixCreate(num_cols_offd_RT,num_rows_offd_RT,RAP_size);
hypre_CSRMatrixI(RAP_int) = RAP_int_i;
hypre_CSRMatrixJ(RAP_int) = RAP_int_j;
hypre_CSRMatrixData(RAP_int) = RAP_int_data;
hypre_TFree(jj_count);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
RAP_ext_size = 0;
if (num_sends_RT || num_recvs_RT)
{
RAP_ext = hypre_ExchangeRAPData(RAP_int,comm_pkg_RT);
RAP_ext_i = hypre_CSRMatrixI(RAP_ext);
RAP_ext_j = hypre_CSRMatrixJ(RAP_ext);
RAP_ext_data = hypre_CSRMatrixData(RAP_ext);
RAP_ext_size = RAP_ext_i[hypre_CSRMatrixNumRows(RAP_ext)];
}
if (num_cols_offd_RT)
{
hypre_CSRMatrixDestroy(RAP_int);
RAP_int = NULL;
}
RAP_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1);
RAP_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1);
first_col_diag_RAP = first_col_diag_P;
last_col_diag_RAP = first_col_diag_P + num_cols_diag_P - 1;
/*-----------------------------------------------------------------------
* check for new nonzero columns in RAP_offd generated through RAP_ext
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedIntMap col_map_offd_RAP_inverse;
if (RAP_ext_size || num_cols_offd_Pext)
{
hypre_UnorderedIntSet found_set;
hypre_UnorderedIntSetCreate(&found_set, 2*(RAP_ext_size + num_cols_offd_Pext), 16*hypre_NumThreads());
cnt = 0;
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < RAP_ext_size; i++)
{
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
hypre_UnorderedIntSetPut(&found_set, RAP_ext_j[i]);
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_Pext; i++)
{
hypre_UnorderedIntSetPut(&found_set, col_map_offd_Pext[i]);
}
} /* omp parallel */
temp = hypre_UnorderedIntSetCopyToArray(&found_set, &num_cols_offd_RAP);
hypre_UnorderedIntSetDestroy(&found_set);
hypre_sort_and_create_inverse_map(temp, num_cols_offd_RAP, &col_map_offd_RAP, &col_map_offd_RAP_inverse);
// num_cols_offd_RAP <= RAP_ext_size + num_cols_offd_Pext
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (RAP_ext_size || num_cols_offd_Pext)
{
temp = hypre_CTAlloc(HYPRE_Int,RAP_ext_size+num_cols_offd_Pext);
cnt = 0;
for (i=0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
temp[cnt++] = RAP_ext_j[i];
for (i=0; i < num_cols_offd_Pext; i++)
temp[cnt++] = col_map_offd_Pext[i];
if (cnt)
{
HYPRE_Int value;
hypre_qsort0(temp,0,cnt-1);
value = temp[0];
num_cols_offd_RAP = 1;
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_RAP++] = value;
}
}
}
/* now evaluate col_map_offd_RAP */
if (num_cols_offd_RAP)
col_map_offd_RAP = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP);
for (i=0 ; i < num_cols_offd_RAP; i++)
col_map_offd_RAP[i] = temp[i];
hypre_TFree(temp);
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (num_cols_offd_P)
{
map_P_to_RAP = hypre_TAlloc(HYPRE_Int,num_cols_offd_P);
cnt = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_P[cnt])
{
map_P_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_P) break;
}
}
if (num_cols_offd_Pext)
{
map_Pext_to_RAP = hypre_TAlloc(HYPRE_Int,num_cols_offd_Pext);
cnt = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_Pext[cnt])
{
map_Pext_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_Pext) break;
}
}
/*-----------------------------------------------------------------------
* Convert RAP_ext column indices
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
RAP_ext_j[i] = num_cols_diag_P
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
+ hypre_UnorderedIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]);
#else
+ hypre_BinarySearch(col_map_offd_RAP,
RAP_ext_j[i],num_cols_offd_RAP);
#endif
else
RAP_ext_j[i] -= first_col_diag_RAP;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (num_cols_offd_RAP)
hypre_UnorderedIntMapDestroy(&col_map_offd_RAP_inverse);
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/* need to allocate new P_marker etc. and make further changes */
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT/num_threads;
rest = num_cols_diag_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP);
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A);
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Set marker for diagonal entry, RAP_{ic,ic}. and for all points
* being added to row ic of RAP_diag and RAP_offd through RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (square)
P_marker[ic] = jj_count_diag++;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)
{
jcol = RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i=0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)
{
jcol = RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)
{
i1 = R_diag_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
if (num_cols_offd_P)
{
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
}
/*--------------------------------------------------------------------
* Set RAP_diag_i and RAP_offd_i for this row.
*--------------------------------------------------------------------*/
/*
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
*/
}
jj_cnt_diag[ii] = jj_count_diag;
jj_cnt_offd[ii] = jj_count_offd;
}
for (i=0; i < num_threads-1; i++)
{
jj_cnt_diag[i+1] += jj_cnt_diag[i];
jj_cnt_offd[i+1] += jj_cnt_offd[i];
}
jj_count_diag = jj_cnt_diag[num_threads-1];
jj_count_offd = jj_cnt_offd[num_threads-1];
RAP_diag_i[num_cols_diag_RT] = jj_count_diag;
RAP_offd_i[num_cols_diag_RT] = jj_count_offd;
/*-----------------------------------------------------------------------
* Allocate RAP_diag_data and RAP_diag_j arrays.
* Allocate RAP_offd_data and RAP_offd_j arrays.
*-----------------------------------------------------------------------*/
RAP_diag_size = jj_count_diag;
if (RAP_diag_size)
{
RAP_diag_data = hypre_CTAlloc(HYPRE_Real, RAP_diag_size);
RAP_diag_j = hypre_CTAlloc(HYPRE_Int, RAP_diag_size);
}
RAP_offd_size = jj_count_offd;
if (RAP_offd_size)
{
RAP_offd_data = hypre_CTAlloc(HYPRE_Real, RAP_offd_size);
RAP_offd_j = hypre_CTAlloc(HYPRE_Int, RAP_offd_size);
}
if (RAP_offd_size == 0 && num_cols_offd_RAP != 0)
{
num_cols_offd_RAP = 0;
hypre_TFree(col_map_offd_RAP);
}
RA_diag_data_array = hypre_TAlloc(HYPRE_Real, num_cols_diag_A*num_threads);
RA_diag_j_array = hypre_TAlloc(HYPRE_Int, num_cols_diag_A*num_threads);
if (num_cols_offd_A)
{
RA_offd_data_array = hypre_TAlloc(HYPRE_Real, num_cols_offd_A*num_threads);
RA_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_A*num_threads);
}
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_diag_data and RAP_diag_j.
* Second Pass: Fill in RAP_offd_data and RAP_offd_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT/num_threads;
rest = num_cols_diag_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A ; i++)
{
A_marker[i] = -1;
}
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
if (ii > 0)
{
jj_count_diag = jj_cnt_diag[ii-1];
jj_count_offd = jj_cnt_offd[ii-1];
}
// temporal matrix RA = R*A
// only need to store one row per thread because R*A and (R*A)*P are fused
// into one loop.
hypre_CSRMatrix RA_diag, RA_offd;
RA_diag.data = RA_diag_data_array + num_cols_diag_A*ii;
RA_diag.j = RA_diag_j_array + num_cols_diag_A*ii;
RA_diag.num_nonzeros = 0;
RA_offd.num_nonzeros = 0;
if (num_cols_offd_A)
{
RA_offd.data = RA_offd_data_array + num_cols_offd_A*ii;
RA_offd.j = RA_offd_j_array + num_cols_offd_A*ii;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros;
HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros;
if (square)
{
P_marker[ic] = jj_count_diag;
RAP_diag_data[jj_count_diag] = zero;
RAP_diag_j[jj_count_diag] = ic;
jj_count_diag++;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)
{
jcol = RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol-num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i=0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)
{
jcol = RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol-num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag and compute row ic of RA.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)
{
i1 = R_diag_j[jj1];
r_entry = R_diag_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
HYPRE_Real a_entry = A_offd_data[jj2];
HYPRE_Int marker = A_marker[i2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_offd)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = RA_offd.num_nonzeros;
RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry;
RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2;
RA_offd.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry;
// JSP: compiler will more likely to generate FMA instructions
// when we don't eliminate common subexpressions of
// r_entry * A_offd_data[jj2] manually.
}
} // loop over entries in row i1 of A_offd
} // num_cols_offd_A
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
HYPRE_Real a_entry = A_diag_data[jj2];
HYPRE_Int marker = A_marker[i2+num_cols_offd_A];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_diag)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = RA_diag.num_nonzeros;
RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry;
RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2;
RA_diag.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry;
}
} // loop over entries in row i1 of A_diag
} // loop over entries in row ic of R_diag
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_offd.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++)
{
i1 = RA_offd.j[jj1 - ra_row_begin_offd];
r_a_product = RA_offd.data[jj1 - ra_row_begin_offd];
/*-----------------------------------------------------------
* Loop over entries in row i1 of P_ext.
*-----------------------------------------------------------*/
for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1+1]; jj2++)
{
i2 = P_ext_diag_j[jj2];
HYPRE_Real p_entry = P_ext_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
RAP_diag_data[marker] += r_a_product * p_entry;
}
for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1+1]; jj2++)
{
i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_ext_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[marker] += r_a_product * p_entry;
}
} // loop over entries in row ic of RA_offd
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_diag.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++)
{
HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag];
HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of P_diag.
*-----------------------------------------------------------------*/
for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1+1]; jj2++)
{
i2 = P_diag_j[jj2];
HYPRE_Real p_entry = P_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
{
RAP_diag_data[marker] += r_a_product * p_entry;
}
}
if (num_cols_offd_P)
{
for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1+1]; jj2++)
{
i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
{
RAP_offd_data[marker] += r_a_product * p_entry;
}
}
} // num_cols_offd_P
} // loop over entries in row ic of RA_diag.
} // Loop over interior c-points.
hypre_TFree(P_mark_array[ii]);
hypre_TFree(A_mark_array[ii]);
} // omp parallel for
/* check if really all off-diagonal entries occurring in col_map_offd_RAP
are represented and eliminate if necessary */
P_marker = hypre_CTAlloc(HYPRE_Int,num_cols_offd_RAP);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd_RAP; i++)
P_marker[i] = -1;
jj_count_offd = 0;
#ifdef HYPRE_USING_ATOMIC
#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
#ifdef HYPRE_USING_ATOMIC
if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)
{
jj_count_offd++;
}
#else
if (P_marker[i3])
{
P_marker[i3] = 0;
jj_count_offd++;
}
#endif
}
if (jj_count_offd < num_cols_offd_RAP)
{
new_col_map_offd_RAP = hypre_CTAlloc(HYPRE_Int,jj_count_offd);
jj_counter = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (!P_marker[i])
{
P_marker[i] = jj_counter;
new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
RAP_offd_j[i] = P_marker[i3];
}
num_cols_offd_RAP = jj_count_offd;
hypre_TFree(col_map_offd_RAP);
col_map_offd_RAP = new_col_map_offd_RAP;
}
hypre_TFree(P_marker);
RAP = hypre_ParCSRMatrixCreate(comm, n_coarse_RT, n_coarse,
RT_partitioning, coarse_partitioning,
num_cols_offd_RAP, RAP_diag_size,
RAP_offd_size);
/* Have RAP own coarse_partitioning instead of P */
hypre_ParCSRMatrixSetColStartsOwner(P,0);
hypre_ParCSRMatrixSetColStartsOwner(RT,0);
RAP_diag = hypre_ParCSRMatrixDiag(RAP);
hypre_CSRMatrixI(RAP_diag) = RAP_diag_i;
if (RAP_diag_size)
{
hypre_CSRMatrixData(RAP_diag) = RAP_diag_data;
hypre_CSRMatrixJ(RAP_diag) = RAP_diag_j;
}
RAP_offd = hypre_ParCSRMatrixOffd(RAP);
hypre_CSRMatrixI(RAP_offd) = RAP_offd_i;
if (num_cols_offd_RAP)
{
hypre_CSRMatrixData(RAP_offd) = RAP_offd_data;
hypre_CSRMatrixJ(RAP_offd) = RAP_offd_j;
hypre_ParCSRMatrixColMapOffd(RAP) = col_map_offd_RAP;
}
if (num_procs > 1)
{
/* hypre_GenerateRAPCommPkg(RAP, A); */
hypre_MatvecCommPkgCreate(RAP);
}
*RAP_ptr = RAP;
/*-----------------------------------------------------------------------
* Free R, P_ext and marker arrays.
*-----------------------------------------------------------------------*/
if (keepTranspose)
{
hypre_ParCSRMatrixDiagT(RT) = R_diag;
}
else
{
hypre_CSRMatrixDestroy(R_diag);
}
R_diag = NULL;
if (num_cols_offd_RT)
{
if (keepTranspose)
{
hypre_ParCSRMatrixOffdT(RT) = R_offd;
}
else
{
hypre_CSRMatrixDestroy(R_offd);
}
R_offd = NULL;
}
if (num_sends_RT || num_recvs_RT)
{
hypre_CSRMatrixDestroy(RAP_ext);
RAP_ext = NULL;
}
hypre_TFree(P_mark_array);
hypre_TFree(A_mark_array);
hypre_TFree(P_ext_diag_i);
hypre_TFree(P_ext_offd_i);
hypre_TFree(jj_cnt_diag);
hypre_TFree(jj_cnt_offd);
if (num_cols_offd_P)
{
hypre_TFree(map_P_to_Pext);
hypre_TFree(map_P_to_RAP);
}
if (num_cols_offd_Pext)
{
hypre_TFree(col_map_offd_Pext);
hypre_TFree(map_Pext_to_RAP);
}
if (P_ext_diag_size)
{
hypre_TFree(P_ext_diag_data);
hypre_TFree(P_ext_diag_j);
}
if (P_ext_offd_size)
{
hypre_TFree(P_ext_offd_data);
hypre_TFree(P_ext_offd_j);
}
hypre_TFree(RA_diag_data_array);
hypre_TFree(RA_diag_j_array);
if (num_cols_offd_A)
{
hypre_TFree(RA_offd_data_array);
hypre_TFree(RA_offd_j_array);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntMapDestroy(&send_map_elmts_RT_inverse_map);
}
hypre_TFree(send_map_elmts_starts_RT_aggregated);
hypre_TFree(send_map_elmts_RT_aggregated);
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] += hypre_MPI_Wtime();
#endif
return(0);
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
/* Upper bound on the number of channel records a single layer may declare. */
#define MaxPSDChannels 56
/* Round (x) up to the next even value: (x)+1 with the low bit cleared. */
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/*
  Compression schemes for PSD channel data; the numeric values are the
  codes stored in the file.
*/
typedef enum
{
Raw = 0,                   /* uncompressed scanlines */
RLE = 1,                   /* run-length encoded scanlines */
ZipWithoutPrediction = 2,  /* zip-compressed data */
ZipWithPrediction = 3      /* zip-compressed, prediction applied first */
} PSDCompressionType;
/*
  Color modes from the PSD file header; the numeric values are the mode
  codes stored in the file (codes 5 and 6 are intentionally absent here).
*/
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/*
  One channel record of a PSD layer: the channel's type identifier and
  the size of its stored data.
*/
typedef struct _ChannelInfo
{
short int
type;  /* channel type id from the layer record */
size_t
size;  /* stored data length (presumably bytes -- confirm against reader) */
} ChannelInfo;
/*
  Layer-mask state: the decoded mask image, its placement, and the raw
  background/flag bytes read from the layer record.
*/
typedef struct _MaskInfo
{
Image
*image;  /* decoded mask pixels; NOTE(review): presumably NULL when absent */
RectangleInfo
page;    /* mask position and extent */
unsigned char
background,  /* mask background value from the layer record */
flags;       /* mask flag byte from the layer record */
} MaskInfo;
/*
  Aggregate parsing state for a single PSD layer: geometry, the channel
  table, blend settings, the optional mask, and the decoded layer image.
*/
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];  /* per-channel type/size records */
char
blendkey[4];  /* 4-character blend-mode key (not NUL-terminated) */
Image
*image;       /* decoded layer pixels */
MaskInfo
mask;         /* optional layer mask state */
Quantum
opacity;      /* layer opacity */
RectangleInfo
page;         /* layer position and extent */
size_t
offset_x,     /* additional layer offsets */
offset_y;
unsigned char
clipping,     /* clipping byte from the layer record */
flags,        /* layer flag byte */
name[256],    /* layer name (Pascal-style in file; stored here) */
visible;      /* nonzero when the layer is visible */
unsigned short
channels;     /* number of valid entries in channel_info */
StringInfo
*info;        /* additional layer information block */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD file starts with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  /*
    Map an ImageMagick composite operator onto its 4-byte PSD blend-mode
    key.  Operators without a PSD equivalent fall back to "norm".
  */
  switch (op)
  {
    case ColorBurnCompositeOp: return("idiv");
    case ColorDodgeCompositeOp: return("div ");
    case ColorizeCompositeOp: return("colr");
    case DarkenCompositeOp: return("dark");
    case DifferenceCompositeOp: return("diff");
    case DissolveCompositeOp: return("diss");
    case ExclusionCompositeOp: return("smud");
    case HardLightCompositeOp: return("hLit");
    case HardMixCompositeOp: return("hMix");
    case HueCompositeOp: return("hue ");
    case LightenCompositeOp: return("lite");
    case LinearBurnCompositeOp: return("lbrn");
    case LinearDodgeCompositeOp: return("lddg");
    case LinearLightCompositeOp: return("lLit");
    case LuminizeCompositeOp: return("lum ");
    case MultiplyCompositeOp: return("mul ");
    case OverCompositeOp: return("norm");
    case OverlayCompositeOp: return("over");
    case PinLightCompositeOp: return("pLit");
    case SaturateCompositeOp: return("sat ");
    case ScreenCompositeOp: return("scrn");
    case SoftLightCompositeOp: return("sLit");
    case VividLightCompositeOp: return("vLit");
    default: return("norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  /*
    Undo Photoshop's blending of semi-transparent pixels with white: for
    every pixel with alpha strictly between 0 and 1, each non-alpha channel
    is rewritten as (c - (1-a)*QuantumRange)/a.  Only sRGB images with a
    blended alpha channel are touched; the "psd:alpha-unblend" image option
    disables the correction.
  */
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace)
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a failed row elsewhere aborts the remaining (parallel) iterations */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          /* invert "composite over white" on every channel except alpha */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Translate the PSD channel compression code into the generic
    ImageMagick compression type; anything unknown maps to NoCompression.
  */
  if (compression == RLE)
    return(RLECompression);
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return(ZipCompression);
  return(NoCompression);
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  /*
    Scale every pixel's alpha by the layer opacity (revert == MagickFalse),
    or divide the opacity back out (revert != MagickFalse, opacity > 0).
    A fully opaque layer is left untouched.
  */
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* the image now carries meaningful per-pixel alpha */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  /*
    Multiply the image's alpha by the intensity of the layer mask (or, when
    revert != MagickFalse, divide it back out).  The mask is first
    composited onto a canvas-sized clone filled with the background shade so
    pixels outside the mask's page rectangle receive the background value.
  */
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  /* FIX: the clone was dereferenced without a NULL check, crashing on
     allocation failure */
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=background;
  SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->alpha_trait=BlendPixelTrait;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  /*
    Stash the layer's opacity-mask image in the image registry under a
    random key and record that key in the "psd:opacity-mask" artifact so
    the writer can later restore the mask.
  */
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    FIX: request enough bytes to cover key[0..9].  The previous request of
    2+1 bytes left key[3..7] uninitialized and wrote key[8]/key[9] past the
    key data.  Slots 8 and 9 hold the mask background byte and the NUL.
    NOTE(review): raw random bytes may contain NUL or non-printable
    characters; confirm GetRandomKey output is acceptable as a registry key.
  */
  key_info=GetRandomKey(random_info,8+2);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  /*
    Expand a PackBits-style RLE stream into at most number_pixels output
    samples.  A control byte > 128 repeats the next byte (257-control)
    times, a control byte < 128 copies (control+1) literal bytes, and 128
    is a no-op marker.  For depths 1/2/4 each source byte expands into
    8/4/2 output samples.  Returns the number of samples produced; the
    function stops early when either buffer is exhausted.
  */
  int
    byte,
    repeat;

  size_t
    run;

  ssize_t
    i,
    j,
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    size_t
      control;

    packets--;
    control=(size_t) (*compact_pixels++);
    if (control == 128)
      continue;  /* filler byte */
    repeat=(control > 128);
    run=repeat ? 257-control : control+1;
    byte=0;
    if (repeat)
      {
        /* replicated run: a single source byte follows the control byte */
        if (packets == 0)
          return(i);
        packets--;
        byte=(int) (*compact_pixels++);
      }
    for (j=0; j < (ssize_t) run; j++)
    {
      if (!repeat)
        {
          /* literal run: every output group consumes its own source byte */
          if (packets == 0)
            return(i);
          packets--;
          byte=(int) (*compact_pixels++);
        }
      switch (depth)
      {
        case 1:
        {
          ssize_t
            bit;

          /* a set bit decodes to 0, a clear bit to 255 */
          if ((i+8) > (ssize_t) number_pixels)
            return(i);
          i+=8;
          for (bit=7; bit >= 0; bit--)
            *pixels++=((byte >> bit) & 0x01) ? 0U : 255U;
          break;
        }
        case 2:
        {
          if ((i+4) > (ssize_t) number_pixels)
            return(i);
          i+=4;
          *pixels++=(unsigned char) ((byte >> 6) & 0x03);
          *pixels++=(unsigned char) ((byte >> 4) & 0x03);
          *pixels++=(unsigned char) ((byte >> 2) & 0x03);
          *pixels++=(unsigned char) (byte & 0x03);
          break;
        }
        case 4:
        {
          if ((i+2) > (ssize_t) number_pixels)
            return(i);
          i+=2;
          *pixels++=(unsigned char) ((byte >> 4) & 0x0f);
          *pixels++=(unsigned char) (byte & 0x0f);
          break;
        }
        default:
        {
          if ((i+1) > (ssize_t) number_pixels)
            return(i);
          i++;
          *pixels++=(unsigned char) byte;
          break;
        }
      }
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  /*
    Release each layer's image, mask image, and additional-info blob, then
    free the layer array itself.  Returns the freed pointer (NULL) for
    caller assignment.
  */
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    if (layer_info[j].image != (Image *) NULL)
      layer_info[j].image=DestroyImage(layer_info[j].image);
    if (layer_info[j].mask.image != (Image *) NULL)
      layer_info[j].mask.image=DestroyImage(layer_info[j].mask.image);
    if (layer_info[j].info != (StringInfo *) NULL)
      layer_info[j].info=DestroyStringInfo(layer_info[j].info);
  }
  return((LayerInfo *) RelinquishMagickMemory(layer_info));
}
static inline size_t GetPSDPacketSize(Image *image)
{
  /*
    Bytes per stored sample: two when more than 8 bits are needed (deep
    palette or depth > 8), otherwise one.
  */
  if (image->storage_class == PseudoClass)
    {
      if ((image->colors > 256) || (image->depth > 8))
        return(2);
      return(1);
    }
  return(image->depth > 8 ? 2 : 1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /*
    Read a section length from the blob: version-1 files store 32-bit
    lengths, later versions store 64-bit lengths.
  */
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  /*
    Byte length of one decoded scanline; 1-bit images pack eight pixels per
    byte, rounded up.
  */
  size_t
    packet_size;

  packet_size=GetPSDPacketSize(image);
  if (image->depth == 1)
    return(((image->columns+7)/8)*packet_size);
  return(image->columns*packet_size);
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name of a PSD color mode, for logging.
  */
  const char
    *mode;

  switch (type)
  {
    case BitmapMode: mode="Bitmap"; break;
    case GrayscaleMode: mode="Grayscale"; break;
    case IndexedMode: mode="Indexed"; break;
    case RGBMode: mode="RGB"; break;
    case CMYKMode: mode="CMYK"; break;
    case MultichannelMode: mode="Multichannel"; break;
    case DuotoneMode: mode="Duotone"; break;
    case LabMode: mode="L*A*B"; break;
    default: mode="unknown"; break;
  }
  return(mode);
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  /*
    Negate every channel except alpha, restoring the channel mask
    afterwards.
  */
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
static void ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
  /*
    Walk the "8BIM" image-resource blocks: store the whole section as the
    "8bim" profile, then extract the resolution record (id 0x03ed) and the
    flag in resource 0x0421 that tells whether a merged image is present.
  */
  const unsigned char
    *p;

  StringInfo
    *profile;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return;
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  /* FIX: BlobToStringInfo can return NULL on allocation failure; the datum
     was set without checking */
  if (profile == (StringInfo *) NULL)
    return;
  SetStringInfoDatum(profile,blocks);
  (void) SetImageProfile(image,"8bim",profile,exception);
  profile=DestroyStringInfo(profile);
  for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); )
  {
    /* block header: 4-byte "8BIM" signature, 2-byte id, 2 bytes skipped,
       4-byte payload size */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if ((p+count) > (blocks+length-16))
      return;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* byte 4 of the payload flags whether a merged image is present */
        if (*(p+4) == 0)
          *has_merged_image=MagickFalse;
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* payloads are padded to even length */
    if ((count & 0x01) != 0)
      p++;
  }
  return;
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  /*
    Reverse the bytes of p in place; used to undo byte-swapped keys when
    the blob is not MSB-endian.  MSB-endian input is left untouched.
  */
  char
    *tail;

  if (image->endian == MSBEndian)
    return;
  for (tail=p+length-1; p < tail; p++, tail--)
  {
    char
      swap;

    swap=*p;
    *p=*tail;
    *tail=swap;
  }
}
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  /*
    Store one decoded sample into pixel q.  `type` is the PSD channel role:
    0..4 are color components (with colorspace-dependent meaning for 3 and
    4), -1 is the alpha channel, and -2 is a layer-mask channel written as
    gray.
  */
  if (image->storage_class == PseudoClass)
    {
      /* palette image: the sample is a colormap index */
      if (packet_size == 1)
        SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
      else
        SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
        ConstrainColormapIndex(image,GetPixelIndex(image,q),exception),q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image, pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      /* layer masks (-2) and single-channel images also set the gray channel */
      SetPixelRed(image,pixel,q);
      if (channels == 1 || type == -2)
        SetPixelGray(image,pixel,q);
      break;
    }
    case 1:
    {
      /* NOTE(review): PseudoClass already returned above, so the alpha
         branch here appears unreachable */
      if (image->storage_class == PseudoClass)
        SetPixelAlpha(image,pixel,q);
      else
        SetPixelGreen(image,pixel,q);
      break;
    }
    case 2:
    {
      if (image->storage_class == PseudoClass)
        SetPixelAlpha(image,pixel,q);
      else
        SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* fourth channel: black for CMYK, otherwise alpha (when enabled) */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* fifth channel: ignored for RGB-compatible images with extra
         channels, otherwise alpha (when enabled) */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const size_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  /*
    Scatter one decoded row of samples into the image: 8- or 16-bit
    samples are scaled to Quantum, and packed 1-bit rows are expanded
    eight pixels per byte.  Returns the result of SyncAuthenticPixels.
  */
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  unsigned short
    nibble;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      {
        /* 16-bit samples are stored big-endian */
        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /*
          1-bit row: each byte holds up to 8 pixels, high bit first; a set
          bit decodes to 0, a clear bit to QuantumRange.
        */
        ssize_t
          bit,
          number_bits;

        number_bits=image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* the inner loop advanced x one too far unless the row ended here */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  /*
    Decode an uncompressed channel: read one scanline at a time from the
    blob and scatter it into the target channel.
  */
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    if ((size_t) ReadBlob(image,row_size,pixels) != row_size)
      {
        /* short read: truncated blob */
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  /*
    Read the table of per-row compressed byte counts that precedes RLE
    channel data: 16-bit entries for version-1 files, 32-bit otherwise.
    Returns NULL when the table cannot be allocated.
  */
  MagickOffsetType
    *sizes;

  ssize_t
    row;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (row=0; row < (ssize_t) size; row++)
  {
    if (psd_info->version == 1)
      sizes[row]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[row]=(MagickOffsetType) ReadBlobLong(image);
  }
  return(sizes);
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  /*
    Decode an RLE (PackBits) compressed channel.  `sizes` holds the
    compressed byte count of each row (from ReadPSDRLESizes).
  */
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the compressed buffer for the largest row */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* reject implausibly large compressed rows */
  if (length > row_size + 256) // arbitrary number
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit images a bogus depth forces DecodePSDPixels to copy the
      bytes verbatim; ReadPSDChannelPixels performs the bit expansion.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  /*
    Decode a zip-compressed channel: inflate compact_size bytes into a
    rows*row_size buffer, optionally undo per-row delta prediction, then
    scatter the rows into the image.
  */
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  (void) ReadBlob(image,compact_size,compact_pixels);
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            /* FIX: release the zlib state before bailing out (was leaked) */
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        /* FIX: a truncated stream returns Z_STREAM_END before filling the
           output buffer; without this break the loop would spin forever */
        if (ret == Z_STREAM_END)
          break;
      }
      /* FIX: inflateEnd was never called, leaking zlib's internal state */
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo horizontal delta prediction: each sample is stored as the
        difference from its left neighbor; 16-bit samples carry the low
        byte's overflow into the high byte.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  /*
    Read one channel of a layer.  Color and alpha channels decode directly
    into `image`; a layer-mask channel (type < -1) decodes into a separate
    grayscale image stored in layer_info->mask.image.  On any outcome the
    blob is repositioned to the end of this channel's data.
  */
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if (layer_info->channel_info[channel].type < -1)
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* skip the data; size includes the 2-byte compression word the
             caller already consumed */
          SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickTrue;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      /* the per-row compressed lengths precede the RLE data */
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* always land exactly at the end of this channel, even on failure */
  SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  layer_info->mask.image=mask;
  return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  /*
    Decode all channels of one layer into layer_info->image, then apply the
    layer opacity and (when present) the layer mask.  Layer placement and
    opacity are exposed as psd:layer.* artifacts and the layer name as the
    "label" property.
  */
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;  /* hidden layers do not blend */
  if (psd_info->mode == CMYKMode)
    SetImageColorspace(layer_info->image,CMYKColorspace,exception);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
      (psd_info->mode == GrayscaleMode))
    SetImageColorspace(layer_info->image,GRAYColorspace,exception);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* each channel's data starts with its own 2-byte compression word */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
      compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  ReadPSDLayers() parses the layer and mask information section of a PSD/PSB
  stream and appends each non-empty layer as an image in the list following
  'image'.  When skip_layers is MagickTrue only the layer count is inspected
  (to detect an alpha channel) and the routine returns early.  Returns
  MagickTrue on success.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      ReversePSDString(image,type,4);
      status=MagickFalse;
      if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /* Only a "Lr16" (16-bit layers) sub-block yields a usable size;
             anything else is treated as "no layers". */
          count=ReadBlob(image,4,(unsigned char *) type);
          ReversePSDString(image,type,4);
          if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(short) ReadBlobShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->alpha_trait=BlendPixelTrait;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      (void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
        sizeof(*layer_info));
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          x,
          y;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        /* Layer bounds are stored in the order top, left, bottom, right. */
        layer_info[i].page.y=ReadBlobSignedLong(image);
        layer_info[i].page.x=ReadBlobSignedLong(image);
        y=ReadBlobSignedLong(image);
        x=ReadBlobSignedLong(image);
        layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
        layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        count=ReadBlob(image,4,(unsigned char *) type);
        ReversePSDString(image,type,4);
        if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
        ReversePSDString(image,layer_info[i].blendkey,4);
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* Flag bit 0x02 set means the layer is hidden. */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image);  /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,  /* bytes consumed so far from 'size' */
              length;

            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.
                */
                layer_info[i].mask.page.y=ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    /* Make the mask offset relative to the layer when flag
                       0x01 is clear. */
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                    (double) layer_info[i].mask.page.height,(double)
                    ((MagickOffsetType) length)-18);
                /*
                  Skip over the rest of the layer mask information.
                */
                /* NOTE(review): 18 bytes have been consumed above; if a
                   malformed file declares length < 18 the unsigned
                   subtraction below wraps to a huge value and
                   DiscardBlobBytes fails with UnexpectedEndOfFile.  An
                   explicit length check would be clearer -- confirm. */
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                /*
                  We read it, but don't use it...
                */
                for (j=0; j < (ssize_t) length; j+=8)
                {
                  size_t blend_source=ReadBlobLong(image);
                  size_t blend_dest=ReadBlobLong(image);

                  if (image->debug != MagickFalse)
                    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                      " source(%x), dest(%x)",(unsigned int)
                      blend_source,(unsigned int) blend_dest);
                }
              }
            /*
              Layer name.  Stored as a Pascal string (length byte + data),
              padded to a multiple of 4 bytes.
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /* Preserve any remaining additional layer information; it is
               attached below as the "psd:additional-info" profile. */
            length=(MagickSizeType) size-combined_length;
            if (length > 0)
              {
                unsigned char
                  *info;

                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) ||
            (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info,exception);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      if (image_info->ping == MagickFalse)
        {
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                /* Empty layer: skip its channel data in the stream. */
                for (j=0; j < layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
              number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /* Compact the array: drop entries whose image was never created. */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          if (number_layers > 0)
            {
              /* Link the surviving layers into the image list after 'image'. */
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
/*
  ReadPSDMergedImage() reads the pre-combined (flattened) image data that
  follows the layer section.  Only Raw and RLE compression are supported;
  any other compression type raises a warning and MagickFalse is returned.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickOffsetType
    *rle_sizes;

  PSDCompressionType
    compression;

  register ssize_t
    channel;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  rle_sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by a per-row byte-count table. */
      rle_sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (rle_sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (channel=0; channel < (ssize_t) psd_info->channels; channel++)
  {
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,channel,
        rle_sizes+(channel*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,channel,psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  rle_sizes=(MagickOffsetType *) RelinquishMagickMemory(rle_sizes);
  return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop (PSD, version 1) or large document
  (PSB, version 2) stream and returns the image list, or NULL on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  ssize_t
    count;

  unsigned char
    *data;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* Version 1 (PSD) files are limited to 30000x30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /* Map the PSD color mode onto an ImageMagick colorspace and, where extra
     channels are present, activate the alpha channel. */
  if (psd_info.mode == LabMode)
    SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      SetImageColorspace(image,CMYKColorspace,exception);
      if (psd_info.channels > 4)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
           (psd_info.mode == DuotoneMode))
    {
      status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536,
        exception);
      if (status == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Image colormap allocated");
      SetImageColorspace(image,GRAYColorspace,exception);
      if (psd_info.channels > 1)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else
    if (psd_info.channels > 3)
      SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if (psd_info.mode == DuotoneMode)
        {
          /*
            Duotone image data; the format of this data is undocumented.
          */
          data=(unsigned char *) AcquireQuantumMemory((size_t) length,
            sizeof(*data));
          if (data == (unsigned char *) NULL)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          (void) ReadBlob(image,(size_t) length,data);
          data=(unsigned char *) RelinquishMagickMemory(data);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* Colormap is stored planar: all reds, then greens, then blues. */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      /* May clear has_merged_image if the resources say no composite. */
      ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image,
        exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* NOTE(review): a section size of exactly 8 triggers two extra 32-bit
         reads, keeping only the second value as the length; presumably this
         handles a variant layout -- confirm against the PSD specification. */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayers(image,image_info,&psd_info,skip_layers,exception) !=
          MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (image_info->ping != MagickFalse)
    {
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1))
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
      (length != 0))
    {
      /* No usable composite was read: rewind and force-read the layers. */
      SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayers(image,image_info,&psd_info,MagickFalse,exception);
      if (status != MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      /* Synthesize the composite by flattening the layers. */
      if (GetImageListLength(image) == 1)
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      SetImageAlphaChannel(image,TransparentAlphaChannel,exception);
      image->background_color.alpha=TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
/*
  RegisterPSDImage() registers the PSB and PSD coders with ImageMagick and
  returns the coder signature.
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* Adobe large document format (PSB). */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  /* Classic Photoshop format (PSD). */
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetPSDOffset() writes a row-offset field whose width depends on the PSD
  version: 16 bits for PSD (version 1), 32 bits for PSB (version 2).
  Returns the number of bytes written.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  /* Version 2 offsets are 32 bits wide; the previous (unsigned short) cast
     truncated any offset larger than 65535. */
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}
/*
  WritePSDOffset() seeks to 'offset', rewrites the row-offset field there
  (16 bits for PSD version 1, 32 bits for PSB version 2), and restores the
  stream position.  Returns the number of bytes written.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    /* Version 2 fields are 32 bits wide; the previous (unsigned short) cast
       truncated sizes larger than 65535. */
    result=WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}
/*
  SetPSDSize() writes a section-size field: 32 bits for PSD (version 1),
  64 bits for PSB (version 2).  Returns the number of bytes written.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLongLong(image,size));
  return(WriteBlobMSBLong(image,(unsigned int) size));
}
/*
  WritePSDSize() seeks to 'offset', rewrites the section-size field there
  (32 bits for PSD version 1, 64 bits for PSB version 2), and restores the
  stream position.  Returns the number of bytes written.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    bytes_written;

  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    bytes_written=WriteBlobMSBLong(image,(unsigned int) size);
  else
    bytes_written=WriteBlobMSBLongLong(image,size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(bytes_written);
}
/*
  PSDPackbitsEncodeImage() compresses 'length' bytes at 'pixels' into
  'compact_pixels' using the Macintosh PackBits RLE scheme and returns the
  number of compressed bytes written (including the trailing EOD marker).
  The caller supplies 'compact_pixels' sized for worst-case expansion.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run: count byte + up to 127 data bytes. */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One byte left: emit as a literal run of one. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes left: emit as a literal run of two. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical bytes: emit a packed run of three. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* A run of n identical bytes is encoded as 257-n, then the byte. */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          /* Stop 3 bytes early so the lookahead above stays in bounds. */
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() emits the 2-byte compression marker for
  next_image and, for RLE, reserves one row-length placeholder per row and
  channel (patched later via WritePSDOffset).  Returns the number of bytes
  written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return(WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return(WriteBlobMSBShort(image,Raw));
}
/*
  WritePSDChannel() writes one channel (selected by quantum_type) of
  next_image to the blob, honoring the image compression (raw, PackBits RLE,
  or zip when zlib is available).  When 'separate' is set the channel carries
  its own compression marker and patches row sizes relative to its own start.
  Returns the number of bytes written, or 0 on failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  int
    y;

  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Per-channel compression marker; remember where to patch row sizes. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      ResetMagickMemory(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      /* quality 1-9 selects the zlib compression level directly. */
      level=Z_DEFAULT_COMPRESSION;
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* Invert the exported bits for 1-bit monochrome output. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's byte count into the reserved offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        /* Stream the row through deflate; finish on the last row. */
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) CHUNK;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) CHUNK-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  AcquireCompactPixels() allocates a scratch buffer large enough to hold one
  PackBits-compressed row of 'image' in the worst case.  Returns NULL (with
  an exception recorded) when the allocation fails.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *buffer;

  bytes_per_packet=(image->depth > 8UL) ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
/*
  WritePSDChannels() writes every channel of next_image (index, gray, or
  RGB/CMYK plus optional alpha) via WritePSDChannel() and returns the total
  number of bytes written.  When 'separate' is set each channel gets its own
  compression header and its size is patched at size_offset (layer data);
  otherwise one compression header covers all channels (merged data).
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Count the channels of the merged image. */
      if (next_image->storage_class != PseudoClass)
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,channels);
      /* Byte size of one channel's RLE row-length table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK pixel data is negated before writing (restored below). */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Restore the CMYK pixels negated above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* Optionally append an opacity mask stored in the image registry
         under the "psd:opacity-mask" artifact. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  Write a Pascal-style string (a single length byte followed by at most 255
  characters) and pad the record with zero bytes so its total length is a
  multiple of 'padding'.  Returns the number of bytes written to the blob.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    bytes_written,
    text_length;

  ssize_t
    pad;

  /* A Pascal string stores its length in one byte: clamp at 255. */
  text_length=strlen(value);
  if (text_length > 255UL)
    text_length=255UL;
  bytes_written=WriteBlobByte(image,(unsigned char) text_length);
  if (text_length != 0)
    bytes_written+=WriteBlob(image,text_length,(const unsigned char *) value);
  /* The record so far is the length byte plus the characters. */
  text_length++;
  pad=(ssize_t) (text_length % padding);
  if (pad != 0)
    for ( ; pad < (ssize_t) padding; pad++)
      bytes_written+=WriteBlobByte(image,0);
  return(bytes_written);
}
/*
  Write the 8BIM resolution resource (id 0x03ED): horizontal and vertical
  resolution as 16.16 fixed-point values plus their display units.  PSD
  stores resolution in pixels per inch, so pixels/centimeter is converted.

  Fix: the original added the 0.5 rounding offset twice -- once when
  computing x/y_resolution and again inside the cast -- biasing the
  fixed-point value by one unit.  The offset is now applied exactly once.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to pixels/inch and scale to 16.16 fixed point. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);  /* resource id: resolution info */
  (void) WriteBlobMSBShort(image,0);  /* empty Pascal-string resource name */
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) x_resolution);
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) y_resolution);
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  Emit a channel record: the signed channel id followed by a placeholder
  channel-data size of zero (patched later once the actual compressed
  length is known).  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    written;

  /* The id must be written before the size: both calls advance the blob. */
  written=WriteBlobMSBSignedShort(image,channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  Remove the embedded ICC profile resource (id 0x040F) from a (cloned) 8BIM
  resource block, so the ICC profile is not written twice when it is also
  emitted as a stand-alone resource.  The remaining resources are slid left
  over the removed entry and the StringInfo is shrunk accordingly.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
/* Too short to contain even one complete resource header. */
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
/* q marks the start of the current resource entry (header included). */
q=(unsigned char *) p;
/* Every resource entry must begin with the "8BIM" signature. */
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
/* Total entry size: 12-byte header plus data padded to 16 bits. */
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
/* Slide any following resources over the ICC entry, then shrink. */
if ((q+quantum < (datum+length-16)))
(void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
/* Advance past this resource's data, honoring the 16-bit padding rule. */
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
/*
  Remove the resolution resource (id 0x03ED) from a (cloned) 8BIM resource
  block: the writer always emits its own resolution resource (see
  WriteResolutionResourceBlock), so a stale copy from the input profile
  must not be written as well.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
/* Too short to contain even one complete resource header. */
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
/* q marks the start of the current resource entry (header included). */
q=(unsigned char *) p;
/* Malformed block: bail out on a missing "8BIM" signature. */
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
/* Padded data size; negative signals an invalid/overflowing count. */
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
{
/* Slide the rest of the block over this entry (header+data = cnt+12). */
(void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
/* Advance past this resource's data, honoring the 16-bit padding rule. */
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
/*
  Filter the "psd:additional-info" profile attached to the image according
  to the "psd:additional-info" image option:
    "all"       -- keep the profile untouched and return it;
    "selective" -- keep only entries whose 4-byte key is on the whitelist
                   below, compacting the profile in place;
    otherwise   -- drop the profile entirely and return NULL.
  Returns the (possibly filtered) profile, or NULL when nothing remains or
  the data is malformed.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
/* NOTE(review): option may be NULL here; presumably LocaleCompare treats
   NULL as unequal -- confirm against the MagickCore string API. */
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
/* From here on, 'length' accumulates the bytes kept after filtering. */
remaining_length=length;
length=0;
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(*p++);
key[1]=(*p++);
key[2]=(*p++);
key[3]=(*p++);
key[4]='\0';
/* Big-endian 32-bit data size of this entry. */
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
/* Truncated entry: treat the whole profile as malformed. */
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
/* Drop this entry by sliding the rest of the data over it (the 12-byte
   header at p-12 plus the 'size' data bytes starting at p).
   NOTE(review): source and destination can overlap; presumably
   CopyMagickMemory has memmove semantics -- confirm. */
if (remaining_length > 0)
p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
/* 'info' still aliases the StringInfo now owned by 'profile'. */
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
SetImageProfile(image,"psd:additional-info",info,exception);
return(profile);
}
/*
  Write the image (and any additional images in the list as layers) in
  Adobe Photoshop PSD/PSB format: file header, color-mode data, image
  resources, layer and mask records, per-layer channel data, and finally
  the flattened composite.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
char
layer_name[MagickPathExtent];
const char
*property;
const StringInfo
*icc_profile,
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
PSDInfo
psd_info;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
num_channels,
packet_size,
rounded_size,
size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/* Bytes per pixel packet: 2 or 1 bytes per sample depending on depth. */
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
/* PSB (version 2) is required for explicit request or huge dimensions. */
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
/*
File header: signature, version, reserved bytes, channel count, size.
*/
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
if (SetImageGray(image,exception) != MagickFalse)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
/* Anything not explicitly requested as CMYK is written as sRGB. */
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
/*
Color-mode data section: a 768-byte colormap for indexed images only.
*/
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* Work on a clone: resources we rewrite ourselves are stripped out. */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* Emit the ICC profile as its own resource (id 0x040F). */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
/* Pad to an even length as required by the resource format. */
if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
/*
Layer and mask information: layers come from the rest of the image list,
or from the image itself when it is the only one.
*/
base_image=GetNextImageInList(image);
if (base_image == (Image *) NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
/* Two placeholder sizes (section and layer-info), patched later. */
SetPSDSize(&psd_info,image,0);
SetPSDSize(&psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
/* A negative layer count signals that the first alpha is transparency. */
if (image->alpha_trait != UndefinedPixelTrait)
size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
default_color=strlen(property) == 9 ? 255 : 0;
}
/* Layer bounding box: top, left, bottom, right. */
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
next_image->rows));
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
next_image->columns));
channels=1U;
if ((next_image->storage_class != PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
total_channels=channels;
if (next_image->alpha_trait != UndefinedPixelTrait)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobMSBShort(image,total_channels);
/* Remember where this layer's channel records start so the channel
   sizes can be patched in after the pixel data is written. */
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(&psd_info,image,(signed short) i);
/* Channel id -1 is the transparency mask, -2 the user-supplied mask. */
if (next_image->alpha_trait != UndefinedPixelTrait)
size+=WriteChannelSize(&psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(&psd_info,image,-2);
size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
size+=WriteBlob(image,4,(const unsigned char *)
CompositeOperatorToPSDBlendMode(next_image->compose));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
1 << 0x02 : 1); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image,exception);
property=(const char *) GetImageProperty(next_image,"label",exception);
if (property == (const char *) NULL)
{
/* Unnamed layers get a synthetic name: L1, L2, ... */
(void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
/* Extra-data field length: padded Pascal name, additional info,
   mask record (20 bytes) and the two fixed 4-byte fields. */
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobMSBLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobMSBLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobMSBLong(image,20);
size+=WriteBlobMSBSignedLong(image,mask->page.y);
size+=WriteBlobMSBSignedLong(image,mask->page.x);
/* NOTE(review): the cast binds to mask->rows only, so page.y is added
   after the conversion to unsigned int -- confirm this is intended. */
size+=WriteBlobMSBLong(image,(const unsigned int) mask->rows+
mask->page.y);
size+=WriteBlobMSBLong(image,(const unsigned int) mask->columns+
mask->page.x);
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobMSBLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(&psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue,exception);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write the total size
*/
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 16),size_offset);
/* The layer-info size must be rounded up to an even number of bytes. */
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
/* Zip is not supported for the composite: temporarily force RLE. */
compression=image->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
DRB046-doall2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
Only one loop is associated with the omp for construct.
The inner loop's loop iteration variable needs an explicit private() clause,
otherwise it will be shared by default.
*/
/* Shared 100x100 work array written by the parallel loops below. */
int a[100][100];
int main()
{
int i,j;
/*
Each doubly nested loop pairs an outer worksharing loop with a nested
parallel-for on the inner loop: i and j are the loop iteration variables
of their respective omp-for constructs and are therefore predetermined
private inside those constructs, so the writes to a[i][j] do not race.
NOTE(review): printf is called without #include <stdio.h>, relying on an
implicit declaration -- confirm this matches the benchmark harness.
*/
#pragma omp parallel for
for (i=0;i<100;i++)
#pragma omp parallel for
for (j=0;j<100;j++)
a[i][j] = i + j;
#pragma omp parallel for
for (i=0;i<100;i++)
#pragma omp parallel for
for (j=0;j<100;j++)
a[i][j]=a[i][j]+1;
/* Sequential output pass: prints the final contents of a. */
for (i=0;i<100;i++)
for (j=0;j<100;j++)
printf("%d\n", a[i][j]);
return 0;
}
|
veb-cycles.h | /*
* Copyright 2018-2021 Kyle Berney
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef VEB_CYCLES_H
#define VEB_CYCLES_H
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include "common.h"
#include "cycles.h"
//Permutes a sorted array of n = 2^d - 1 elements into the van Emde Boas
//tree layout via level-order B-tree involutions.  The top subtree has
//height ceil{(h - 1)/2} and each leaf subtree height floor{(h - 1)/2},
//where B = # of elements per leaf subtree (parameter l below).
template<typename TYPE>
void permutevEB(TYPE *A, uint64_t n, uint32_t d) {
    if (d == 1) return;  //a single node is already in vEB order
    if (d % 2 == 0) {
        //balanced split: root and leaf subtrees are the same size
        uint64_t sub_n = (uint64_t)sqrt(n);  //floor{sqrt(n)} = 2^{d/2} - 1
        equidistant_gather_io<TYPE>(A, sub_n, sub_n);
        //recurse on the root subtree and every leaf subtree in turn
        for (uint64_t off = 0; off < n; off += sub_n) {
            permutevEB<TYPE>(&A[off], sub_n, d/2);
        }
        return;
    }
    //unbalanced split: each leaf subtree holds 2*|root subtree| + 1 elements
    uint64_t root_n = pow(2, d/2) - 1;
    uint64_t leaf_n = 2*root_n + 1;
    equidistant_gather_io<TYPE>(A, root_n, leaf_n);
    permutevEB<TYPE>(A, root_n, d/2);  //recurse on the root subtree
    for (uint64_t off = root_n; off < n; off += leaf_n) {
        permutevEB<TYPE>(&A[off], leaf_n, d/2 + 1);  //each leaf subtree
    }
}
//Permutes sorted array into the van Emde Boas tree layout via Level-order B-tree involutions
//B = # of leaf elements per leaf subtree, i.e., parameter l (in the below code)
//The top subtree has a height of ceil{(h - 1)/2} and leaf subtrees have height of floor{(h - 1)/2}
//Parallel variant: p = number of threads available to this call.
template<typename TYPE>
void permutevEB_parallel(TYPE *A, uint64_t n, uint32_t d, uint32_t p) {
//NOTE(review): base case tests n == 1 where the sequential version tests
//d == 1; for a perfect tree (n = 2^d - 1) the two are equivalent.
if (n == 1) return;
else if (d % 2 == 0) { //balanced, |T_{root}| = |T_{leaf}|
uint64_t m = (uint64_t)sqrt(n); //floor{sqrt(n)} = 2^{d/2} - 1
equidistant_gather_io_parallel<TYPE>(A, m, m, p);
if (n/m >= p) { //if number of recursive calls is larger than p, have each processor sequentially permute in parallel
#pragma omp parallel for shared(A, n, d, p, m) schedule(guided) num_threads(p)
for (uint64_t i = 0; i < n; i += m) { //Recurse on each subtree
permutevEB<TYPE>(&A[i], m, d/2);
}
}
else { //else number of processors available is larger than number of recursive calls
//split the thread budget evenly across the recursive calls
uint32_t threads_per = ceil(p/(double)(n/m));
#pragma omp parallel for shared(A, n, d, p, m, threads_per) schedule(guided) num_threads(n/m)
for (uint64_t i = 0; i < n; i += m) { //Recurse on each subtree
permutevEB_parallel<TYPE>(&A[i], m, d/2, threads_per);
}
}
}
else { //unbalanced, |T_{leaf}| = 2*|T_{root}| + 1
uint64_t r = pow(2, d/2) - 1;
uint64_t l = 2*r + 1;
equidistant_gather_io_parallel<TYPE>(A, r, l, p);
permutevEB_parallel<TYPE>(A, r, d/2, p); //Recurse on root subtree
uint64_t numLeafTrees = (n-r)/l;
if (p <= numLeafTrees) {
#pragma omp parallel for shared(A, n, d, p, r, l, numLeafTrees) schedule(guided) num_threads(p)
for (uint64_t i = r; i < n; i += l) { //Recurse on each leaf subtree
permutevEB<TYPE>(&A[i], l, d/2 + 1);
}
}
else {
//more threads than leaf subtrees: give each recursion several threads
uint32_t threads_per = ceil(p/(double)numLeafTrees);
#pragma omp parallel for shared(A, n, d, p, r, l, numLeafTrees, threads_per) schedule(guided) num_threads(numLeafTrees)
for (uint64_t i = r; i < n; i += l) { //Recurse on each leaf subtree
permutevEB_parallel<TYPE>(&A[i], l, d/2 + 1, threads_per);
}
}
}
}
//Permutes a sorted array of n elements into the van Emde Boas tree layout
//when the tree is not perfect.
//Assumes 2^{d-1} - 1 < n < 2^d - 1
template<typename TYPE>
void permutevEB_nonperfect(TYPE *A, uint64_t n, uint32_t d) {
//#ifdef DEBUG
//printf("d = %u; n = %lu ==> ", d, n);
//#endif
if (d == 1) return;
else {
uint32_t root_d = (d - 2)/2 + 1; //floor((d - 2)/2) + 1
uint32_t leaf_d = d - root_d; //ceil((d - 2)/2.) + 1
uint64_t r = pow(2, root_d) - 1; //number of elements in the root subtree
uint64_t l = pow(2, leaf_d) - 1; //number of elements in the full leaf subtrees
uint64_t num_full = (n - r) / l; //number of full leaf subtrees
uint64_t inc_n = n - r - num_full*l; //number of nodes in the incomplete leaf subtree
//#ifdef DEBUG
//printf("root_d = %u; leaf_d = %u; r = %lu; l = %lu\n", root_d, leaf_d, r, l);
//printf("num_full = %lu; inc_n = %lu\n", num_full, inc_n);
//#endif
//Gather root elements to the front of the array
equidistant_gather_io<TYPE>(A, num_full, l);
//If fewer full leaf subtrees than root elements, shift the remainder
//right so the root subtree occupies a contiguous prefix of r elements.
if (num_full < r) {
shift_right<TYPE>(&A[num_full], n - num_full, r - num_full);
}
//Recurse
uint64_t size;
if (root_d == leaf_d) {
//root and full leaf subtrees are the same size: one uniform loop
size = (num_full + 1)*r;
for (uint64_t i = 0; i < size; i += r) { //Recurse on root and full leaf subtrees
permutevEB<TYPE>(&A[i], r, root_d);
}
}
else {
permutevEB<TYPE>(A, r, root_d); //Recurse on root subtree
size = r + num_full*l;
for (uint64_t i = r; i < size; i += l) { //Recurse on full leaf subtrees
permutevEB<TYPE>(&A[i], l, leaf_d);
}
}
if (inc_n > 0) {
uint32_t inc_d = log2(inc_n) + 1;
//#ifdef DEBUG
//printf("inc_d = %u\n\n", inc_d);
//#endif
//Recurse on incomplete leaf subtree
if (inc_n != pow(2, inc_d) - 1) { //non-perfect incomplete tree
permutevEB_nonperfect<TYPE>(&A[size], inc_n, inc_d);
}
else { //perfect incomplete tree
permutevEB<TYPE>(&A[size], inc_n, inc_d);
}
}
}
}
//Permutes a sorted array of n elements into the van Emde Boas tree layout
//in parallel using p threads, when the tree is not perfect.
//Assumes 2^{d-1} - 1 < n < 2^d - 1
//(A ~115-line commented-out alternative, "Parallel Solution #2", was
//removed; per its own note it measured slightly slower than this code.)
template<typename TYPE>
void permutevEB_nonperfect_parallel(TYPE *A, uint64_t n, uint32_t d, uint32_t p) {
if (d == 1) return;
else {
uint32_t root_d = (d - 2)/2 + 1; //floor((d - 2)/2) + 1
uint32_t leaf_d = d - root_d; //ceil((d - 2)/2.) + 1
uint64_t r = pow(2, root_d) - 1; //number of elements in the root subtree
uint64_t l = pow(2, leaf_d) - 1; //number of elements in the full leaf subtrees
uint64_t num_full = (n - r) / l; //number of full leaf subtrees
uint64_t inc_n = n - r - num_full*l; //number of nodes in the incomplete leaf subtree
//Gather root elements to the front of the array
equidistant_gather_io_parallel<TYPE>(A, num_full, l, p);
if (num_full < r) {
shift_right_parallel<TYPE>(&A[num_full], n - num_full, r - num_full, p);
}
//Recurse on root and leaf subtrees, choosing the sequential or parallel
//variant according to how many threads are available per recursive call.
uint64_t size;
if (root_d == leaf_d) {
size = (num_full + 1)*r;
if (p <= num_full + 1) {
#pragma omp parallel for shared(A, n, d, p, root_d, leaf_d, r, l, num_full, inc_n, size) schedule(guided) num_threads(p)
for (uint64_t i = 0; i < size; i += r) { //Recurse on root and full leaf subtrees
permutevEB<TYPE>(&A[i], r, root_d);
}
}
else {
uint32_t threads_per = ceil(p/(double)(num_full + 1));
#pragma omp parallel for shared(A, n, d, p, root_d, leaf_d, r, l, num_full, inc_n, size, threads_per) schedule(guided) num_threads(num_full+1)
for (uint64_t i = 0; i < size; i += r) { //Recurse on root and full leaf subtrees
permutevEB_parallel<TYPE>(&A[i], r, root_d, threads_per);
}
}
}
else {
permutevEB_parallel<TYPE>(A, r, root_d, p); //Recurse on root subtree
size = r + num_full*l;
if (p <= num_full) {
#pragma omp parallel for shared(A, n, d, p, root_d, leaf_d, r, l, num_full, inc_n, size) schedule(guided) num_threads(p)
for (uint64_t i = r; i < size; i += l) { //Recurse on full leaf subtrees
permutevEB<TYPE>(&A[i], l, leaf_d);
}
}
else {
uint32_t threads_per = ceil(p/(double)num_full);
#pragma omp parallel for shared(A, n, d, p, root_d, leaf_d, r, l, num_full, inc_n, size, threads_per) schedule(guided) num_threads(num_full)
for (uint64_t i = r; i < size; i += l) { //Recurse on full leaf subtrees
permutevEB_parallel<TYPE>(&A[i], l, leaf_d, threads_per);
}
}
}
if (inc_n > 0) {
uint32_t inc_d = log2(inc_n) + 1;
//Recurse on incomplete leaf subtree
if (inc_n != pow(2, inc_d) - 1) { //non-perfect incomplete tree
permutevEB_nonperfect_parallel<TYPE>(&A[size], inc_n, inc_d, p);
}
else { //perfect incomplete tree
permutevEB_parallel<TYPE>(&A[size], inc_n, inc_d, p);
}
}
}
}
template<typename TYPE>
double timePermutevEB(TYPE *A, uint64_t n, uint32_t p) {
  // Permutes the n-element array A into van Emde Boas layout (serial when
  // p == 1, parallel otherwise) and returns the elapsed wall-clock time in
  // milliseconds, measured with CLOCK_MONOTONIC.
  struct timespec start, end;
  clock_gettime(CLOCK_MONOTONIC, &start);

  // h = floor(log2(n)), computed with integer shifts. The previous
  // log2()/pow() double-based computation loses precision once n approaches
  // 2^53 and cannot represent 2^64 - 1 at all.
  uint32_t h = 0;
  for (uint64_t t = n >> 1; t != 0; t >>= 1) ++h;

  // n == 2^(h+1) - 1 (perfect tree) iff n + 1 is a power of two,
  // i.e. (n & (n + 1)) == 0 — an exact test for every 64-bit n.
  if ((n & (n + 1)) != 0) { //non-full tree
    if (p == 1) permutevEB_nonperfect<TYPE>(A, n, h+1);
    else permutevEB_nonperfect_parallel<TYPE>(A, n, h+1, p);
  }
  else { //full tree
    if (p == 1) permutevEB<TYPE>(A, n, h+1);
    else permutevEB_parallel<TYPE>(A, n, h+1, p);
  }
  clock_gettime(CLOCK_MONOTONIC, &end);
  double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. + start.tv_nsec)) / 1000000.; //millisecond
  return ms;
}
#endif |
omp_outmes.c | #include <omp.h>
#include <iostream>
#include <stdio.h>
int main(int argc, char ** argv)
{
  // Demo 1: unsynchronized std::cout — the individual stream insertions of
  // different threads may interleave arbitrarily, garbling the output.
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    std::cout << "cout " << tid << " hello world" << std::endl;
  }

  // Demo 2: printf — whole calls often come out atomically, but that is an
  // implementation detail, not a guarantee.
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    printf("printf %d hello world\n", tid);
  }

  // Demo 3: the portable way — serialize the output statement with an
  // OpenMP critical section so only one thread prints at a time.
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    #pragma omp critical
    {
      // the same works for printf("critical %d hello world\n", tid);
      std::cout << "cout " << tid << " hello world" << std::endl;
    }
  }
  return 0;
}
DenseVector.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseVector.h
// \brief Header file for the OpenMP-based dense vector SMP implementation
//
// Copyright (C) 2013 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseVector.h>
#include <blaze/math/expressions/SparseVector.h>
#include <blaze/math/Functions.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/traits/SubvectorExprTrait.h>
#include <blaze/math/typetraits/IsDenseVector.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Subvector.h>
#include <blaze/system/SMP.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/logging/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
#include <blaze/util/typetraits/IsSame.h>
namespace blaze {
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side dense vector
        , bool TF2 >      // Transpose flag of the right-hand side dense vector
void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Must be entered from inside an OpenMP parallel region: the
   // '#pragma omp for' below is a worksharing construct that relies on the
   // enclosing team of threads.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>  ET1;
   typedef ElementType_<VT2>  ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Number of elements per SIMD register for the target element type
   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // Vectorization requires both operand types to support it and to share
   // the same element type
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   const int    threads      ( omp_get_num_threads() );
   // 'addon' rounds the per-thread share up when the size does not divide evenly
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   // Pad each chunk up to the next multiple of SIMDSIZE so that every chunk
   // start remains on a SIMD boundary (SIMDSIZE is assumed a power of two)
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   // One chunk per iteration; 'nowait' drops the implicit barrier at loop end
   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Chunk padding may push trailing chunks past the end of the vector;
      // those iterations have no work to do
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch to the best available aligned/unaligned subvector combination
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         assign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         assign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         assign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         assign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side sparse vector
        , bool TF2 >      // Transpose flag of the right-hand side sparse vector
void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Worksharing construct below requires an enclosing parallel region
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Split the target vector into one contiguous chunk per thread, rounding
   // the chunk size up so the whole vector is covered
   const int    total  ( omp_get_num_threads() );
   const size_t vecSize( (~lhs).size() );
   const size_t roundUp( ( vecSize % total != 0UL )? 1UL : 0UL );
   const size_t chunk  ( vecSize / total + roundUp );

   #pragma omp for schedule(dynamic,1) nowait
   for( int t=0; t<total; ++t )
   {
      const size_t begin( t*chunk );
      // Rounding may leave trailing chunks with no elements
      if( begin < vecSize ) {
         const size_t len( min( chunk, vecSize - begin ) );
         UnalignedTarget dst( subvector<unaligned>( ~lhs, begin, len ) );
         assign( dst, subvector<unaligned>( ~rhs, begin, len ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side vector
        , bool TF2 >      // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // Selected (via SFINAE) when at least one operand is not SMP-assignable:
   // no parallelization is attempted, simply forward to the serial kernel
   assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function performs the OpenMP-based SMP assignment to a dense vector. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side vector
        , bool TF2 >      // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Fork a team of threads only when SMP assignment is both allowed and
      // considered profitable by the right-hand side; otherwise fall back to
      // a plain serial assignment.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
         #pragma omp parallel shared( lhs, rhs )
         smpAssign_backend( ~lhs, ~rhs );
      }
      else {
         assign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be added.
// \return void
//
// This function is the backend implementation the OpenMP-based SMP addition assignment of a
// dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side dense vector
        , bool TF2 >      // Transpose flag of the right-hand side dense vector
void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Must be entered from inside an OpenMP parallel region: the
   // '#pragma omp for' below is a worksharing construct that relies on the
   // enclosing team of threads.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>  ET1;
   typedef ElementType_<VT2>  ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Number of elements per SIMD register for the target element type
   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // Vectorization requires both operand types to support it and to share
   // the same element type
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   const int    threads      ( omp_get_num_threads() );
   // 'addon' rounds the per-thread share up when the size does not divide evenly
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   // Pad each chunk up to the next multiple of SIMDSIZE so that every chunk
   // start remains on a SIMD boundary (SIMDSIZE is assumed a power of two)
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   // One chunk per iteration; 'nowait' drops the implicit barrier at loop end
   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Chunk padding may push trailing chunks past the end of the vector;
      // those iterations have no work to do
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch to the best available aligned/unaligned subvector combination
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         addAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         addAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be added.
// \return void
//
// This function is the backend implementation the OpenMP-based SMP addition assignment of a
// sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side sparse vector
        , bool TF2 >      // Transpose flag of the right-hand side sparse vector
void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Worksharing construct below requires an enclosing parallel region
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Split the target vector into one contiguous chunk per thread, rounding
   // the chunk size up so the whole vector is covered
   const int    total  ( omp_get_num_threads() );
   const size_t vecSize( (~lhs).size() );
   const size_t roundUp( ( vecSize % total != 0UL )? 1UL : 0UL );
   const size_t chunk  ( vecSize / total + roundUp );

   #pragma omp for schedule(dynamic,1) nowait
   for( int t=0; t<total; ++t )
   {
      const size_t begin( t*chunk );
      // Rounding may leave trailing chunks with no elements
      if( begin < vecSize ) {
         const size_t len( min( chunk, vecSize - begin ) );
         UnalignedTarget dst( subvector<unaligned>( ~lhs, begin, len ) );
         addAssign( dst, subvector<unaligned>( ~rhs, begin, len ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side vector
        , bool TF2 >      // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // Selected (via SFINAE) when at least one operand is not SMP-assignable:
   // no parallelization is attempted, simply forward to the serial kernel
   addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side vector
        , bool TF2 >      // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Fork a team of threads only when SMP assignment is both allowed and
      // considered profitable by the right-hand side; otherwise fall back to
      // a plain serial addition assignment.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
         #pragma omp parallel shared( lhs, rhs )
         smpAddAssign_backend( ~lhs, ~rhs );
      }
      else {
         addAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be subtracted.
// \return void
//
// This function is the backend implementation the OpenMP-based SMP subtraction assignment of a
// dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side dense vector
        , bool TF2 >      // Transpose flag of the right-hand side dense vector
void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Must be entered from inside an OpenMP parallel region: the
   // '#pragma omp for' below is a worksharing construct that relies on the
   // enclosing team of threads.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>  ET1;
   typedef ElementType_<VT2>  ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Number of elements per SIMD register for the target element type
   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // Vectorization requires both operand types to support it and to share
   // the same element type
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   const int    threads      ( omp_get_num_threads() );
   // 'addon' rounds the per-thread share up when the size does not divide evenly
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   // Pad each chunk up to the next multiple of SIMDSIZE so that every chunk
   // start remains on a SIMD boundary (SIMDSIZE is assumed a power of two)
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   // One chunk per iteration; 'nowait' drops the implicit barrier at loop end
   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Chunk padding may push trailing chunks past the end of the vector;
      // those iterations have no work to do
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch to the best available aligned/unaligned subvector combination
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         subAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         subAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment of
// a sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side sparse vector
        , bool TF2 >      // Transpose flag of the right-hand side sparse vector
void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Worksharing construct below requires an enclosing parallel region
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Split the target vector into one contiguous chunk per thread, rounding
   // the chunk size up so the whole vector is covered
   const int    total  ( omp_get_num_threads() );
   const size_t vecSize( (~lhs).size() );
   const size_t roundUp( ( vecSize % total != 0UL )? 1UL : 0UL );
   const size_t chunk  ( vecSize / total + roundUp );

   #pragma omp for schedule(dynamic,1) nowait
   for( int t=0; t<total; ++t )
   {
      const size_t begin( t*chunk );
      // Rounding may leave trailing chunks with no elements
      if( begin < vecSize ) {
         const size_t len( min( chunk, vecSize - begin ) );
         UnalignedTarget dst( subvector<unaligned>( ~lhs, begin, len ) );
         subAssign( dst, subvector<unaligned>( ~rhs, begin, len ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a vector to
// a dense vector. Due to the explicit application of the SFINAE principle, this function can
// only be selected by the compiler in case both operands are SMP-assignable and the element
// types of both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side vector
        , bool TF2 >      // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // Selected (via SFINAE) when at least one operand is not SMP-assignable:
   // no parallelization is attempted, simply forward to the serial kernel
   subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due
// to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1    // Type of the left-hand side dense vector
        , bool TF1        // Transpose flag of the left-hand side dense vector
        , typename VT2    // Type of the right-hand side vector
        , bool TF2 >      // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Fork a team of threads only when SMP assignment is both allowed and
      // considered profitable by the right-hand side; otherwise fall back to
      // a plain serial subtraction assignment.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
         #pragma omp parallel shared( lhs, rhs )
         smpSubAssign_backend( ~lhs, ~rhs );
      }
      else {
         subAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP multiplication assignment of a dense vector to a
// dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be multiplied.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP multiplication assignment
// of a dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2 >    // Transpose flag of the right-hand side dense vector
void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>                   LT;
   typedef ElementType_<VT2>                   RT;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedSub;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedSub;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // A vectorized per-thread partition is only possible when both operand
   // types are SIMD-enabled and share the same element type.
   const bool vectorizable( VT1::simdEnabled && VT2::simdEnabled && IsSame_<LT,RT> );
   const bool lhsIsAligned( (~lhs).isAligned() );
   const bool rhsIsAligned( (~rhs).isAligned() );

   const int    nthreads( omp_get_num_threads() );
   const size_t total   ( (~lhs).size() );

   // One chunk per thread (rounded up), padded to a multiple of SIMDSIZE
   // whenever a vectorized assignment is possible.
   const size_t baseChunk( total / nthreads + ( ( total % nthreads ) != 0UL ? 1UL : 0UL ) );
   const size_t excess   ( baseChunk & ( SIMDSIZE - 1UL ) );
   const size_t chunk    ( ( vectorizable && excess ) ? ( baseChunk - excess + SIMDSIZE ) : baseChunk );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<nthreads; ++i )
   {
      const size_t offset( i*chunk );

      if( offset >= total )
         continue;

      const size_t n( min( chunk, total - offset ) );

      // Select the aligned/unaligned subvector views depending on the runtime
      // alignment of both operands.
      if( vectorizable && lhsIsAligned ) {
         AlignedSub target( subvector<aligned>( ~lhs, offset, n ) );
         if( rhsIsAligned )
            multAssign( target, subvector<aligned>( ~rhs, offset, n ) );
         else
            multAssign( target, subvector<unaligned>( ~rhs, offset, n ) );
      }
      else if( vectorizable && rhsIsAligned ) {
         UnalignedSub target( subvector<unaligned>( ~lhs, offset, n ) );
         multAssign( target, subvector<aligned>( ~rhs, offset, n ) );
      }
      else {
         UnalignedSub target( subvector<unaligned>( ~lhs, offset, n ) );
         multAssign( target, subvector<unaligned>( ~rhs, offset, n ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP multiplication assignment of a sparse vector to a
// dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be multiplied.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP multiplication assignment
// of a sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side sparse vector
        , bool TF2 >    // Transpose flag of the right-hand side sparse vector
void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubvectorExprTrait_<VT1,unaligned> UnalignedSub;

   // Sparse right-hand side: no SIMD padding, plain equal-share partition.
   const int    nthreads( omp_get_num_threads() );
   const size_t total   ( (~lhs).size() );
   const size_t chunk   ( total / nthreads + ( ( total % nthreads ) != 0UL ? 1UL : 0UL ) );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<nthreads; ++i )
   {
      const size_t offset( i*chunk );

      if( offset >= total )
         continue;

      const size_t n( min( chunk, total - offset ) );

      UnalignedSub target( subvector<unaligned>( ~lhs, offset, n ) );
      multAssign( target, subvector<unaligned>( ~rhs, offset, n ) );
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// vector. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case at least one of the two operands is not SMP-assignable,
// in which case the operation falls back to the serial multiplication assignment.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
, Or< Not< IsSMPAssignable<VT1> >
, Not< IsSMPAssignable<VT2> > > > >
smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;

BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

// Serial fallback: selected (via SFINAE above) when at least one operand is
// not SMP-assignable, so the operation is forwarded directly to the
// non-parallel multiplication assignment.
multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be multiplied.
// \return void
//
// This function implements the OpenMP-based SMP multiplication assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Dispatch to the OpenMP backend only when no serial section is active
      // and the right-hand side supports an SMP assignment; otherwise run the
      // plain serial multiplication assignment.
      const bool runParallel( !isSerialSectionActive() && (~rhs).canSMPAssign() );

      if( runParallel ) {
#pragma omp parallel shared( lhs, rhs )
         smpMultAssign_backend( ~lhs, ~rhs );
      }
      else {
         multAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// DIVISION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP division assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector divisor.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP division assignment of
// a dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2 >    // Transpose flag of the right-hand side dense vector
void smpDivAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>                   LT;
   typedef ElementType_<VT2>                   RT;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedSub;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedSub;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // A vectorized per-thread partition is only possible when both operand
   // types are SIMD-enabled and share the same element type.
   const bool vectorizable( VT1::simdEnabled && VT2::simdEnabled && IsSame_<LT,RT> );
   const bool lhsIsAligned( (~lhs).isAligned() );
   const bool rhsIsAligned( (~rhs).isAligned() );

   const int    nthreads( omp_get_num_threads() );
   const size_t total   ( (~lhs).size() );

   // One chunk per thread (rounded up), padded to a multiple of SIMDSIZE
   // whenever a vectorized assignment is possible.
   const size_t baseChunk( total / nthreads + ( ( total % nthreads ) != 0UL ? 1UL : 0UL ) );
   const size_t excess   ( baseChunk & ( SIMDSIZE - 1UL ) );
   const size_t chunk    ( ( vectorizable && excess ) ? ( baseChunk - excess + SIMDSIZE ) : baseChunk );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<nthreads; ++i )
   {
      const size_t offset( i*chunk );

      if( offset >= total )
         continue;

      const size_t n( min( chunk, total - offset ) );

      // Select the aligned/unaligned subvector views depending on the runtime
      // alignment of both operands.
      if( vectorizable && lhsIsAligned ) {
         AlignedSub target( subvector<aligned>( ~lhs, offset, n ) );
         if( rhsIsAligned )
            divAssign( target, subvector<aligned>( ~rhs, offset, n ) );
         else
            divAssign( target, subvector<unaligned>( ~rhs, offset, n ) );
      }
      else if( vectorizable && rhsIsAligned ) {
         UnalignedSub target( subvector<unaligned>( ~lhs, offset, n ) );
         divAssign( target, subvector<aligned>( ~rhs, offset, n ) );
      }
      else {
         UnalignedSub target( subvector<unaligned>( ~lhs, offset, n ) );
         divAssign( target, subvector<unaligned>( ~rhs, offset, n ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector divisor.
// \return void
//
// This function implements the default OpenMP-based SMP division assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case at least one of the two operands is not SMP-assignable, in which
// case the operation falls back to the serial division assignment.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side vector
, bool TF2 > // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
, Or< Not< IsSMPAssignable<VT1> >
, Not< IsSMPAssignable<VT2> > > > >
smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;

BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

// Serial fallback: selected (via SFINAE above) when at least one operand is
// not SMP-assignable, so the operation is forwarded directly to the
// non-parallel division assignment.
divAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector divisor.
// \return void
//
// This function implements the OpenMP-based SMP division assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Dispatch to the OpenMP backend only when no serial section is active
      // and the right-hand side supports an SMP assignment; otherwise run the
      // plain serial division assignment.
      const bool runParallel( !isSerialSectionActive() && (~rhs).canSMPAssign() );

      if( runParallel ) {
#pragma omp parallel shared( lhs, rhs )
         smpDivAssign_backend( ~lhs, ~rhs );
      }
      else {
         divAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINTS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
// Compile-time guard: this translation unit is only valid when Blaze's
// OpenMP-based parallelization mode is enabled.
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
implicit_particle_mover.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "implicit_particle_mover.kernel_inc.h"
/* Kernel-instance initializer for the push_r kernel.  The OpenMP backend has
 * no per-instance state to set up, so this is a no-op returning 0 (success).
 * Both parameters are intentionally unused. */
int openmp_split_pass_xyzE_particle_push_r_init (openmp_pscmc_env * pe ,openmp_split_pass_xyzE_particle_push_r_struct * kerstr ){
return 0 ;}
/* Reports the size in bytes of the push_r kernel parameter structure through
 * the output pointer `len`. */
void openmp_split_pass_xyzE_particle_push_r_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_xyzE_particle_push_r_struct);
}
/* Number of compute units exposed by the OpenMP backend: the maximum OpenMP
 * thread count.  The kernel-structure argument is unused. */
int openmp_split_pass_xyzE_particle_push_r_get_num_compute_units (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension extent advertised to the runtime.  IDX_OPT_MAX is a constant
 * from the generated kernel headers — semantics defined by the generator. */
int openmp_split_pass_xyzE_particle_push_r_get_xlen (){
return IDX_OPT_MAX ;}
/*
 * Executes the push_r kernel over a scmc_internal_g_xlen x scmc_internal_g_ylen
 * iteration space.  Rows (yid) are dealt out round-robin across the OpenMP
 * thread team; each thread then iterates all xid positions of its rows.
 * Note: xid itself is not passed to the kernel — presumably the generator
 * folds the x-dimension into the kernel body (TODO confirm against generator).
 * Always returns 0.
 *
 * The generated original also computed ysingle/ymin/ymax for a blocked
 * decomposition but never used them; that dead code has been removed.
 */
int openmp_split_pass_xyzE_particle_push_r_exec (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        const int numt = omp_get_num_threads();  /* team size */
        const int tid = omp_get_thread_num();    /* this thread's rank */
        int xid;
        int yid;

        /* Round-robin row distribution: thread `tid` handles rows tid, tid+numt, ... */
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_xyzE_particle_push_r_scmc_kernel(
                    kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
                    kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
                    kerstr->N_l[0], kerstr->N_M[0], kerstr->push_J[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the push_r kernel: each binds one backend memory
 * buffer (pm->d_data) to the corresponding field of the kernel structure and
 * returns 0 on success.  The generated originals were declared `int` but fell
 * off the end without a return statement — undefined behavior if the caller
 * inspects the result — so every setter now returns 0 explicitly. */
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_xyzw (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_cu_cache (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_cu_xyzw (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_fieldE (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_fieldB (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_FoutJ (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_XLEN (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_YLEN (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_ZLEN (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_ovlp (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_numvec (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_num_ele (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_grid_cache_len (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_cu_cache_length (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_Mass0 (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_Charge0 (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_Deltat (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_DELTA_X (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_DELTA_Y (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_DELTA_Z (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_N_l (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->N_l = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_N_M (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->N_M = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_push_r_scmc_set_parameter_push_J (openmp_split_pass_xyzE_particle_push_r_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->push_J = pm->d_data; return 0;
}
/* Kernel-instance initializer: no per-instance state to set up in the OpenMP
 * backend, so this is a no-op returning 0.  Both parameters are unused. */
int openmp_split_pass_xyzE_particle_init (openmp_pscmc_env * pe ,openmp_split_pass_xyzE_particle_struct * kerstr ){
return 0 ;}
/* Reports the size in bytes of the kernel parameter structure via `len`. */
void openmp_split_pass_xyzE_particle_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_xyzE_particle_struct ));
}
/* Number of compute units == maximum OpenMP thread count; kerstr is unused. */
int openmp_split_pass_xyzE_particle_get_num_compute_units (openmp_split_pass_xyzE_particle_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension extent advertised to the runtime.  IDX_OPT_MAX is a constant
 * from the generated kernel headers — semantics defined by the generator. */
int openmp_split_pass_xyzE_particle_get_xlen (){
return IDX_OPT_MAX ;}
/*
 * Executes the particle kernel over a scmc_internal_g_xlen x scmc_internal_g_ylen
 * iteration space.  Rows (yid) are dealt out round-robin across the OpenMP
 * thread team; each thread then iterates all xid positions of its rows.
 * Note: xid itself is not passed to the kernel — presumably the generator
 * folds the x-dimension into the kernel body (TODO confirm against generator).
 * Always returns 0.
 *
 * The generated original also computed ysingle/ymin/ymax for a blocked
 * decomposition but never used them; that dead code has been removed.
 */
int openmp_split_pass_xyzE_particle_exec (openmp_split_pass_xyzE_particle_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        const int numt = omp_get_num_threads();  /* team size */
        const int tid = omp_get_thread_num();    /* this thread's rank */
        int xid;
        int yid;

        /* Round-robin row distribution: thread `tid` handles rows tid, tid+numt, ... */
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_xyzE_particle_scmc_kernel(
                    kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
                    kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
                    kerstr->N_l[0], kerstr->N_M[0], kerstr->push_J[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the particle kernel: each binds one backend memory
 * buffer (pm->d_data) to the corresponding field of the kernel structure and
 * returns 0 on success.  The generated originals were declared `int` but fell
 * off the end without a return statement — undefined behavior if the caller
 * inspects the result — so every setter now returns 0 explicitly. */
int openmp_split_pass_xyzE_particle_scmc_set_parameter_xyzw (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_cu_cache (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_cu_xyzw (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_fieldE (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_fieldB (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_FoutJ (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_XLEN (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_YLEN (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_ZLEN (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_ovlp (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_numvec (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_num_ele (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_grid_cache_len (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_cu_cache_length (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_Mass0 (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_Charge0 (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_Deltat (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_DELTA_X (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_DELTA_Y (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_DELTA_Z (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_N_l (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->N_l = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_N_M (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->N_M = pm->d_data; return 0;
}
int openmp_split_pass_xyzE_particle_scmc_set_parameter_push_J (openmp_split_pass_xyzE_particle_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->push_J = pm->d_data; return 0;
}
|
Dory.c | //#define PY_SSIZE_T_CLEAN
//#include <Python.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <pthread.h>
//////////////////
#define VREDUCE1
//////////////////
#define VREDUCE2
//#define COMBIDX
// Extract hom cycles
#define HOM_CYCLES
// Store V adaptively to extract homology basis
#define ADAPTIVE_V_STORAGE
// minimize lengths of birth cycles
//#define MINIMIZE_BIRTH_CYCLES
#define STORE_LENGTHS_CYCLES
//#define ADD_0PERS_CYCLES
//#define MINIMIZE_HOM_CYCLES
/////////////////////////////////////////////////
// min of max dist. reduction
/////////////////////////////////////////////////
#define DISTMAT_MINMAX
/////////////////////////////////////////////////
/////////////////////////////////////////////////
/////////////////////////////////////////////////
//#define POINTCLOUD_MINMAX
#define RECORD_V_USAGE
/////////////////
// SAVING MACROS
/////////////////
//#define SAVEV
//#define SAVEPD
//#define PRINT
/////////////////
// DEBUG MACROS
/////////////////
//#define DEBUGCOMBIDX
//#define VDEBUG
//#define DEBUGPIVOTS
//#define COH1DEBUG
//#define COMB_IDX(a, b)((a) > (b) ?
// self->g_edges_comb_idx[(EDGE_ID)((a*(a-1))/2 + b)] : \
// ( (a) < (b) ? self->g_edges_comb_idx[(EDGE_ID)((b*(b-1))/2 + a)] \
// : self->g_n_valid_edges))
//
/* COMB_IDX0(a,b): index of the unordered pair {a,b} (a != b) in the flattened
 * lower-triangular enumeration max(a,b)*(max(a,b)-1)/2 + min(a,b).
 * COMB_IDX(a,b): looks that pair up in self->g_edges_comb_idx, with the a==b
 * diagonal mapped to the sentinel self->g_n_valid_edges.
 * Fix: every macro parameter is now parenthesized — the originals expanded
 * `a*(a-1)` and `a > b` unparenthesized, which silently miscomputes for
 * expression arguments such as COMB_IDX0(x+1, y). */
#define COMB_IDX0(a,b) ( (a) > (b) ? (EDGE_ID)(((a)*((a)-1))/2 + (b)) : (EDGE_ID)(((b)*((b)-1))/2 + (a)) )
#define COMB_IDX(a,b) ( (a) == (b) ? self->g_n_valid_edges : self->g_edges_comb_idx[COMB_IDX0(a, b)])
/* Core scalar aliases used throughout the filtration code. */
typedef unsigned long long int BIGINT;
typedef unsigned int EDGE_ID;   /* index of an edge in the filtration order */
typedef unsigned int VERT_ID;   /* index of a vertex */
typedef double PAR;             /* filtration parameter (e.g. a distance) */
/* An edge of the 1-skeleton: its two endpoint vertices and its length. */
typedef struct{
VERT_ID verts[2];
PAR length;
}F1;
/* Adjacency entry: a neighboring vertex and the order (edge index) of the
 * connecting edge. */
typedef struct{
VERT_ID neighbor;
EDGE_ID order;
}Neighbors;
/* A simplex encoded by two edge keys (interpretation depends on dimension —
 * see the per-use comments elsewhere in this file). */
typedef struct{
EDGE_ID key1;
EDGE_ID key2;
}simplex;
/* H0 pivot record: column index in the reduction matrix plus the pivot edge
 * (o_ab = ordered edge index). */
typedef struct{
EDGE_ID col_idx;
EDGE_ID o_ab;
}H0_pivots;
/* H1 cohomology pivot: secondary key, reduction-matrix column, and the
 * boundary entry (an edge index). */
typedef struct{
EDGE_ID key2;
EDGE_ID col_idx;
EDGE_ID bndry;
}H1_cohom_pivots;
/* H2 cohomology pivot: as above, but the boundary entry is a simplex. */
typedef struct{
EDGE_ID key2;
EDGE_ID col_idx;
simplex bndry;
}H2_cohom_pivots;
/* Iterator state for enumerating the coboundary of an edge (H1). */
typedef struct{
int a_ptr;
int b_ptr;
EDGE_ID o_ab;
simplex low;
}coboundary_H1;
/* Iterator state for enumerating the coboundary of a triangle (H2). */
typedef struct{
// The simplex
simplex triangle;
// Note: triangle.key1 is o_ab
// Note: triangle.key2 is c
VERT_ID a_ptr;
VERT_ID b_ptr;
VERT_ID c_ptr;
// The low of the simplex
simplex low;
// vertex selects the face: 0: ab, 1: ad, 2: bd, 3: cd
int vertex;
// NOTE(review): vertex == -1 is intended to mean "empty", but usage is not
// consistent; low.key1 == n_valid_edges also signals "empty".
}coboundary_H2;
/* Workspace for reducing one H1 boundary column: growable column buffer
 * (len/max_len), state flags driving the reduction loop, the current pivot,
 * and bookkeeping for which column/complex entry to reduce with. */
typedef struct{
int original;
EDGE_ID len;
EDGE_ID max_len;
int flag_first;
int flag_reduce;
int flag_red_w_complex;
int flag_append_to_complex;
int flag_empty;
EDGE_ID pivot;
simplex triangle;
EDGE_ID R_col_idx;
EDGE_ID reduce_with_len;
EDGE_ID* trivial_boundary;
}boundary_H1_ws;
/* Workspace for reducing one H2 boundary column; mirrors boundary_H1_ws but
 * pivots and boundary entries are simplices (tetrahedron boundaries). */
typedef struct{
int original;
EDGE_ID len;
EDGE_ID max_len;
int flag_first;
int flag_reduce;
int flag_red_w_complex;
int flag_append_to_complex;
int flag_empty;
simplex pivot;
simplex tetrahedron;
EDGE_ID R_col_idx;
EDGE_ID reduce_with_len;
simplex* trivial_boundary;
}boundary_H2_ws;
/* Workspace for one H0 boundary column (edge/vertex reduction). */
typedef struct{
int original;
EDGE_ID cob;
EDGE_ID len;
EDGE_ID max_len;
int flag_red_w_complex;
int flag_append_to_complex;
int flag_non_empty;
EDGE_ID pivot;
}boundary_H0_ws;
/* Growable list of edge indices (capacity max_len, next free slot last). */
typedef struct{
EDGE_ID max_len;
EDGE_ID last;
EDGE_ID* o_ab;
}edges_list;
/* Secondary-key entry of the implicit coboundary representation (H1). */
typedef struct{
EDGE_ID k2;
EDGE_ID o_ab;
EDGE_ID a_ptr;
EDGE_ID b_ptr;
int flag_next;
}implicit_keys2;
/* Primary-key bucket: growable array of implicit_keys2 entries. */
typedef struct{
EDGE_ID k1;
int flag_empty;
implicit_keys2* keys2;
EDGE_ID max_len;
EDGE_ID last;
}implicit_keys1;
/* Full workspace for the implicit H1 coboundary reduction: cursor into the
 * two-level key structure (k1_ptr/k2_ptr), the V-column edge list being
 * accumulated, the current pivot, and the flags/indices steering the
 * reduction (reduce with a stored column, with a trivial coboundary, etc.). */
typedef struct{
EDGE_ID k1_ptr;
EDGE_ID k2_ptr;
EDGE_ID edge;
implicit_keys1* keys1;
edges_list v_edges;
EDGE_ID max_len;
EDGE_ID last;
simplex pivot;
int flag_first;
int flag_red_w_complex;
int flag_red_w_trivial;
EDGE_ID reduce_w_bndry;
EDGE_ID V_col_idx;
int flag_append_to_complex;
int flag_non_empty;
}coboundary_H1_ws;
/* Growable list of (oriented) triangles, stored as simplices. */
typedef struct{
EDGE_ID max_len;
EDGE_ID last;
simplex* o_abc;
}triangles_list;
/* Secondary-key entry of the implicit coboundary representation (H2);
 * vertex selects which face is being extended (cf. coboundary_H2). */
typedef struct{
EDGE_ID k2;
simplex o_abc;
VERT_ID a_ptr;
VERT_ID b_ptr;
VERT_ID c_ptr;
int vertex;
int flag_next;
}coH2_implicit_keys2;
/* Primary-key bucket for the H2 implicit representation. */
typedef struct{
EDGE_ID k1;
int flag_empty;
coH2_implicit_keys2* keys2;
EDGE_ID max_len;
EDGE_ID last;
}coH2_implicit_keys1;
/* Full workspace for the implicit H2 coboundary reduction; mirrors
 * coboundary_H1_ws but tracks triangles instead of edges. */
typedef struct{
EDGE_ID k1_ptr;
EDGE_ID k2_ptr;
simplex triangle;
coH2_implicit_keys1* keys1;
triangles_list v_triangles;
EDGE_ID max_len;
EDGE_ID last;
simplex pivot;
int flag_first;
int flag_red_w_complex;
int flag_red_w_trivial;
simplex reduce_w_bndry;
EDGE_ID V_col_idx;
int flag_append_to_complex;
int flag_non_empty;
}coboundary_H2_ws;
// Growable V column of edge ids used when extracting an H1 birth cycle.
typedef struct{
  EDGE_ID len;
  EDGE_ID max_len;
  EDGE_ID* VV;
}hom1_birth;
// Sparse R column of edge ids with length/capacity bookkeeping (H1 homology).
typedef struct{
  EDGE_ID* RR;
  int original;
  EDGE_ID len;
  EDGE_ID max_len;
}R_struct;
// One H1 persistence pair: the birth edge, the killing triangle's key1,
// and the index of the corresponding R column.
typedef struct{
  EDGE_ID birth_edge;
  EDGE_ID death_triangle_key1;
  EDGE_ID R_col_idx;
}homH1_pers;
// Growable V column of triangles used when extracting an H2 birth void.
typedef struct{
  EDGE_ID len;
  EDGE_ID max_len;
  simplex* VV;
}hom2_birth;
// Sparse R column bookkeeping for H2 homology.
typedef struct{
  EDGE_ID* RR;
  int original;
  EDGE_ID len;
  EDGE_ID max_len;
}R_struct_H2;
// One H2 persistence pair: birth simplex, killing edge, and R column index.
typedef struct{
  simplex birth_simplex;
  EDGE_ID death_edge;
  EDGE_ID R_col_idx;
}homH2_pers;
// Tetrahedron (a,b,c,d) pre-processing record for H2: the four vertices are
// implied by the two key edges (o_ab, o_cd); the remaining four edge ids of
// the tetrahedron are stored explicitly.
typedef struct{
  // o_ab
  EDGE_ID key1;
  // o_cd
  EDGE_ID key2;
  EDGE_ID o_ac;
  EDGE_ID o_ad;
  EDGE_ID o_bd;
  EDGE_ID o_bc;
}H2_preprocess;
// Pivot record for the H2 homology reduction: maps a pivot (bucketed by key1,
// stored with key2) to its R column and the tetrahedron that produced it.
typedef struct{
  EDGE_ID key2;
  EDGE_ID col_idx;
  simplex tetrahedron;
}H2_pivots;
// Stored V column for an H0 pivot: the coface (edge id) that has this pivot,
// usage accounting, and the (optionally stored) explicit column of edge ids.
typedef struct{
  EDGE_ID coface;
  EDGE_ID V_usage;
  #ifdef RECORD_V_USAGE
  EDGE_ID V_depth;
  #endif
  int V_stored;    // nonzero when VV is populated
  EDGE_ID V_len;
  EDGE_ID* VV;
}V_H0;
// Stored V column for an H1 pivot — same layout as V_H0 but the coface and
// the column entries are triangles (simplex).
typedef struct{
  simplex coface;
  EDGE_ID V_usage;
  #ifdef RECORD_V_USAGE
  EDGE_ID V_depth;
  #endif
  int V_stored;    // nonzero when VV is populated
  EDGE_ID V_len;
  simplex* VV;
}V_H1;
// Pending V-column update during H1 cycle minimization: cycle id, position in
// the L ordering, and the replacement column of edge ids.
typedef struct{
  EDGE_ID cycid;
  EDGE_ID Lidx;
  EDGE_ID V_len;
  EDGE_ID* VV;
  //EDGE_ID ops_len;
  //EDGE_ID* ops;
}min_update_V;
// Same as min_update_V but for H2: the column entries are triangles.
typedef struct{
  EDGE_ID cycid;
  EDGE_ID Lidx;
  EDGE_ID V_len;
  simplex* VV;
  //EDGE_ID ops_len;
  //EDGE_ID* ops;
}min_update_V_H2;
//#ifdef MINIMIZE_BIRTH_CYCLES
// One stored birth cycle (edge-id representation) during H1 cycle
// minimization: the cycle's boundary, its current length, which cycle it is
// being reduced with (redw) and the first differing position (diff), plus the
// persistence pair and updated birth parameter.
typedef struct{
  EDGE_ID* boundary;
  EDGE_ID len;
  EDGE_ID redw;
  EDGE_ID diff;
  //EDGE_ID* ops;
  //EDGE_ID ops_len;
  PAR perspair[2];       // (birth, death) parameters of the class
  EDGE_ID Lidx;
  PAR updated_birth;
  //EDGE_ID* in_cycles;
  //EDGE_ID in_cycles_len;
  //EDGE_ID in_cycles_max_len;
}cyc_info;
// Marks a cycle as needing an update during minimization bookkeeping.
typedef struct{
  EDGE_ID cyc;
  int flag;
}update_in_cyc;
// Membership record: cycle cj together with a first-difference position.
typedef struct{
  EDGE_ID cj;
  EDGE_ID diff;
}cyc_in_cyc;
// H2 analogue of cyc_info: the boundary entries are triangles (simplex).
typedef struct{
  simplex* boundary;
  EDGE_ID len;
  EDGE_ID redw;
  EDGE_ID diff;
  PAR perspair[2];       // (birth, death) parameters of the class
  EDGE_ID Lidx;
  PAR updated_birth;
}cyc_info_H2;
//#endif
// Three-way (strcmp-style) comparison of Neighbors records by `neighbor`.
int compare_neighbors_vertex(Neighbors s1, Neighbors s2){
  if (s1.neighbor == s2.neighbor) return 0;
  return (s1.neighbor > s2.neighbor) ? 1 : -1;
}
// Three-way (strcmp-style) comparison of Neighbors records by `order`.
int compare_neighbors_order(Neighbors s1, Neighbors s2){
  if (s1.order == s2.order) return 0;
  return (s1.order > s2.order) ? 1 : -1;
}
// This is for tim sort
// Lexicographic three-way comparison of simplices: key1 first, then key2.
int compare_simplex(simplex s1, simplex s2){
  if (s1.key1 != s2.key1) return (s1.key1 > s2.key1) ? 1 : -1;
  if (s1.key2 != s2.key2) return (s1.key2 > s2.key2) ? 1 : -1;
  return 0;
}
// This is for tim sort
// Three-way comparison of coboundary_H1 entries by their edge id o_ab.
int compare_cob_H1(coboundary_H1 s1, coboundary_H1 s2){
  if (s1.o_ab == s2.o_ab) return 0;
  return (s1.o_ab > s2.o_ab) ? 1 : -1;
}
// Lexicographic three-way comparison of coboundary_H2 entries by their
// triangle keys (key1 first, then key2).
int compare_coboundary_H2(coboundary_H2 s1, coboundary_H2 s2){
  if (s1.triangle.key1 != s2.triangle.key1)
    return (s1.triangle.key1 > s2.triangle.key1) ? 1 : -1;
  if (s1.triangle.key2 != s2.triangle.key2)
    return (s1.triangle.key2 > s2.triangle.key2) ? 1 : -1;
  return 0;
}
// Three-way comparison of two edge ids.
int compare_EDGE_ID ( EDGE_ID x, EDGE_ID y){
  if (x == y) return 0;
  return (x > y) ? 1 : -1;
}
#define SORT_NAME sorter
//#define SORT_TYPE int64_t
#define SORT_TYPE Neighbors
#define SORT_CMP(x, y) compare_neighbors_vertex((x), (y))
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter2
//#define SORT_TYPE int64_t
#define SORT_TYPE Neighbors
#define SORT_CMP(x, y) compare_neighbors_order((x), (y))
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort2.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter3
//#define SORT_TYPE int64_t
#define SORT_TYPE EDGE_ID
#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort3.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter4
//#define SORT_TYPE int64_t
#define SORT_TYPE simplex
#define SORT_CMP(x, y) compare_simplex((x), (y))
//#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort4.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter5
//#define SORT_TYPE int64_t
#define SORT_TYPE coboundary_H1
#define SORT_CMP(x, y) compare_cob_H1((x), (y))
//#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort5.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter6
//#define SORT_TYPE int64_t
#define SORT_TYPE coboundary_H2
#define SORT_CMP(x, y) compare_coboundary_H2((x), (y))
//#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort6.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter7
//#define SORT_TYPE int64_t
#define SORT_TYPE implicit_keys2
// Lexicographic three-way comparison of implicit_keys2: k2 first, then o_ab.
int compare_implicit_keys2(implicit_keys2 s1, implicit_keys2 s2){
  if (s1.k2 != s2.k2) return (s1.k2 > s2.k2) ? 1 : -1;
  if (s1.o_ab != s2.o_ab) return (s1.o_ab > s2.o_ab) ? 1 : -1;
  return 0;
}
#define SORT_CMP(x, y) compare_implicit_keys2((x), (y))
//#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort7.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter8
//#define SORT_TYPE int64_t
#define SORT_TYPE EDGE_ID
// Three-way comparison of two edge ids (duplicate of compare_EDGE_ID, kept
// because the sorter8 template instantiation below names it).
int compare_edges(EDGE_ID s1, EDGE_ID s2){
  if (s1 == s2) return 0;
  return (s1 > s2) ? 1 : -1;
}
#define SORT_CMP(x, y) compare_edges((x), (y))
//#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort8.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter9
//#define SORT_TYPE int64_t
#define SORT_TYPE coH2_implicit_keys2
// Lexicographic three-way comparison of coH2_implicit_keys2:
// k2 first, then the triangle keys (o_abc.key1, o_abc.key2).
int coH2_compare_implicit_keys2(coH2_implicit_keys2 s1, coH2_implicit_keys2 s2){
  if (s1.k2 != s2.k2)
    return (s1.k2 > s2.k2) ? 1 : -1;
  if (s1.o_abc.key1 != s2.o_abc.key1)
    return (s1.o_abc.key1 > s2.o_abc.key1) ? 1 : -1;
  if (s1.o_abc.key2 != s2.o_abc.key2)
    return (s1.o_abc.key2 > s2.o_abc.key2) ? 1 : -1;
  return 0;
}
#define SORT_CMP(x, y) coH2_compare_implicit_keys2((x), (y))
//#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort9.h"
#undef SORT_NAME
#undef SORT_TYPE
#undef SORT_CMP
#define SORT_NAME sorter10
//#define SORT_TYPE int64_t
#define SORT_TYPE H2_preprocess
// Three-way comparison of H2_preprocess records by key2 (the edge o_cd).
int compare_tetra_key2(H2_preprocess s1, H2_preprocess s2){
  if (s1.key2 == s2.key2) return 0;
  return (s1.key2 > s2.key2) ? 1 : -1;
}
#define SORT_CMP(x, y) compare_tetra_key2((x), (y))
//#define SORT_CMP(x, y) compare_EDGE_ID(x, y)
/* You can redefine the comparison operator.
The default is
#define SORT_CMP(x, y) ((x) < (y) ? -1 : ((x) == (y) ? 0 : 1))
but the one below is often faster for integer types.
*/
//#define SORT_CMP(x, y) ((x->key1) < (y->key1) ? -1 : ((x->key1) == (y->key1) ? ((x->key2 < y->key2 ? -1 : ((x->key2) == (y->key2) ? 0: 1))) : 1))
//#define SORT_CMP(x, y,) compare_partial_cob_dec((x), (y))
#include "sort10.h"
// Binary predicate (NOT a three-way comparator): returns 1 when s1 orders
// at-or-after phi under the lexicographic key (k2, o_ab) vs
// (low.key2, o_ab), and 0 when it orders strictly before.
int compare_implicit(implicit_keys2 s1, coboundary_H1 phi){
  if (s1.k2 != phi.low.key2) return s1.k2 > phi.low.key2;
  return s1.o_ab >= phi.o_ab;
}
// Binary predicate (NOT a three-way comparator): returns 1 when s1 orders
// at-or-after phi under the lexicographic key (k2, o_abc.key1, o_abc.key2)
// vs (low.key2, triangle.key1, triangle.key2), and 0 when strictly before.
int coH2_compare_implicit(coH2_implicit_keys2 s1, coboundary_H2 phi){
  if (s1.k2 != phi.low.key2)
    return s1.k2 > phi.low.key2;
  if (s1.o_abc.key1 != phi.triangle.key1)
    return s1.o_abc.key1 > phi.triangle.key1;
  return s1.o_abc.key2 >= phi.triangle.key2;
}
// Global state of the whole persistent-homology run.  A single instance is
// malloc'd in main() and passed to every routine.  Groups, in order:
// output file names, run parameters, the edge filtration, neighbor tables,
// thread-pool handles, per-dimension reduction structures (sparse R/V
// matrices, pivots, workspaces), persistence-pair buffers, timers and
// debug fields.
typedef struct{
  // Input path (argv[1]); opened and parsed in main().
  char* filename;
  #ifdef SAVEPD
  char* g_H0_pers_file;
  char* g_H1_pers_file;
  char* g_H2_pers_file;
  #endif
  #ifdef SAVEV
  char* g_coH1_V_file;
  char* g_coH2_V_file;
  #endif
  int g_suppress_output;
  //#ifdef MINIMIZE_BIRTH_CYCLES
  char* g_minimal_V_H0_file;
  //char* g_minimal_V_H0_in_cycles_file;
  //#ifdef STORE_LENGTHS_CYCLES
  char* g_V_H0_birthcyc_lens_file;
  char* g_minimal_V_H0_birthcyc_lens_file;
  char* g_birth_subset_points_file_H0;
  char* g_V_H1_birthcyc_lens_file;
  char* g_minimal_V_H1_birthcyc_lens_file;
  //#endif
  char* g_minimal_V_H1_file;
  //#endif
  #ifdef MINIMIZE_HOM_CYCLES
  char* g_minimal_V_hom_H1_file;
  char* g_minimal_V_hom_H2_file;
  #ifdef STORE_LENGTHS_CYCLES
  char* g_minimal_V_H1_homcyc_lens;
  char* g_minimal_V_H2_homcyc_lens;
  #endif
  #endif
  #ifdef RECORD_V_USAGE
  char* g_V_H0_usage_file;
  char* g_V_H1_usage_file;
  #endif
  //char* g_file_prefix;
  //const char* g_H1_boundaries;
  //const char* g_H1_indices;
  //const char* g_H2_boundaries;
  char* g_source;
  // Output directory/prefix (argv[5]); all result file names are built from it.
  char* g_target;
  // Number of worker threads (argv[4]); passed to omp_set_num_threads in main().
  int g_cpu_count;
  // Highest homology dimension to compute (argv[6]).
  int g_dim_lim;
  int g_compute_cycles;
  int g_reduce_cyc_lengths;
  // 0: distance matrix, 1: point locations, 2: edge list (see main()).
  int g_filetype;
  // Filtration threshold (argv[2]); squared in main() for filetype 1.
  PAR g_thresh;
  VERT_ID g_n_vert;          // number of vertices (input rows)
  EDGE_ID g_n_valid_edges;   // edges whose parameter is below g_thresh
  BIGINT g_n_all_simp;
  // Flattened (v0, v1) vertex pairs with v0 < v1; 2*g_n_valid_edges entries.
  EDGE_ID* g_edges_list;
  // Edge parameter per valid edge (distance; squared distance for filetype 1).
  PAR* g_edge_parameter;
  #ifdef COMBIDX
  // Combinatorial idx
  EDGE_ID g_n_edges;
  EDGE_ID* g_edges_comb_idx;
  #endif
  // Neighbor data structures
  Neighbors** g_Neighbors;
  Neighbors** g_Neighbors_e;
  VERT_ID* g_Neigh_len;
  EDGE_ID g_max_neighbors;
  // WORKSPACE PARAMETERS
  int g_workspace_size;
  int g_ws_pre_alloc;
  int g_ws_counter;
  ////////////////////////////////////
  // Parallel job allocation
  ////////////////////////////////////
  int* g_jobs;
  int g_sleeping_threads;
  int g_processed_threads;
  int g_thread_id;
  int g_delete_threads;
  pthread_t *g_threads;
  pthread_mutex_t g_thread_lock;
  pthread_cond_t g_start_boss;
  pthread_cond_t g_start_workers;
  ////////////////////////////////////
  // H0 Structures
  ////////////////////////////////////
  // Pivots for H0
  // i is pivot of A[i]
  EDGE_ID* g_pivots_H0;
  // STORE R for H0
  EDGE_ID* g_R_sparse_H0;
  EDGE_ID g_R_sparse_ptr_H0;
  EDGE_ID g_R_sparse_max_H0;
  // Mapping of R columns to sparse R linear
  EDGE_ID* g_R_col_indices_H0;
  EDGE_ID g_R_col_indices_max_H0;
  EDGE_ID g_R_col_indices_ptr_H0;
  // Store pivot for H0
  EDGE_ID* g_edges_with_pivots_H0;
  // H0 WORKSPACE STRUCTURES
  EDGE_ID** g_R_ws_H0;
  boundary_H0_ws* g_R_ws_H0_info;
  ////////////////////////////////////
  // cohomology H1 structures
  ////////////////////////////////////
  EDGE_ID g_this_edge;
  coboundary_H1* g_coH1_all_lows;
  // V SPARSE
  EDGE_ID* g_V_sparse_H1;
  EDGE_ID g_V_sparse_max;
  EDGE_ID g_V_sparse_ptr;
  EDGE_ID g_V_sparse_beg_ptr;
  EDGE_ID g_V_sparse_end_ptr;
  EDGE_ID* g_V_col_indices;
  EDGE_ID g_V_col_indices_max;
  EDGE_ID g_V_col_indices_ptr;
  // V workspace
  coboundary_H1_ws* g_V_ws_H1;
  // PIVOTS OF H1 COHOMOLOGY
  H1_cohom_pivots** g_H1_cohom_pivots;
  EDGE_ID* g_H1_cohom_pivots_len;
  EDGE_ID* g_H1_cohom_pivots_max_len;
  // Pers pairs
  EDGE_ID g_H1_pers_pairs_max_len;
  EDGE_ID g_H1_pers_pairs_len;
  PAR* g_H1_pers_pairs;
  ////////////////////////////////////
  // cohomology H2 structures
  ////////////////////////////////////
  simplex* g_V_sparse_H2;
  // WORKSPACE STRUCTURES
  int g_cohom_ws_size;
  coboundary_H2_ws* g_V_ws_H2;
  // NEW PIVOTS OF H2 COHOMOLOGY
  H2_cohom_pivots** g_H2_cohom_pivots;
  EDGE_ID* g_H2_cohom_pivots_len;
  EDGE_ID* g_H2_cohom_pivots_max_len;
  // Pers pairs
  EDGE_ID g_H2_pers_pairs_max_len;
  EDGE_ID g_H2_pers_pairs_len;
  PAR* g_H2_pers_pairs;
  ////////////////////////////////////
  // Timers
  ////////////////////////////////////
  double g_timer_H2_low;
  double g_timer_H2_next;
  double g_timer_H2_greater;
  struct timespec g_start_wall_clock;
  struct timespec g_finish_wall_clock;
  double g_timer_process_input;
  double g_timer_neigh;
  double g_timer_H0;
  double g_timer_coH1;
  double g_timer_coH2;
  double g_timer_computeH1;
  double g_timer_computeH2;
  double g_timer_H1cycles;
  double g_timer_H2cycles;
  double g_timer_minimize_H1cycles;
  double g_timer_minimize_H2cycles;
  double g_timer_minimize_H1_homcycles;
  double g_timer_minimize_H2_homcycles;
  double g_timer_coH2_serial;
  double g_timer_coH2_parallel;
  BIGINT g_n_H1_birth_cycles;
  BIGINT g_n_H2_birth_cycles;
  BIGINT g_n_H0_stored_V;
  BIGINT g_n_H1_stored_V;
  // Temporary
  int g_p_flag;
  EDGE_ID g_counter;
  ////////////////////////////////////
  // homology H1 structures
  ////////////////////////////////////
  char* g_homH1_cycles_file;
  EDGE_ID g_R_max_len_H1;
  EDGE_ID g_R_len_H1;
  EDGE_ID* g_R_H1;
  EDGE_ID g_R_col_idx_max_len_H1;
  EDGE_ID* g_R_col_idx_H1;
  EDGE_ID g_R_col_idx_H1_ptr;
  EDGE_ID* g_pivots_H1;
  //
  EDGE_ID** g_workspace_H1;
  boundary_H1_ws* g_workspace_H1_info;
  ////////////////////////////////////
  // For H1 birth cycles
  ////////////////////////////////////
  #ifdef HOM_CYCLES
  V_H0* g_H0_pivot_of;
  #endif
  R_struct g_temp_R_birth_cycles;
  hom1_birth g_temp_V_primary;
  EDGE_ID g_homH1_pers_len;
  EDGE_ID g_homH1_pers_max_len;
  homH1_pers* g_homH1_pers;
  EDGE_ID* g_H1_undead;
  EDGE_ID g_H1_undead_ptr;
  EDGE_ID g_H1_undead_max;
  EDGE_ID g_depth;
  ////////////////////////////////////
  // homology H2 structures
  ////////////////////////////////////
  char* g_homH2_cycles_file;
  EDGE_ID g_R_max_len_H2;
  EDGE_ID g_R_len_H2;
  simplex* g_R_H2;
  EDGE_ID g_R_col_idx_max_len_H2;
  EDGE_ID* g_R_col_idx_H2;
  EDGE_ID g_R_col_idx_H2_ptr;
  H2_pivots** g_H2_pivots;
  EDGE_ID* g_H2_pivots_len;
  EDGE_ID* g_H2_pivots_max_len;
  simplex** g_workspace_H2;
  boundary_H2_ws* g_workspace_H2_info;
  ////////////////////////////////////
  // For H2 birth cycles
  ////////////////////////////////////
  #ifdef HOM_CYCLES
  V_H1* g_H1_pivot_of;
  #endif
  R_struct_H2 g_temp_R_H2_birth_cycles;
  hom2_birth g_temp_V_H2_primary;
  // Pers pairs info
  EDGE_ID g_homH2_pers_len;
  EDGE_ID g_homH2_pers_max_len;
  homH2_pers* g_homH2_pers;
  simplex* g_H2_undead;
  EDGE_ID g_H2_undead_ptr;
  EDGE_ID g_H2_undead_max;
  int g_extract_cycles;
  #ifdef ADAPTIVE_V_STORAGE
  EDGE_ID g_cycle_usage_thresh;
  EDGE_ID g_cycle_depth_thresh;
  EDGE_ID g_store_V_for_len;
  EDGE_ID g_store_V_for_max_len;
  EDGE_ID* g_store_V_for;
  simplex* g_store_V_voids_for;
  #endif
  //#ifdef MINIMIZE_BIRTH_CYCLES
  EDGE_ID g_global_minimizer;
  EDGE_ID g_all_V_stored_num;
  EDGE_ID g_all_V_stored_max_num;
  cyc_info* g_all_V_H0_stored;
  //EDGE_ID** g_all_V_H0_stored;
  cyc_info_H2* g_all_V_H1_stored;
  EDGE_ID** g_edges_in_cycles;
  EDGE_ID* g_edges_in_cycles_len;
  //#endif
  #ifdef MINIMIZE_HOM_CYCLES
  cyc_info* g_all_V_hom_H1_stored;
  EDGE_ID g_all_V_hom_stored_num;
  // LEGACY
  EDGE_ID g_all_V_hom_stored_max_num;
  EDGE_ID* g_all_V_hom_stored_len;
  // NOTE(review): duplicate member name — g_all_V_hom_H1_stored is already
  // declared above as cyc_info*; this block will not compile when
  // MINIMIZE_HOM_CYCLES is defined. Rename or remove the legacy declaration.
  EDGE_ID** g_all_V_hom_H1_stored;
  simplex** g_all_V_hom_H2_stored;
  #endif
  int g_new_debug;
  int g_new_debug2;
  EDGE_ID g_debug_edge;
  simplex g_debug_triangle;
  // Cycle minimization birth threshold
  //PAR g_cycle_min_birth_thresh;
} filtration;
int simplex1_check(VERT_ID, VERT_ID, PAR, PAR);
int simplex2_check(VERT_ID, VERT_ID, VERT_ID);
int simplex3_check(VERT_ID, VERT_ID, VERT_ID, VERT_ID);
// MERGE SORT ALGORITHM
void mergeSort(PAR* , EDGE_ID* , EDGE_ID , EDGE_ID ) ;
void merge(PAR* , EDGE_ID* , EDGE_ID , EDGE_ID , EDGE_ID ) ;
// MERGE SORT ALGORITHM 2
void mergeSort_V_H0(EDGE_ID* , EDGE_ID** , EDGE_ID*, EDGE_ID*, EDGE_ID , EDGE_ID ) ;
void merge_V_H0(EDGE_ID* , EDGE_ID** , EDGE_ID*, EDGE_ID*, EDGE_ID , EDGE_ID , EDGE_ID ) ;
// MERGE SORT ALGORITHM 3
void mergeSort_V_H1(EDGE_ID* , simplex** , EDGE_ID , EDGE_ID ) ;
void merge_V_H1(EDGE_ID* , simplex** , EDGE_ID , EDGE_ID , EDGE_ID ) ;
//
// MERGE SORT ALGORITHM 4
void mergeSort_update_V(min_update_V* , EDGE_ID , EDGE_ID ) ;
void merge_update_V(min_update_V* , EDGE_ID , EDGE_ID , EDGE_ID ) ;
// MERGE SORT ALGORITHM 5
void mergeSort_update_V_byLidx(min_update_V* , EDGE_ID , EDGE_ID ) ;
void merge_update_V_byLidx(min_update_V* , EDGE_ID , EDGE_ID , EDGE_ID ) ;
// MERGE SORT ALGORITHM 6: Sort Lcycid, Llen, Lupdated by Llen
void mergeSort_Llen(EDGE_ID*, EDGE_ID*, EDGE_ID*, EDGE_ID, EDGE_ID ) ;
void merge_Llen(EDGE_ID*, EDGE_ID*, EDGE_ID*, EDGE_ID, EDGE_ID, EDGE_ID ) ;
// MERGE SORT ALGORITHM 7: Sort
void mergeSort_temp_par(PAR*, EDGE_ID*, EDGE_ID, EDGE_ID ) ;
void merge_temp_par(PAR*, EDGE_ID*, EDGE_ID, EDGE_ID, EDGE_ID ) ;
// MERGE SORT ALGORITHM 8: Sort
void mergeSort_incycleslen(EDGE_ID*, cyc_info*, EDGE_ID, EDGE_ID ) ;
void merge_incycleslen(EDGE_ID*, cyc_info*, EDGE_ID, EDGE_ID, EDGE_ID ) ;
// MERGE SORT ALGORITHM 9: Sort
void mergeSort_edges_in_cycles(EDGE_ID*, cyc_info*, EDGE_ID, EDGE_ID ) ;
void merge_edges_in_cycles(EDGE_ID*, cyc_info*, EDGE_ID, EDGE_ID, EDGE_ID ) ;
// MERGE SORT ALGORITHM 10: Sort
void mergeSort_edges_in_cycles_bycycid(EDGE_ID*, EDGE_ID, EDGE_ID ) ;
void merge_edges_in_cycles_bycycid(EDGE_ID*, EDGE_ID, EDGE_ID, EDGE_ID ) ;
//////////////////////////////////////////////////////////////
// NEIGHBOR CREATION AND SEARCH ALGORITHMS
//////////////////////////////////////////////////////////////
void update_neighbors_new(filtration* , VERT_ID , VERT_ID , EDGE_ID);
VERT_ID search_Neighbors(filtration* , VERT_ID , VERT_ID , VERT_ID , VERT_ID);
VERT_ID search_Neighbors_e(filtration* , VERT_ID , EDGE_ID , VERT_ID , VERT_ID, EDGE_ID);
VERT_ID bin_search_min_geq_Ne(Neighbors* , VERT_ID, VERT_ID, VERT_ID, EDGE_ID);
VERT_ID bin_search_min_geq_N(Neighbors* , VERT_ID, VERT_ID, VERT_ID, EDGE_ID);
EDGE_ID bin_search_cycle_ops(EDGE_ID*, EDGE_ID, EDGE_ID, EDGE_ID, EDGE_ID);
EDGE_ID bin_search_cyc_in_cyc(cyc_in_cyc* , EDGE_ID , EDGE_ID , EDGE_ID , EDGE_ID );
//////////////////////////////////////////////////////////////
// H0 HOMOLOGY FUNCTIONS
// Parallel homology reduction H0
//main reduction
void reduce_ws_H0(filtration* );
//reduction with complex
void* reduce_with_complex_H0(void* );
//reduction with self
void reduce_with_self_H0(filtration* );
//Update R
void update_R_H0(filtration* , int );
void allocate_jobs(filtration*, int);
BIGINT compute_num_simplices(filtration* );
// H1 cohomology functions
void update_V_coH1 (filtration*, int);
void find_H1_cohom_next (filtration* , coboundary_H1* );
void find_H1_cohom_low(filtration* , coboundary_H1* );
void find_H1_cohom_greater(filtration* , coboundary_H1* , simplex* );
void insert_in_implicit_v(filtration* , int, coboundary_H1*, int);
void print_v_implicit(filtration*, int );
void reduce_ws_coH1(filtration* );
void reduce_with_self_coH1(filtration* );
void* reduce_with_complex_coH1(void* );
void reduce_hash_table_coH1(filtration*, int );
void coH2_insert_in_implicit_v(filtration*, int , coboundary_H2* , int );
void reduce_hash_table_coH2(filtration*, int );
void coH2_print_v_implicit(filtration*, int );
void find_H2_cohom_next (filtration* , coboundary_H2* );
void find_H2_cohom_low(filtration* , coboundary_H2* );
void find_H2_cohom_greater(filtration* , coboundary_H2* , simplex* );
int H2_case1 (filtration*, coboundary_H2*);
void H2_case2 (filtration*, coboundary_H2*);
EDGE_ID search_H1_cohom_pivots(H1_cohom_pivots* , EDGE_ID , EDGE_ID , EDGE_ID , EDGE_ID );
EDGE_ID search_H2_cohom_pivots(H2_cohom_pivots* , EDGE_ID , EDGE_ID , EDGE_ID , EDGE_ID );
void H2_reduce (filtration*, coboundary_H2*, EDGE_ID, int);
void update_V_coH2(filtration* , int );
void add_coH2_pivot(filtration*, simplex, simplex, EDGE_ID);
void reduce_ws_coH2(filtration* );
void* reduce_with_complex_coH2(void* );
void reduce_with_self_coH2(filtration* );
// H1 HOMOLOGY FUNCTIONS
//main reduction
void reduce_ws_H1(filtration* );
//reduction with complex
void* reduce_with_complex_H1(void* );
//reduction with self
void reduce_with_self_H1(filtration* );
//Update R
void update_R_H1(filtration* , int );
EDGE_ID bin_search_max_less_V(EDGE_ID* , EDGE_ID , EDGE_ID , EDGE_ID , EDGE_ID );
EDGE_ID bin_search_min_greater_updated_V_byLidx(EDGE_ID* , EDGE_ID , EDGE_ID , EDGE_ID , EDGE_ID );
EDGE_ID find_first_diff_H0(EDGE_ID* \
, EDGE_ID \
, EDGE_ID** );
// H1 cycles
void compute_H1_homology_cycles(filtration* );
void get_birth_cycle(filtration*, EDGE_ID);
void find_V_recursively_edges(filtration*, EDGE_ID, EDGE_ID);
void shuffle_cyc(cyc_info*, EDGE_ID);
//#ifdef MINIMIZE_BIRTH_CYCLES
void minimize_birth_cycles_H0(filtration*, EDGE_ID**, EDGE_ID*, EDGE_ID, char*);
void minimize_birth_cycles_H0_v2(filtration*, EDGE_ID**, EDGE_ID*, EDGE_ID, char*);
void minimize_birth_cycles_H1(filtration*);
void minimize_birth_cycles_H1_v2(filtration* \
, cyc_info_H2* \
, EDGE_ID \
, char* \
, char* \
, char* \
);
void minimize_birth_cycles_H0_v3(filtration* \
, cyc_info* \
, EDGE_ID \
, char* \
, char* \
, char* \
, char* \
);
void minimize_birth_cycles_H0_v4(filtration* \
, cyc_info* \
, EDGE_ID \
, char* \
, char* \
);
void minimal_CASE1(EDGE_ID , cyc_info* , EDGE_ID* , EDGE_ID*, EDGE_ID );
void minimal_CASE2(filtration*, EDGE_ID , cyc_info* , EDGE_ID* , EDGE_ID* \
, EDGE_ID* , EDGE_ID );
void update_diff(filtration* , EDGE_ID , EDGE_ID* , int \
, EDGE_ID* , cyc_info* , EDGE_ID);
void find_first_diff(filtration* , EDGE_ID , EDGE_ID*\
, EDGE_ID* , cyc_info* , EDGE_ID);
//#endif
void store_V_H0(filtration* );
void reduce_temp_V_H0(filtration* );
// H2 HOMOLOGY FUNCTIONS
//main reduction
void reduce_ws_H2(filtration* );
//reduction with complex
void* reduce_with_complex_H2(void* );
//reduction with self
void reduce_with_self_H2(filtration* );
//Update R
void update_R_H2(filtration* , int );
//add_pivot
void add_H2_pivot (filtration* , simplex , simplex , EDGE_ID );
//search pivot
EDGE_ID search_H2_pivots(H2_pivots* , EDGE_ID , EDGE_ID , EDGE_ID , EDGE_ID );
// H2 cycles
void compute_boundary_triangle(filtration* , simplex , EDGE_ID* );
void compute_boundary_tetra(filtration* , simplex , simplex* );
void compute_H2_homology_cycles(filtration* );
void get_birth_void(filtration*, simplex);
void find_V_recursively_triangles(filtration*, simplex, EDGE_ID);
void store_V_H1(filtration* );
void reduce_temp_V_H1(filtration* );
// DEALLOCATE
void deallocator(filtration*);
int main(int argc, char* argv[]){
//static PyObject *compute_PH(PyObject *self2, PyObject *args){
struct timespec start_wall_clock, finish_wall_clock;
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
// Filetype = 0 : Distance matrix
// Filetype = 1 : Locations
// Filetype = 2 : Edge list with edge length
//printf("%ld", (long)getpid());
filtration* self;
self = (filtration*)malloc(sizeof(filtration));
self->g_new_debug = 0;
//////////////////////////////////////////////////////
// Set testing timers and test counters
//////////////////////////////////////////////////////
self->g_counter = 0;
self->g_timer_H2_low = 0;
self->g_timer_H2_next = 0;
self->g_timer_H2_greater = 0;
self->g_timer_coH2_serial = 0;
self->g_timer_coH2_parallel = 0;
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
//if (!PyArg_ParseTuple(args, "sdiisiiii"\
// , &(self->g_source), &(self->g_thresh)\
// , &(self->g_filetype), &(self->g_cpu_count)\
// , &(self->g_target), &(self->g_dim_lim)\
// , &(self->g_compute_cycles), &(self->g_reduce_cyc_lengths)\
// , &(self->g_suppress_output)\
// )){
// printf("\nERROR in parse args");
// return NULL;
// //, &(self->g_cycle_min_birth_thresh)\
//}
//int file_len = strlen(self->g_target) + 100;
int file_len = strlen(argv[5]) + 100;
self->g_thresh = atof(argv[2]);
self->g_filetype = atoi(argv[3]);
self->g_cpu_count = atoi(argv[4]);
self->g_dim_lim = atoi(argv[6]);
self->g_compute_cycles = atoi(argv[7]);
self->g_reduce_cyc_lengths = atoi(argv[8]);
self->g_suppress_output = atoi(argv[9]);
char* duplicate = (char*)malloc(file_len*sizeof(char));
//strcpy(duplicate, self->g_target);
strcpy(duplicate, argv[1]);
self->filename = strdup(duplicate);
strcpy(duplicate, argv[5]);
self->g_target = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "homH1_cycles.txt");
self->g_homH1_cycles_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "homH2_cycles.txt");
self->g_homH2_cycles_file = strdup(duplicate);
//#ifdef MINIMIZE_BIRTH_CYCLES
strcpy(duplicate, self->g_target);
strcat(duplicate, "minimal_V_birth_H1.txt");
self->g_minimal_V_H0_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "birth_subsets_H1.txt");
self->g_birth_subset_points_file_H0 = strdup(duplicate);
//strcpy(duplicate, argv[5]);
//strcat(duplicate, "minimal_V_birth_H1_in_cycles.txt");
//self->g_minimal_V_H0_in_cycles_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "minimal_V_birth_H2.txt");
self->g_minimal_V_H1_file = strdup(duplicate);
//#ifdef STORE_LENGTHS_CYCLES
strcpy(duplicate, self->g_target);
strcat(duplicate, "V_birth_len_H1.txt");
self->g_V_H0_birthcyc_lens_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "minimal_V_birth_len_H1.txt");
self->g_minimal_V_H0_birthcyc_lens_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "V_birth_len_H2.txt");
self->g_V_H1_birthcyc_lens_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "minimal_V_birth_len_H2.txt");
self->g_minimal_V_H1_birthcyc_lens_file = strdup(duplicate);
//#endif
//#endif
#ifdef MINIMIZE_HOM_CYCLES
if (self->g_reduce_cyc_lengths){
strcpy(duplicate, self->g_target);
strcat(duplicate, "minimal_V_hom_H1.txt");
self->g_minimal_V_hom_H1_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "minimal_V_hom_H2.txt");
self->g_minimal_V_hom_H2_file = strdup(duplicate);
}
#endif
#ifdef RECORD_V_USAGE
strcpy(duplicate, self->g_target);
strcat(duplicate, "V_H0_usage.txt");
self->g_V_H0_usage_file = strdup(duplicate);
strcpy(duplicate, self->g_target);
strcat(duplicate, "V_H1_usage.txt");
self->g_V_H1_usage_file = strdup(duplicate);
#endif
#if defined(SAVEPD) || defined(SAVEV)
strcpy(duplicate, self->g_target);
strcat(duplicate, "H0_pers_data.txt");
self->g_H0_pers_file = strdup(duplicate);
if (self->g_dim_lim > 0){
strcpy(duplicate, self->g_target);
strcat(duplicate, "H1_pers_data.txt");
self->g_H1_pers_file = strdup(duplicate);
#ifdef SAVEV
strcpy(duplicate, self->g_target);
strcat(duplicate, "coH1_V_data.txt");
self->g_coH1_V_file = strdup(duplicate);
#endif
if (self->g_dim_lim > 1){
strcpy(duplicate, self->g_target);
strcat(duplicate, "H2_pers_data.txt");
self->g_H2_pers_file = strdup(duplicate);
#ifdef SAVEV
strcpy(duplicate, self->g_target);
strcat(duplicate, "coH2_V_data.txt");
self->g_coH2_V_file = strdup(duplicate);
#endif
}
}
#endif
free(duplicate);
//self->g_extract_cycles = atoi(argv[6]);
//self->g_cycle_birth_limit = atof(argv[7]);
#ifdef ADAPTIVE_V_STORAGE
self->g_cycle_usage_thresh = 2;
self->g_cycle_depth_thresh = 1;
#endif
omp_set_num_threads(self->g_cpu_count);
FILE *fp = fopen(self->filename, "r");
if (fp == NULL){
perror("Unable to open file!");
exit(1);
}
char* line = NULL;
size_t len = 0;
char* dist;
PAR dist_d;
char* end;
getline(&line, &len, fp);
dist = strtok(line, " ,");
fclose(fp);
fp = fopen(self->filename, "r");
int prealloc = 100000;
VERT_ID row = 0;
VERT_ID col = 0;
self->g_edges_list = (EDGE_ID*)malloc(2*prealloc*sizeof(EDGE_ID));
self->g_edge_parameter = (PAR*)malloc(prealloc*sizeof(PAR));
self->g_n_valid_edges = 0;
if (self->g_filetype == 0){
#ifdef DISTMAT_MINMAX
PAR dist_min_max, dist_max;
dist_min_max = INFINITY;
// this is a distance matrix
while(getline(&line, &len, fp) != -1) {
//col = 0;
dist = strtok(line, " ,");
dist_max = 0;
while(dist != NULL){
dist_d = strtod(dist, &end);
dist = strtok(NULL, ",");
if (dist_d > dist_max){
dist_max = dist_d;
}
//col += 1;
}
if (dist_max < dist_min_max){
dist_min_max = dist_max;
}
//row += 1;
}
self->g_thresh = dist_min_max;
rewind(fp);
#endif
EDGE_ID edge_list_ptr = 0;
// this is a distance matrix
while(getline(&line, &len, fp) != -1) {
col = 0;
dist = strtok(line, " ,");
while(dist != NULL){
dist_d = strtod(dist, &end);
//if (dist_d != 0) dist_d = 1/dist_d;
dist = strtok(NULL, ",");
if (col > row){
//if (simplex1_check(row, col, dist_d, self->g_thresh)){
if (dist_d < self->g_thresh){
//self->g_edges_list[self->g_n_valid_edges] = (EDGE_ID*)malloc(2*sizeof(EDGE_ID));
// Note that g_edges_list is sorted, row < col
//self->g_edges_list[self->g_n_valid_edges][0] = row;
//self->g_edges_list[self->g_n_valid_edges][1] = col;
self->g_edges_list[edge_list_ptr++] = row;
self->g_edges_list[edge_list_ptr++] = col;
// parameter
self->g_edge_parameter[self->g_n_valid_edges] = dist_d;
self->g_n_valid_edges += 1;
if (self->g_n_valid_edges == prealloc){
prealloc += 100000;
self->g_edges_list = (EDGE_ID*)realloc(self->g_edges_list, 2*prealloc*sizeof(EDGE_ID));
self->g_edge_parameter = (PAR*)realloc(self->g_edge_parameter, prealloc*sizeof(PAR));
}
}
}
col += 1;
}
row += 1;
}
self->g_n_vert = row;
}
else if (self->g_filetype == 1){
self->g_thresh = self->g_thresh * self->g_thresh;
//Locations information
if (!self->g_suppress_output){
printf("extracting edges");
}
int dim_space = 0;
while(getline(&line, &len, fp) != -1) {
dist = strtok(line, " ,");
while(dist != NULL){
dist_d = strtod(dist, &end);
dist = strtok(NULL, ",");
dim_space++;
}
break;
}
rewind(fp);
PAR** locations;
locations = (PAR**)malloc(sizeof(PAR*));
while(getline(&line, &len, fp) != -1) {
col = 0;
if (!self->g_suppress_output){
printf("\rrow %d", row);
}
locations = (PAR**)realloc(locations, (row+1)*sizeof(PAR*));
locations[row] = (PAR*)malloc(dim_space*sizeof(PAR));
dist = strtok(line, " ,");
while(dist != NULL){
dist_d = strtod(dist, &end);
//if (dist_d != 0) dist_d = 1/dist_d;
dist = strtok(NULL, ",");
locations[row][col++] = dist_d;
}
row++;
}
PAR diff;
#ifdef POINTCLOUD_MINMAX
if (!self->g_suppress_output){
printf("\nthresh is %lf", self->g_thresh);
}
PAR dist_min_max, dist_max;
dist_min_max = INFINITY;
/*
 * For every point i, find the squared distance to its farthest point
 * (dist_max); dist_min_max tracks the smallest such value over all i.
 * It is used just below to cap self->g_thresh.
 */
for (int i = 0; i < row; i++){
    //if (i%1000 == 0)
    if (!self->g_suppress_output){
        printf("\n%d", i);
    }
    dist_max = 0;
    for (int j = 0; j < row; j++){
        /* BUG FIX: reset the accumulator for each pair (i, j).
         * Previously dist_d was never cleared here (unlike the edge
         * extraction loop below, which does `dist_d = 0;`), so it kept
         * accumulating across all pairs and dist_max / dist_min_max
         * were computed from a monotonically growing sum. */
        dist_d = 0;
        for (int k = 0; k < dim_space; k++){
            diff = locations[i][k] - locations[j][k];
            dist_d += diff*diff;
        }
        if (dist_d > dist_max){
            dist_max = dist_d;
        }
    }
    if (dist_max < dist_min_max){
        dist_min_max = dist_max;
    }
}
if (self->g_thresh > dist_min_max){
self->g_thresh = dist_min_max;
}
if (!self->g_suppress_output){
printf("\nupdated thresh is %lf", self->g_thresh);
}
#endif
EDGE_ID edge_list_ptr = 0;
if (!self->g_suppress_output){
printf("\n");
}
for (int i = 0; i < row-1; i++){
if (!self->g_suppress_output){
printf("Done %f percent edges %d\r", (float)i/(float)(row-1), self->g_n_valid_edges);
}
for (int j = i+1; j < row; j++){
dist_d = 0;
for (int k = 0; k < dim_space; k++){
diff = locations[i][k] - locations[j][k];
dist_d += diff*diff;
}
//dist_d = sqrt(dist_d);
//if (simplex1_check(i, j, dist_d, self->g_thresh)){
if (dist_d < self->g_thresh){
//self->g_edges_list[self->g_n_valid_edges] = (EDGE_ID*)malloc(2*sizeof(EDGE_ID));
// Note that g_edges_list is sorted, row < col
//self->g_edges_list[self->g_n_valid_edges][0] = i;
//self->g_edges_list[self->g_n_valid_edges][1] = j;
self->g_edges_list[edge_list_ptr++] = i;
self->g_edges_list[edge_list_ptr++] = j;
// parameter
self->g_edge_parameter[self->g_n_valid_edges] = dist_d;
self->g_n_valid_edges += 1;
if (self->g_n_valid_edges == prealloc){
prealloc += 100000;
self->g_edges_list = (EDGE_ID*)realloc(self->g_edges_list, 2*prealloc*sizeof(EDGE_ID));
self->g_edge_parameter = (PAR*)realloc(self->g_edge_parameter, prealloc*sizeof(PAR));
}
}
}
}
for (int i = 0; i < row; i++) free(locations[i]);
free(locations);
self->g_n_vert = row;
if (!self->g_suppress_output){
printf("\nExtracted edges\n");
}
}
else if (self->g_filetype == 2){
//List of edges with lengths
//Format is v1, v2, length
if (!self->g_suppress_output){
printf("extracting edges");
}
int n_edges = 0;
row = 0;
//while(getline(&line, &len, fp) != -1)
// n_edges++;
//printf("\nnumber of edges %d", n_edges);
//rewind(fp);
int vv1, vv2;
int max_v = 0;
EDGE_ID edge_list_ptr = 0;
while(getline(&line, &len, fp) != -1) {
col = 0;
//self->g_edges_list[self->g_n_valid_edges] = (EDGE_ID*)malloc(2*sizeof(EDGE_ID));
dist = strtok(line, " ,");
while(dist != NULL){
if (col == 0){
vv1 = atoi(dist);
if (vv1 > max_v)
max_v = vv1;
}
else if (col == 1){
vv2 = atoi(dist);
if (vv2 > max_v)
max_v = vv2;
}
else if (col == 2){
PAR edge_length = strtod(dist, &end);
//if (simplex1_check(vv1, vv2, edge_length, self->g_thresh)){
if (edge_length < self->g_thresh){
self->g_edge_parameter[self->g_n_valid_edges] = edge_length;
if (vv1 < vv2){
//self->g_edges_list[self->g_n_valid_edges][0] = vv1;
//self->g_edges_list[self->g_n_valid_edges][1] = vv2;
self->g_edges_list[edge_list_ptr++] = vv1;
self->g_edges_list[edge_list_ptr++] = vv2;
}
else {
//self->g_edges_list[self->g_n_valid_edges][0] = vv2;
//self->g_edges_list[self->g_n_valid_edges][1] = vv1;
self->g_edges_list[edge_list_ptr++] = vv2;
self->g_edges_list[edge_list_ptr++] = vv1;
}
self->g_n_valid_edges++;
if (self->g_n_valid_edges == prealloc){
prealloc += 100000;
self->g_edges_list = (EDGE_ID*)realloc(self->g_edges_list, 2*prealloc*sizeof(EDGE_ID));
self->g_edge_parameter = (PAR*)realloc(self->g_edge_parameter, prealloc*sizeof(PAR));
}
}
}
dist = strtok(NULL, ",");
col++;
}
row++;
}
self->g_n_vert = max_v+1;
if (!self->g_suppress_output){
printf("\nExtracted edges\n");
}
}
self->g_edges_list = (EDGE_ID*)realloc(self->g_edges_list, 2*self->g_n_valid_edges*sizeof(EDGE_ID));
self->g_edge_parameter = (PAR*)realloc(self->g_edge_parameter, self->g_n_valid_edges*sizeof(PAR));
fclose(fp);
free(line);
if (!self->g_suppress_output){
printf("\nNumber of vertices %d", self->g_n_vert);
}
mergeSort(self->g_edge_parameter, self->g_edges_list, 0, self->g_n_valid_edges-1);
if (!self->g_suppress_output){
printf("\nSorted %d edges\n", self->g_n_valid_edges);
}
//exit(0);
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_process_input = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_process_input += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
//
// STEP 1
// Generate Neighbor matrices
//
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
//printf("\nPress key to start...");
//getchar();
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
// Initiate the Neighbor data structures
// Two copies of each vertex's adjacency list are kept; they are later
// sorted with different comparators (sorter_tim_sort on g_Neighbors,
// sorter2_tim_sort on g_Neighbors_e — see the sorting loop below).
self->g_Neighbors_e = (Neighbors**)malloc(self->g_n_vert*sizeof(Neighbors*));
self->g_Neighbors = (Neighbors**)malloc(self->g_n_vert*sizeof(Neighbors*));
// Current fill length of each per-vertex list (starts at 0 via calloc).
self->g_Neigh_len = (VERT_ID*)calloc(self->g_n_vert, sizeof(VERT_ID));
self->g_pivots_H0 = (EDGE_ID*)calloc(self->g_n_vert, sizeof(EDGE_ID));
// First pass over the edge list: count each vertex's degree so every
// adjacency list can be allocated once at exactly its final size.
EDGE_ID* n_neigh = (EDGE_ID*)calloc(self->g_n_vert, sizeof(EDGE_ID));
VERT_ID vv;
for (EDGE_ID i = 0; i < self->g_n_valid_edges; i++){
n_neigh[self->g_edges_list[2*i]]++;
n_neigh[self->g_edges_list[(2*i)+1]]++;
}
for (VERT_ID i = 0; i < self->g_n_vert; i++){
self->g_Neighbors_e[i] = (Neighbors*)malloc(n_neigh[i]*sizeof(Neighbors));
self->g_Neighbors[i] = (Neighbors*)malloc(n_neigh[i]*sizeof(Neighbors));
}
// Degree counts are only needed for sizing; release them immediately.
free(n_neigh);
if (!self->g_suppress_output){
printf("\nCreating neighbors...");
}
//double time_create_neigh = omp_get_wtime();
self->g_max_neighbors = 0;
for (EDGE_ID i = 0; i < self->g_n_valid_edges; i++){
VERT_ID v1 = self->g_edges_list[2*i];
VERT_ID v2 = self->g_edges_list[(2*i)+1];
len = self->g_Neigh_len[v1];
self->g_Neighbors[v1][len].order = i;
self->g_Neighbors[v1][len].neighbor = v2;
self->g_Neighbors_e[v1][len].order = i;
self->g_Neighbors_e[v1][len].neighbor = v2;
self->g_Neigh_len[v1]++;
len = self->g_Neigh_len[v2];
self->g_Neighbors[v2][len].order = i;
self->g_Neighbors[v2][len].neighbor = v1;
self->g_Neighbors_e[v2][len].order = i;
self->g_Neighbors_e[v2][len].neighbor = v1;
self->g_Neigh_len[v2]++;
}
//printf("Time taken %f", omp_get_wtime() - time_create_neigh);
if (!self->g_suppress_output){
printf("\nSorting neighbors...");
}
//double time_sort_neigh = omp_get_wtime();
#pragma omp parallel for schedule(static) shared(self)
for (EDGE_ID i = 0; i < self->g_n_vert; i++){
//self->g_Neighbors[i] = (Neighbors*)realloc(self->g_Neighbors[i],\
// self->g_Neigh_len[i]*sizeof(Neighbors));
//self->g_Neighbors_e[i] = (Neighbors*)realloc(self->g_Neighbors_e[i],\
// self->g_Neigh_len[i]*sizeof(Neighbors));
if (self->g_Neigh_len[i] > 1){
sorter_tim_sort(self->g_Neighbors[i], self->g_Neigh_len[i]);
sorter2_tim_sort(self->g_Neighbors_e[i], self->g_Neigh_len[i]);
}
}
#ifdef COMBIDX
// Build a direct lookup from the combinatorial index of a vertex pair
// (via COMB_IDX0) to the edge's position in the sorted edge list.
// Unused slots hold g_n_valid_edges as an "absent" sentinel.
// FIX: cast to EDGE_ID *before* multiplying. The original cast the
// already-computed product, so n_vert*(n_vert-1) was evaluated in
// VERT_ID arithmetic and could overflow for large vertex counts
// before being widened.
self->g_n_edges = ((EDGE_ID)(self->g_n_vert) * (self->g_n_vert-1))/2;
self->g_edges_comb_idx = (EDGE_ID*)malloc(self->g_n_edges*sizeof(EDGE_ID));
for (EDGE_ID mm = 0; mm < self->g_n_edges; mm++){
    self->g_edges_comb_idx[mm] = self->g_n_valid_edges;
}
for (EDGE_ID mm = 0; mm < self->g_n_valid_edges; mm++){
    EDGE_ID idx = COMB_IDX0(self->g_edges_list[2*mm], self->g_edges_list[2*mm+1]);
    self->g_edges_comb_idx[idx] = mm;
#ifdef DEBUGCOMBIDX
    // Sanity check: the lookup must agree with a binary search over the
    // neighbor list of the edge's first endpoint.
    VERT_ID idx2 = search_Neighbors(self\
        , self->g_edges_list[2*mm]\
        , self->g_edges_list[2*mm+1]\
        , 0\
        , self->g_Neigh_len[self->g_edges_list[2*mm]] - 1);
    if (idx2 == self->g_n_vert){
        if (idx != self->g_n_valid_edges){
            printf("\nERRRRROR 0");
            getchar();
        }
    }
    else{
        if (self->g_Neighbors[self->g_edges_list[2*mm]][idx2].order != self->g_edges_comb_idx[idx]){
            printf("\nERRRRROR 1");
            getchar();
        }
    }
#endif
}
#endif
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_neigh = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_neigh += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
//printf("Time taken %f", omp_get_wtime() - time_sort_neigh);
//compute_num_simplices(self);
//exit(1);
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
//
// STEP H0.1: Reduce the edges using column method
//
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
if (!self->g_suppress_output){
printf("\n\n---------------");
printf("\nComputing H0...");
printf("\n---------------\n");
}
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
// R Sparse
self->g_R_sparse_max_H0 = 1000;
self->g_R_sparse_H0 = (EDGE_ID*)malloc(self->g_R_sparse_max_H0*sizeof(EDGE_ID));
self->g_R_sparse_ptr_H0 = 0;
// R sparse col mapping
self->g_R_col_indices_max_H0 = 100;
self->g_R_col_indices_H0 = (EDGE_ID*)malloc(self->g_R_col_indices_max_H0*sizeof(EDGE_ID));
self->g_R_col_indices_ptr_H0 = 1;
// Note which edges have pivots in H0
self->g_edges_with_pivots_H0 = \
(EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
//#ifdef HOM_CYCLES
// For birth cycles
if (self->g_compute_cycles){
self->g_H0_pivot_of = (V_H0*)malloc(self->g_n_vert*sizeof(V_H0));
}
//#endif
/////////////
// WORKSPACE
/////////////
self->g_ws_pre_alloc = 100;
self->g_workspace_size = 1000;
// H0 workspace structures
self->g_R_ws_H0 = \
(EDGE_ID**)malloc(self->g_workspace_size*sizeof(EDGE_ID*));
// H0 workspace info
self->g_R_ws_H0_info = (boundary_H0_ws*)malloc(self->g_workspace_size*sizeof(boundary_H0_ws));
// Initialize ws counter
self->g_ws_counter = 0;
for (int ws_counter = 0; ws_counter < self->g_workspace_size; ws_counter++){
self->g_R_ws_H0_info[ws_counter].max_len = self->g_ws_pre_alloc;
self->g_R_ws_H0[ws_counter] = (EDGE_ID*)malloc(2*self->g_R_ws_H0_info[ws_counter].max_len*sizeof(EDGE_ID));
}
////////////////////////////////
// Allocate jobs for parallel H0
////////////////////////////////
self->g_jobs = (int*)malloc((self->g_cpu_count + 1)*sizeof(int));
allocate_jobs(self, self->g_workspace_size);
int rtn;
self->g_threads = (pthread_t *)malloc(self->g_cpu_count*sizeof(pthread_t));
if ((rtn = pthread_mutex_init(&(self->g_thread_lock), NULL)) !=0)
fprintf(stderr, "pthread_mutex_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_boss), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_workers), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
// Initialize thread creation
self->g_thread_id = 0;
self->g_sleeping_threads = 0;
self->g_delete_threads = 0;
for (int i = 0; i < self->g_cpu_count; i++){
if ((rtn = pthread_create( \
&(self->g_threads[i]) \
, NULL \
, reduce_with_complex_H0 \
, (void*)self)!= 0))
fprintf(stderr, "pthread_create %d", rtn), exit(-1);
}
// Wait for threads to be initialized
pthread_mutex_lock(&(self->g_thread_lock));
while(self->g_sleeping_threads != self->g_cpu_count){
pthread_cond_wait(&(self->g_start_boss) \
, &(self->g_thread_lock));
}
////////////////////////////////
////////////////////////////////
// Main H0 Homology loop
////////////////////////////////
// Main H0 loop: stage every edge (a 2-vertex boundary column) into the
// workspace; each time the workspace fills up, reduce the whole batch
// in parallel via reduce_ws_H0.
for (EDGE_ID i = 0; i < self->g_n_valid_edges; i++){
//printf("Percentage %f\r", (float)i/(float)self->g_n_valid_edges);
//
//if (i%10000 == 0){
// printf("\rProcessing edge %d", i);
//}
////////////////////
// Append to workspace_H0
////////////////////
//self->g_ws_simplices_H0[self->g_ws_counter] = i;
boundary_H0_ws* this_ws = self->g_R_ws_H0_info + self->g_ws_counter;
// coboundary
this_ws->cob = i;
// Initially, the original is at 0
this_ws->original = 0;
// Length
this_ws->len = 2;
// Non empty
this_ws->flag_non_empty = 1;
// Recall: edge_list has v_max at 1 and v_min at 0
self->g_R_ws_H0[self->g_ws_counter][0] = self->g_edges_list[2*i];
self->g_R_ws_H0[self->g_ws_counter][1] = self->g_edges_list[2*i+1];
// Pivot
this_ws->pivot = self->g_edges_list[2*i+1];
self->g_ws_counter += 1;
// Workspace full: reduce this batch before staging more columns.
if (self->g_ws_counter == self->g_workspace_size){
reduce_ws_H0(self);
}
}
// Reduction of final batch
// NOTE(review): reduce_ws_H0 is expected to drain the workspace
// (i.e. bring g_ws_counter back to 0) — loop until it is empty.
while (self->g_ws_counter){
// Allocate the last batch of size g_ws_counter
allocate_jobs(self, self->g_ws_counter);
reduce_ws_H0(self);
}
self->g_R_sparse_H0 = (EDGE_ID*)realloc( \
self->g_R_sparse_H0\
, (self->g_R_sparse_ptr_H0+1)*sizeof(EDGE_ID));
self->g_R_col_indices_H0 = (EDGE_ID*)realloc( \
self->g_R_col_indices_H0 \
, (self->g_R_col_indices_ptr_H0+1)*sizeof(EDGE_ID));
/////////////////////////
// Cancel the threads
/////////////////////////
self->g_delete_threads = 1;
pthread_cond_broadcast(&(self->g_start_workers));
pthread_mutex_unlock(&(self->g_thread_lock));
for (int i = 0; i < self->g_cpu_count; i++){
pthread_join(self->g_threads[i], NULL);
}
free(self->g_threads);
free(self->g_jobs);
//////////////////////////////////////////////////
// Clear H0 parallel workspace
//////////////////////////////////////////////////
for (int ws_counter = 0; ws_counter < self->g_workspace_size; ws_counter++){
free(self->g_R_ws_H0[ws_counter]);
}
free(self->g_R_ws_H0);
free(self->g_R_ws_H0_info);
/////////////////////////
// Write H0 deaths to file
/////////////////////////
//// BINARY FILE
//FILE* fp2 = fopen("H0_pers_pairs.bin", "wb");
//fwrite(self->g_H0_pers_pairs, sizeof(PAR),self->g_H0_pers_pairs_len, fp2);
//fclose(fp2);
#ifdef SAVEPD
// TEXT FILE
FILE* fp2 = fopen(self->g_H0_pers_file, "w");
if (self->g_filetype == 1){
for (EDGE_ID it = 0; it < self->g_n_valid_edges; it++){
if (self->g_edges_with_pivots_H0[it]){
fprintf(fp2, "%.12lf,", sqrt(self->g_edge_parameter[it]));
}
}
}
else{
for (EDGE_ID it = 0; it < self->g_n_valid_edges; it++){
if (self->g_edges_with_pivots_H0[it]){
fprintf(fp2, "%.12lf,", self->g_edge_parameter[it]);
}
}
}
fclose(fp2);
#endif
#ifdef PRINT
if (!self->g_suppress_output){
printf("\nPers pairs in dim 0");
}
if (self->g_filetype == 1){
for (EDGE_ID it = 0; it < self->g_n_valid_edges; it++){
if (self->g_edges_with_pivots_H0[it]){
printf("\n%.12lf,", sqrt(self->g_edge_parameter[it]));
}
}
}
else{
for (EDGE_ID it = 0; it < self->g_n_valid_edges; it++){
if (self->g_edges_with_pivots_H0[it]){
printf("\n%.12lf,", self->g_edge_parameter[it]);
}
}
}
#endif
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_H0 = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_H0 += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
//
// STEP coH1.1: Find cohomology now for the edges
//
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
if (!self->g_suppress_output){
printf("\n\n-----------------");
printf("\nComputing coH1...");
printf("\n-----------------\n");
}
//double time_compute_coH1 = omp_get_wtime();
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
// V sparse
EDGE_ID pre_alloc = 1000;
self->g_V_sparse_max = pre_alloc;
self->g_V_sparse_H1 = (EDGE_ID*)malloc(self->g_V_sparse_max*sizeof(EDGE_ID));
self->g_V_sparse_ptr = 1;
self->g_V_sparse_beg_ptr = 1;
self->g_V_sparse_end_ptr = 1;
self->g_V_col_indices_max = pre_alloc;
self->g_V_col_indices = (EDGE_ID*)malloc(self->g_V_col_indices_max*sizeof(EDGE_ID));
self->g_V_col_indices_ptr = 1;
////////////////////////////////////////////////////
// INITIALIZE WORKSPACE
////////////////////////////////////////////////////
self->g_cohom_ws_size = 100;
self->g_V_ws_H1 = (coboundary_H1_ws*)malloc(self->g_cohom_ws_size*sizeof(coboundary_H1_ws));
for (EDGE_ID mm = 0; mm < self->g_cohom_ws_size; mm++){
self->g_V_ws_H1[mm].max_len = 10;
self->g_V_ws_H1[mm].last = 0;
self->g_V_ws_H1[mm].keys1 = (implicit_keys1*)malloc(self->g_V_ws_H1[mm].max_len*sizeof(implicit_keys1));
for (EDGE_ID nn = 0; nn < self->g_V_ws_H1[mm].max_len; nn++){
self->g_V_ws_H1[mm].keys1[nn].max_len = 10;
self->g_V_ws_H1[mm].keys1[nn].last = 0;
self->g_V_ws_H1[mm].keys1[nn].flag_empty = 1;
self->g_V_ws_H1[mm].keys1[nn].keys2 =\
(implicit_keys2*)malloc(self->g_V_ws_H1[mm].keys1[nn].max_len*sizeof(implicit_keys2));
}
self->g_V_ws_H1[mm].v_edges.max_len = 100;
self->g_V_ws_H1[mm].v_edges.last = 0;
self->g_V_ws_H1[mm].v_edges.o_ab = (EDGE_ID*)malloc(self->g_V_ws_H1[mm].v_edges.max_len*sizeof(EDGE_ID));
}
////////////////////////////////////////////////////
// H1 pivots
self->g_H1_cohom_pivots = (H1_cohom_pivots**)malloc(self->g_n_valid_edges*sizeof(H1_cohom_pivots*));
self->g_H1_cohom_pivots_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
self->g_H1_cohom_pivots_max_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
// H1 Pers pairs
self->g_H1_pers_pairs_max_len = 1000;
self->g_H1_pers_pairs_len = 0;
self->g_H1_pers_pairs = (PAR*)malloc(self->g_H1_pers_pairs_max_len*sizeof(PAR));
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
self->g_H1_undead_ptr = 0;
self->g_H1_undead_max = 10;
self->g_H1_undead = (EDGE_ID*)malloc(self->g_H1_undead_max*sizeof(EDGE_ID));
}
//#endif
int new_debug = 0;
self->g_coH1_all_lows = (coboundary_H1*)malloc(self->g_n_valid_edges*sizeof(coboundary_H1));
////////////////////////////////////////////////////////////////
// Allocate jobs/threads for parallel coH1
////////////////////////////////////////////////////////////////
self->g_jobs = (int*)malloc((self->g_cpu_count + 1)*sizeof(int));
allocate_jobs(self, self->g_cohom_ws_size);
self->g_threads = (pthread_t *)malloc(self->g_cpu_count*sizeof(pthread_t));
if ((rtn = pthread_mutex_init(&(self->g_thread_lock), NULL)) !=0)
fprintf(stderr, "pthread_mutex_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_boss), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_workers), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
// Initialize thread creation
self->g_thread_id = 0;
self->g_sleeping_threads = 0;
self->g_delete_threads = 0;
for (int i = 0; i < self->g_cpu_count; i++){
if ((rtn = pthread_create( \
&(self->g_threads[i]) \
, NULL \
, reduce_with_complex_coH1 \
, (void*)self)!= 0))
fprintf(stderr, "pthread_create %d", rtn), exit(-1);
}
// Wait for threads to be initialized
pthread_mutex_lock(&(self->g_thread_lock));
while(self->g_sleeping_threads != self->g_cpu_count){
pthread_cond_wait(&(self->g_start_boss) \
, &(self->g_thread_lock));
}
////////////////////////////////
#pragma omp parallel for schedule(static) shared(self)
for (EDGE_ID mm = 0; mm < self->g_n_valid_edges; mm++) {
self->g_coH1_all_lows[mm].o_ab = mm;
find_H1_cohom_low(self, &(self->g_coH1_all_lows[mm]));
// Need to find a_ptr and b_ptr if first low.key1 > e
if (self->g_coH1_all_lows[mm].low.key1 > self->g_coH1_all_lows[mm].o_ab){
VERT_ID a = self->g_edges_list[2*self->g_coH1_all_lows[mm].o_ab];
VERT_ID b = self->g_edges_list[2*self->g_coH1_all_lows[mm].o_ab+1];
self->g_coH1_all_lows[mm].a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a]-1\
, self->g_coH1_all_lows[mm].low.key1, self->g_Neigh_len[a]);
self->g_coH1_all_lows[mm].b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b]-1\
, self->g_coH1_all_lows[mm].low.key1, self->g_Neigh_len[b]);
}
}
self->g_this_edge = self->g_n_valid_edges;
///////////////////////////////////////////////////
// MAIN coH1 loop
///////////////////////////////////////////////////
//getchar();
self->g_new_debug = 0;
self->g_new_debug2 = 0;
self->g_ws_counter = 0;
//self->g_debug_edge = self->g_n_valid_edges;
self->g_debug_edge = 307605;
while(self->g_this_edge){
self->g_this_edge--;
//if (self->g_this_edge%10000 == 0){
// printf("\nProcessing edge %d", self->g_this_edge);
//}
///////////////////////////////////////////////////
// CLEARING ALGORITHM
// Does this edge have a pivot?
///////////////////////////////////////////////////
if (self->g_edges_with_pivots_H0[self->g_this_edge]){
//This edge has a pivot in H0. So, skip it. Continue;
//skip++;
#ifdef COH1DEBUG
if (self->g_this_edge == self->g_debug_edge ){
printf("\nskipping because cleared. so, cannot have anything relevant");
}
#endif
continue;
}
///////////////////////////////////////////////////
///////////////////////////////////////////////////
if (self->g_coH1_all_lows[self->g_this_edge].low.key1 == self->g_n_valid_edges){
// This edge has no coboundary
//if (self->g_new_debug){
// printf("\nno cob, skipping");
//}
// Add this as undead in H1
if (self->g_H1_pers_pairs_len+2 == self->g_H1_pers_pairs_max_len){
self->g_H1_pers_pairs_max_len += 1000;
self->g_H1_pers_pairs = (PAR*)realloc(self->g_H1_pers_pairs\
, self->g_H1_pers_pairs_max_len*sizeof(PAR));
}
self->g_H1_pers_pairs[self->g_H1_pers_pairs_len++] = \
self->g_edge_parameter[self->g_this_edge];
self->g_H1_pers_pairs[self->g_H1_pers_pairs_len++] = -1;
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
self->g_H1_undead[self->g_H1_undead_ptr++] = self->g_this_edge;
if (self->g_H1_undead_ptr == self->g_H1_undead_max){
self->g_H1_undead_max += 100;
self->g_H1_undead = (EDGE_ID*)realloc(self->g_H1_undead\
, self->g_H1_undead_max*sizeof(EDGE_ID));
}
}
//#endif
#ifdef COH1DEBUG
if (self->g_this_edge == self->g_debug_edge ){
printf("\nskipping because has no cob");
}
#endif
continue;
}
// This is a trivial pair
if (self->g_coH1_all_lows[self->g_this_edge].low.key1 == self->g_this_edge){
#ifdef COH1DEBUG
if (self->g_this_edge == self->g_debug_edge ){
printf("\nis a trivial pers pair. dont have to add pivot.");
}
#endif
// I DO NOT KNOW WHY I HAVE THIS HERE
//self->g_edges_with_pivots_H0[self->g_this_edge] = 10;
continue;
}
#ifdef COH1DEBUG
if (self->g_this_edge == self->g_debug_edge ){
printf("\nhave to start reduction");
self->g_new_debug = 1;
self->g_new_debug2 = 1;
}
#endif
coboundary_H1_ws* this_ws = self->g_V_ws_H1 + self->g_ws_counter;
this_ws->edge = self->g_this_edge;
this_ws->pivot = self->g_coH1_all_lows[this_ws->edge].low;
this_ws->flag_first = 1;
this_ws->flag_red_w_complex = 0;
this_ws->flag_red_w_trivial = 0;
this_ws->flag_append_to_complex = 0;
this_ws->flag_non_empty = 1;
// FIRST ENTRY IN hash-table
this_ws->k1_ptr = 0;
this_ws->k2_ptr = 0;
this_ws->last = 1;
this_ws->keys1[0].last = 1;
this_ws->keys1[0].flag_empty = 0;
this_ws->keys1[0].k1 = this_ws->pivot.key1;
this_ws->keys1[0].keys2[0].k2 = this_ws->pivot.key2;
this_ws->keys1[0].keys2[0].o_ab = self->g_coH1_all_lows[this_ws->edge].o_ab;
this_ws->keys1[0].keys2[0].a_ptr = self->g_coH1_all_lows[this_ws->edge].a_ptr;
this_ws->keys1[0].keys2[0].b_ptr = self->g_coH1_all_lows[this_ws->edge].b_ptr;
this_ws->keys1[0].keys2[0].flag_next = 1;
this_ws->v_edges.last = 0;
self->g_ws_counter++;
if (self->g_ws_counter == self->g_cohom_ws_size){
reduce_ws_coH1(self);
}
}
while(self->g_ws_counter){
allocate_jobs(self, self->g_ws_counter);
reduce_ws_coH1(self);
}
/////////////////////////
// Cancel the threads used in getting next during reduction
/////////////////////////
self->g_delete_threads = 1;
pthread_cond_broadcast(&(self->g_start_workers));
pthread_mutex_unlock(&(self->g_thread_lock));
for (int i = 0; i < self->g_cpu_count; i++){
pthread_join(self->g_threads[i], NULL);
}
free(self->g_threads);
free(self->g_jobs);
if (!self->g_suppress_output){
printf("\nsparse V coH1 length %d", self->g_V_sparse_ptr);
}
self->g_H1_pers_pairs = (PAR*)realloc(self->g_H1_pers_pairs, self->g_H1_pers_pairs_len*sizeof(PAR));
//// BINARY FILE
//FILE* fp2 = fopen("H1_pers_pairs.bin", "wb");
//fwrite(self->g_H1_pers_pairs, sizeof(PAR),self->g_H1_pers_pairs_len, fp2);
//fclose(fp2);
#ifdef SAVEPD
// TEXT FILE
fp2 = fopen(self->g_H1_pers_file, "w");
PAR ddeath;
if (self->g_filetype == 1){
for (EDGE_ID it = 0; it < self->g_H1_pers_pairs_len; it+=2){
if (self->g_H1_pers_pairs[it+1] == -1){
ddeath = -1;
}
else{
ddeath = sqrt(self->g_H1_pers_pairs[it+1]);
}
fprintf(fp2, "%0.12lf, %0.12lf\n", sqrt(self->g_H1_pers_pairs[it]), ddeath);
}
}
else{
for (EDGE_ID it = 0; it < self->g_H1_pers_pairs_len; it+=2){
fprintf(fp2, "%0.12lf, %0.12lf\n", self->g_H1_pers_pairs[it], self->g_H1_pers_pairs[it+1]);
}
}
fclose(fp2);
#endif
#ifdef PRINT
// TEXT FILE: echo the H1 persistence pairs to stdout.
if (!self->g_suppress_output){
    printf("\nPers pairs in dim 1");
}
if (self->g_filetype == 1){
    // Point-cloud input: stored parameters are squared distances, so
    // report their square roots.
    for (EDGE_ID it = 0; it < self->g_H1_pers_pairs_len; it+=2){
        // BUG FIX: undead classes store death = -1 (see where undead
        // pairs are appended above); taking sqrt(-1) printed NaN.
        // Mirror the SAVEPD branch and emit -1 unchanged.
        PAR print_death = (self->g_H1_pers_pairs[it+1] == -1) ? \
                          -1 : sqrt(self->g_H1_pers_pairs[it+1]);
        printf("\n%0.12lf, %0.12lf", sqrt(self->g_H1_pers_pairs[it]), print_death);
    }
}
else{
    for (EDGE_ID it = 0; it < self->g_H1_pers_pairs_len; it+=2){
        printf("\n%0.12lf, %0.12lf", self->g_H1_pers_pairs[it], self->g_H1_pers_pairs[it+1]);
    }
}
#endif
#ifdef SAVEV
// TEXT FILE: dump the sparse V matrix from the coH1 reduction.
// BUG FIX: only g_V_sparse_ptr entries are in use (the "sparse V coH1
// length" report above prints g_V_sparse_ptr); iterating to
// g_V_sparse_max dumped uninitialized slots of the preallocated
// buffer. Index 0 is unused, hence the loop starts at 1.
// NOTE(review): the fopen result is not checked — confirm
// g_coH1_V_file is always writable before relying on this output.
fp2 = fopen(self->g_coH1_V_file, "w");
for (EDGE_ID it = 1; it < self->g_V_sparse_ptr; it++){
    fprintf(fp2, "%d\n", self->g_V_sparse_H1[it]);
}
fclose(fp2);
#endif
// FREE coH1 Workspace
for (int bb = 0; bb < self->g_cohom_ws_size; bb++){
for (int mm = 0; mm < self->g_V_ws_H1[bb].max_len; mm++){
free(self->g_V_ws_H1[bb].keys1[mm].keys2);
}
free(self->g_V_ws_H1[bb].keys1);
free(self->g_V_ws_H1[bb].v_edges.o_ab);
}
free(self->g_V_ws_H1);
// FREE V_sparse
free(self->g_V_sparse_H1);
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_coH1 = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_coH1 += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
//printf("Time: %lf\n", elapsed_wall_clock);
/////////////////////////////////////////
// Computing H1 homology and birth cycles
/////////////////////////////////////////
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
self->g_timer_computeH1 = 0;
self->g_timer_H1cycles = 0;
self->g_timer_minimize_H1cycles = 0;
//#ifdef HOM_CYCLES
if (self->g_compute_cycles) compute_H1_homology_cycles(self);
//#endif
if (self->g_dim_lim == 1){
if (!self->g_suppress_output){
printf("\nTime to process input : %lf" , self->g_timer_process_input);
printf("\nTime to create neigh: %lf" , self->g_timer_neigh);
printf("\nTime to compute H0: %lf" , self->g_timer_H0);
printf("\nTime to compute coH1: %lf" , self->g_timer_coH1);
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
printf("\nTime to compute H1: %lf" , self->g_timer_computeH1);
printf("\nTime to compute %llu H1 cycles: %lf" , self->g_n_H1_birth_cycles, self->g_timer_H1cycles);
printf("\nStored V_H0 %llu" , self->g_n_H0_stored_V);
}
//#endif
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths) printf("\nTime to minimize H1 birth cycles: %lf" , self->g_timer_minimize_H1cycles);
//#endif
//#ifdef MINIMIZE_HOM_CYCLES
// printf("\nTime to minimize H1 hom cycles: %lf" , self->g_timer_minimize_H1_homcycles);
//#endif
printf("\nTotal time taken: %lf", \
self->g_timer_process_input\
+ self->g_timer_neigh\
+ self->g_timer_H0\
+ self->g_timer_coH1\
+ self->g_timer_computeH1\
+ self->g_timer_H1cycles\
+ self->g_timer_minimize_H1cycles\
+ self->g_timer_minimize_H1_homcycles\
);
}
deallocator(self);
if (!self->g_suppress_output){
printf("\nQuitting after coH1");
}
//Py_RETURN_NONE;
exit(0);
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
//
// STEP coH2.1: Find cohomology now for the triangles
//
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
if (!self->g_suppress_output){
printf("\n\n--------------");
printf("\nComputing coH2");
printf("\n--------------");
}
// sparse V coH2
self->g_V_sparse_max = 100000;
self->g_V_sparse_H2 = (simplex*)malloc(self->g_V_sparse_max*sizeof(simplex));
self->g_V_sparse_ptr = 1;
self->g_V_sparse_beg_ptr = 1;
self->g_V_sparse_end_ptr = 1;
self->g_V_col_indices_ptr = 1;
////////////////////////////////////////////////////
// INITIALIZE WORKSPACE
////////////////////////////////////////////////////
self->g_cohom_ws_size = 100;
self->g_V_ws_H2 = (coboundary_H2_ws*)malloc(self->g_cohom_ws_size*sizeof(coboundary_H2_ws));
for (EDGE_ID mm = 0; mm < self->g_cohom_ws_size; mm++){
self->g_V_ws_H2[mm].max_len = 10;
self->g_V_ws_H2[mm].last = 0;
self->g_V_ws_H2[mm].keys1 = (coH2_implicit_keys1*)malloc(self->g_V_ws_H2[mm].max_len*sizeof(coH2_implicit_keys1));
for (EDGE_ID nn = 0; nn < self->g_V_ws_H2[mm].max_len; nn++){
self->g_V_ws_H2[mm].keys1[nn].max_len = 10;
self->g_V_ws_H2[mm].keys1[nn].last = 0;
self->g_V_ws_H2[mm].keys1[nn].flag_empty = 1;
self->g_V_ws_H2[mm].keys1[nn].keys2 =\
(coH2_implicit_keys2*)malloc(self->g_V_ws_H2[mm].keys1[nn].max_len*sizeof(coH2_implicit_keys2));
}
self->g_V_ws_H2[mm].v_triangles.max_len = 100;
self->g_V_ws_H2[mm].v_triangles.last = 0;
self->g_V_ws_H2[mm].v_triangles.o_abc = (simplex*)malloc(self->g_V_ws_H2[mm].v_triangles.max_len*sizeof(simplex));
}
////////////////////////////////////////////////////
// PIVOTS
self->g_H2_cohom_pivots = (H2_cohom_pivots**)malloc(self->g_n_valid_edges*sizeof(H2_cohom_pivots*));
self->g_H2_cohom_pivots_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
self->g_H2_cohom_pivots_max_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
// H1 Pers pairs
self->g_H2_pers_pairs_max_len = 1000;
self->g_H2_pers_pairs_len = 0;
self->g_H2_pers_pairs = (PAR*)malloc(self->g_H2_pers_pairs_max_len*sizeof(PAR));
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
self->g_H2_undead_ptr = 0;
self->g_H2_undead_max = 10;
self->g_H2_undead = (simplex*)malloc(self->g_H2_undead_max*sizeof(simplex));
}
//#endif
////////////////////////////////////////////////////////////////
// Allocate jobs/threads for parallel coH2
////////////////////////////////////////////////////////////////
self->g_jobs = (int*)malloc((self->g_cpu_count + 1)*sizeof(int));
allocate_jobs(self, self->g_cohom_ws_size);
self->g_threads = (pthread_t *)malloc(self->g_cpu_count*sizeof(pthread_t));
if ((rtn = pthread_mutex_init(&(self->g_thread_lock), NULL)) !=0)
fprintf(stderr, "pthread_mutex_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_boss), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_workers), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
// Initialize thread creation
self->g_thread_id = 0;
self->g_sleeping_threads = 0;
self->g_delete_threads = 0;
for (int i = 0; i < self->g_cpu_count; i++){
if ((rtn = pthread_create( \
&(self->g_threads[i]) \
, NULL \
, reduce_with_complex_coH2 \
, (void*)self)!= 0))
fprintf(stderr, "pthread_create %d", rtn), exit(-1);
}
// Wait for threads to be initialized
pthread_mutex_lock(&(self->g_thread_lock));
while(self->g_sleeping_threads != self->g_cpu_count){
pthread_cond_wait(&(self->g_start_boss) \
, &(self->g_thread_lock));
}
////////////////////////////////
// BUFFER
EDGE_ID buffer_len = 1000000;
int buffer_ptr = 0;
coboundary_H2* coH2_lows_buffer = (coboundary_H2*)malloc(buffer_len*sizeof(coboundary_H2));
///////////////////////////////////////////////////
// MAIN coH2 loop
///////////////////////////////////////////////////
//
//EDGE_ID i = self->g_n_valid_edges;
coboundary_H2* temp_temp_triangles = (coboundary_H2*)malloc(self->g_n_vert*sizeof(coboundary_H2));
VERT_ID temp_temp_len = 0;
coboundary_H2 temp_triangle;
self->g_debug_triangle.key1 = self->g_n_valid_edges ;
self->g_debug_triangle.key2 = self->g_n_valid_edges ;
//self->g_debug_triangle.key1 = 46494 ;
//self->g_debug_triangle.key2 = 269 ;
VERT_ID a, b, c, a_ptr, b_ptr;
EDGE_ID ac, bc, has_pivot;
EDGE_ID ab = self->g_n_valid_edges;
while(ab){
ab--;
if (!self->g_suppress_output){
if (self->g_n_valid_edges > 1000){
if (ab % (self->g_n_valid_edges/100) == 0){
printf("\rProcessing coH2 for edge %d out of %d", ab, self->g_n_valid_edges);
}
}
else{
printf("\rProcessing coH2 for edge %d out of %d", ab, self->g_n_valid_edges);
}
}
a = self->g_edges_list[2*ab];
b = self->g_edges_list[2*ab+1];
// Find the faces which are created when this edge is formed
// That means, the o_max will be ab
//ab = i;
a_ptr = 0;
b_ptr = 0;
while ((a_ptr < self->g_Neigh_len[a])\
&& (b_ptr < self->g_Neigh_len[b])){
if (self->g_Neighbors[a][a_ptr].neighbor < self->g_Neighbors[b][b_ptr].neighbor)
{
a_ptr++;
}
else if (self->g_Neighbors[a][a_ptr].neighbor > self->g_Neighbors[b][b_ptr].neighbor)
{
b_ptr++;
}
else{
c = self->g_Neighbors[a][a_ptr].neighbor;
//if (!simplex2_check(a, b, c)) continue;
ac = self->g_Neighbors[a][a_ptr++].order;
bc = self->g_Neighbors[b][b_ptr++].order;
if ((ac > ab) \
|| (bc > ab))
continue;
temp_triangle.triangle.key1 = ab;
temp_triangle.triangle.key2 = (EDGE_ID)c;
///////////////////////////////////////////////////
// CLEARING ALGORITHM
// Does this triangle have a pivot in coH1?
///////////////////////////////////////////////////
// Check whether the triangle is pivot of a trivial pair in coH1
if ((self->g_coH1_all_lows[temp_triangle.triangle.key1].low.key1 == temp_triangle.triangle.key1)\
&&(self->g_coH1_all_lows[temp_triangle.triangle.key1].low.key2 == temp_triangle.triangle.key2)){
//printf("\nSkipping");
continue;
}
// Check whether the triangle is a pivot in coH1
if (self->g_H1_cohom_pivots_len[temp_triangle.triangle.key1]){
has_pivot = search_H1_cohom_pivots(self->g_H1_cohom_pivots[temp_triangle.triangle.key1]\
, 0 \
, self->g_H1_cohom_pivots_len[temp_triangle.triangle.key1] - 1\
, temp_triangle.triangle.key2 \
, self->g_n_valid_edges);
if (has_pivot != self->g_n_valid_edges){
//This triangle has a pivot in H1. So, skip it. Continue;
//printf("\nSkipping");
//getchar();
continue;
}
}
// END OF CLEARING ALGORITHM
///////////////////////////////////////////////////
temp_temp_triangles[temp_temp_len++] = temp_triangle;
}
}
while (temp_temp_len > 0){
temp_temp_len--;
coH2_lows_buffer[buffer_ptr++].triangle = temp_temp_triangles[temp_temp_len].triangle;
if (buffer_ptr == buffer_len){
#pragma omp parallel for schedule(static) \
shared(self, coH2_lows_buffer)
for (EDGE_ID mm = 0; mm < buffer_len; mm++) {
find_H2_cohom_low(self, &(coH2_lows_buffer[mm]));
}
EDGE_ID mm = 0;
while (mm < buffer_len){
if (coH2_lows_buffer[mm].vertex == -1){
//If it has empty cob, then it is undead cycle
//printf("\n Adding undead for H2");
if (self->g_H2_pers_pairs_len+2 == self->g_H2_pers_pairs_max_len){
self->g_H2_pers_pairs_max_len += 1000;
self->g_H2_pers_pairs = (PAR*)realloc(self->g_H2_pers_pairs\
, self->g_H2_pers_pairs_max_len*sizeof(PAR));
}
self->g_H2_pers_pairs[self->g_H2_pers_pairs_len++] =\
self->g_edge_parameter[coH2_lows_buffer[mm].triangle.key1];
self->g_H2_pers_pairs[self->g_H2_pers_pairs_len++] = -1;
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
self->g_H2_undead[self->g_H2_undead_ptr++] = coH2_lows_buffer[mm].triangle;
if (self->g_H2_undead_ptr == self->g_H2_undead_max){
self->g_H2_undead_max += 100;
self->g_H2_undead = (simplex*)realloc(self->g_H2_undead\
, self->g_H2_undead_max*sizeof(simplex));
}
}
//#endif
mm++;
continue;
}
// Is this is a trivial pair?
if ((coH2_lows_buffer[mm].low.key1 == coH2_lows_buffer[mm].triangle.key1)\
&&(self->g_edges_list[2*coH2_lows_buffer[mm].low.key2+1]== coH2_lows_buffer[mm].triangle.key2)){
mm++;
continue;
}
coboundary_H2_ws* this_ws = self->g_V_ws_H2 + self->g_ws_counter;
this_ws->triangle = coH2_lows_buffer[mm].triangle;
this_ws->pivot = coH2_lows_buffer[mm].low;
this_ws->flag_first = 1;
this_ws->flag_red_w_complex = 0;
this_ws->flag_red_w_trivial = 0;
this_ws->flag_append_to_complex = 0;
this_ws->flag_non_empty = 1;
this_ws->k1_ptr = 0;
this_ws->k2_ptr = 0;
this_ws->last = 1;
this_ws->keys1[0].last = 1;
this_ws->keys1[0].flag_empty = 0;
this_ws->keys1[0].k1 = this_ws->pivot.key1;
this_ws->keys1[0].keys2[0].k2 = this_ws->pivot.key2;
this_ws->keys1[0].keys2[0].o_abc = coH2_lows_buffer[mm].triangle;
this_ws->keys1[0].keys2[0].a_ptr = coH2_lows_buffer[mm].a_ptr;
this_ws->keys1[0].keys2[0].b_ptr = coH2_lows_buffer[mm].b_ptr;
this_ws->keys1[0].keys2[0].c_ptr = coH2_lows_buffer[mm].c_ptr;
this_ws->keys1[0].keys2[0].vertex = coH2_lows_buffer[mm].vertex;
this_ws->keys1[0].keys2[0].flag_next = 1;
this_ws->v_triangles.last = 0;
self->g_ws_counter++;
if (self->g_ws_counter == self->g_cohom_ws_size){
reduce_ws_coH2(self);
}
mm++;
}
buffer_ptr = 0;
}
}
}
#pragma omp parallel for schedule(static) \
shared(self, coH2_lows_buffer)
for (EDGE_ID mm = 0; mm < buffer_ptr; mm++) {
find_H2_cohom_low(self, &(coH2_lows_buffer[mm]));
}
//for (EDGE_ID mm = 0; mm < buffer_ptr; mm++) {
EDGE_ID mm = 0;
while (mm < buffer_ptr){
if (coH2_lows_buffer[mm].vertex == -1){
mm++;
continue;
}
// Is this is a trivial pair?
if ((coH2_lows_buffer[mm].low.key1 == coH2_lows_buffer[mm].triangle.key1)\
&&(self->g_edges_list[2*coH2_lows_buffer[mm].low.key2+1]== coH2_lows_buffer[mm].triangle.key2)){
mm++;
continue;
}
coboundary_H2_ws* this_ws = self->g_V_ws_H2 + self->g_ws_counter;
this_ws->triangle = coH2_lows_buffer[mm].triangle;
this_ws->pivot = coH2_lows_buffer[mm].low;
this_ws->flag_first = 1;
this_ws->flag_red_w_complex = 0;
this_ws->flag_red_w_trivial = 0;
this_ws->flag_append_to_complex = 0;
this_ws->flag_non_empty = 1;
this_ws->k1_ptr = 0;
this_ws->k2_ptr = 0;
this_ws->last = 1;
this_ws->keys1[0].last = 1;
this_ws->keys1[0].flag_empty = 0;
this_ws->keys1[0].k1 = this_ws->pivot.key1;
this_ws->keys1[0].keys2[0].k2 = this_ws->pivot.key2;
this_ws->keys1[0].keys2[0].o_abc = coH2_lows_buffer[mm].triangle;
this_ws->keys1[0].keys2[0].a_ptr = coH2_lows_buffer[mm].a_ptr;
this_ws->keys1[0].keys2[0].b_ptr = coH2_lows_buffer[mm].b_ptr;
this_ws->keys1[0].keys2[0].c_ptr = coH2_lows_buffer[mm].c_ptr;
this_ws->keys1[0].keys2[0].vertex = coH2_lows_buffer[mm].vertex;
this_ws->keys1[0].keys2[0].flag_next = 1;
this_ws->v_triangles.last = 0;
self->g_ws_counter++;
if (self->g_ws_counter == self->g_cohom_ws_size){
reduce_ws_coH2(self);
}
mm++;
}
while(self->g_ws_counter){
allocate_jobs(self, self->g_ws_counter);
reduce_ws_coH2(self);
}
//////////////////////////////////////////////////
// Cancel the threads used in getting next during reduction
//////////////////////////////////////////////////
self->g_delete_threads = 1;
pthread_cond_broadcast(&(self->g_start_workers));
pthread_mutex_unlock(&(self->g_thread_lock));
for (int i = 0; i < self->g_cpu_count; i++){
pthread_join(self->g_threads[i], NULL);
}
free(self->g_threads);
free(self->g_jobs);
//printf("\nTime taken to compute coH1 %f", omp_get_wtime() - time_compute_coH1);
if (!self->g_suppress_output){
printf("\nsparse V coH2 length %d", self->g_V_sparse_ptr);
}
//////////////////////////////////////////////////
// FREE coH2 Workspace
//////////////////////////////////////////////////
for (int bb = 0; bb < self->g_cohom_ws_size; bb++){
for (int mm = 0; mm < self->g_V_ws_H2[bb].max_len; mm++){
free(self->g_V_ws_H2[bb].keys1[mm].keys2);
}
free(self->g_V_ws_H2[bb].keys1);
free(self->g_V_ws_H2[bb].v_triangles.o_abc);
}
free(self->g_V_ws_H2);
// FREE temp_temp_triangles
free(temp_temp_triangles);
self->g_H2_pers_pairs = (PAR*)realloc(self->g_H2_pers_pairs, self->g_H2_pers_pairs_len*sizeof(PAR));
// BINARY FILE
//fp2 = fopen("H2_pers_pairs.bin", "wb");
//fwrite(self->g_H2_pers_pairs, sizeof(PAR),self->g_H2_pers_pairs_len, fp2);
//fclose(fp2);
#ifdef SAVEPD
// TEXT FILE
fp2 = fopen(self->g_H2_pers_file, "w");
if (self->g_filetype == 1){
for (EDGE_ID it = 0; it < self->g_H2_pers_pairs_len; it+=2){
if (self->g_H2_pers_pairs[it+1] == -1){
ddeath = -1;
}
else{
ddeath = sqrt(self->g_H2_pers_pairs[it+1]);
}
fprintf(fp2, "%0.12lf, %0.12lf\n", sqrt(self->g_H2_pers_pairs[it]), ddeath);
}
}
else{
for (EDGE_ID it = 0; it < self->g_H2_pers_pairs_len; it+=2){
fprintf(fp2, "%0.12lf, %0.12lf\n", self->g_H2_pers_pairs[it], self->g_H2_pers_pairs[it+1]);
}
}
fclose(fp2);
#endif
#ifdef PRINT
// TEXT FILE
if (!self->g_suppress_output){
printf("\nPers pairs in dim 2");
}
if (self->g_filetype == 1){
for (EDGE_ID it = 0; it < self->g_H2_pers_pairs_len; it+=2){
printf("\n%0.12lf, %0.12lf", sqrt(self->g_H2_pers_pairs[it]), sqrt(self->g_H2_pers_pairs[it+1]));
}
}
else{
for (EDGE_ID it = 0; it < self->g_H2_pers_pairs_len; it+=2){
printf("\n%0.12lf, %0.12lf", self->g_H2_pers_pairs[it], self->g_H2_pers_pairs[it+1]);
}
}
#endif
#ifdef SAVEV
// TEXT FILE
fp2 = fopen(self->g_coH2_V_file, "w");
for (EDGE_ID it = 1; it < self->g_V_sparse_max; it++){
fprintf(fp2, "%d\n", self->g_V_sparse_H2[it]);
}
fclose(fp2);
#endif
// FREE V_sparse
free(self->g_V_sparse_H2);
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_coH2 = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_coH2 += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
/////////////////////////////////////////
// Computing H2 homology and birth cycles
/////////////////////////////////////////
self->g_timer_computeH2 = 0;
self->g_timer_H2cycles = 0;
self->g_timer_minimize_H2cycles = 0;
//#ifdef HOM_CYCLES
if (self->g_compute_cycles) compute_H2_homology_cycles(self);
//#endif
if (!self->g_suppress_output){
printf("\nTime to process input : %lf" , self->g_timer_process_input);
printf("\nTime to create neigh: %lf" , self->g_timer_neigh);
printf("\nTime to compute H0: %lf" , self->g_timer_H0);
printf("\nTime to compute coH1: %lf" , self->g_timer_coH1);
printf("\nTime to compute coH2: %lf" , self->g_timer_coH2);
}
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
if (!self->g_suppress_output){
printf("\nTime to compute H1: %lf" , self->g_timer_computeH1);
printf("\nTime to compute %llu H1cycles: %lf" , self->g_n_H1_birth_cycles, self->g_timer_H1cycles);
printf("\nStored V_H0 %llu" , self->g_n_H0_stored_V);
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
printf("\nTime to minimize H1 birth cycles: %lf" , self->g_timer_minimize_H1cycles);
}
//#endif
//#ifdef MINIMIZE_HOM_CYCLES
// printf("\nTime to minimize H1 hom cycles: %lf" , self->g_timer_minimize_H1_homcycles);
//#endif
printf("\nTime to compute H2: %lf" , self->g_timer_computeH2);
printf("\nTime to compute %llu H2cycles: %lf" , self->g_n_H2_birth_cycles, self->g_timer_H2cycles);
printf("\nStored V_H1 %llu" , self->g_n_H1_stored_V);
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
printf("\nTime to minimize H2 cycles: %lf" , self->g_timer_minimize_H2cycles);
}
}
//#endif
//#ifdef MINIMIZE_HOM_CYCLES
//printf("\nTime to minimize H2 hom cycles: %lf" , self->g_timer_minimize_H2_homcycles);
//#endif
}
//#endif
//printf("Time to compute coH2 serial: %lf\n" , self->g_timer_coH2_serial);
//printf("Time to compute coH2 parallel: %lf\n" , self->g_timer_coH2_parallel);
if (!self->g_suppress_output){
printf("\nTotal time taken: %lf",\
self->g_timer_process_input\
+ self->g_timer_neigh\
+ self->g_timer_H0\
+ self->g_timer_coH1\
+ self->g_timer_coH2\
+ self->g_timer_computeH1\
+ self->g_timer_computeH2\
+ self->g_timer_H1cycles\
+ self->g_timer_H2cycles\
+ self->g_timer_minimize_H1cycles\
+ self->g_timer_minimize_H2cycles\
+ self->g_timer_minimize_H1_homcycles\
+ self->g_timer_minimize_H2_homcycles\
);
}
//printf("Time in H2_low: %lf\n", self->g_timer_H2_low);
//printf("Time in H2_greater: %lf\n", self->g_timer_H2_greater);
//printf("Time in H2_next: %lf\n", self->g_timer_H2_next);
deallocator(self);
if (!self->g_suppress_output){
printf("\nQuitting after coH2");
}
//Py_RETURN_NONE;
}
VERT_ID search_Neighbors(filtration* self, VERT_ID v1, VERT_ID v2, VERT_ID l, VERT_ID r){
    /* Binary search for vertex v2 in the sorted neighbor list of v1,
       restricted to indices [l, r].  Returns the index of v2 in
       self->g_Neighbors[v1], or self->g_n_vert when v2 is not found. */
    while (r >= l) {
        VERT_ID probe = l + (r - l) / 2;
        VERT_ID found = self->g_Neighbors[v1][probe].neighbor;
        if (found == v2)
            return probe;
        if (found > v2) {
            /* Target lies to the left; probe == 0 would underflow the
               unsigned index, which also means v2 cannot be present. */
            if (!probe)
                break;
            r = probe - 1;
        }
        else {
            l = probe + 1;
        }
    }
    /* Not present in the list. */
    return self->g_n_vert;
}
/*
 * Binary search over the order-sorted neighbor list g_Neighbors_e[v1]
 * for the given edge `order`.  Returns mid+1 on an exact hit, or the
 * index of the first entry whose order exceeds `order` when the probe
 * lands just below it — i.e. a position *after* the matching/straddled
 * entry.
 *
 * NOTE(review): there is no `r >= l` base case; if `order` is larger
 * than every entry (and mid reaches len-1) or smaller than every entry,
 * the recursion can step outside [l, r].  Callers presumably guarantee
 * the order is bracketed by the list — TODO confirm.
 * NOTE(review): `mid` is declared `int` while indices elsewhere are
 * VERT_ID/EDGE_ID; this narrows on very large neighborhoods — confirm
 * the intended widths.
 */
VERT_ID search_Neighbors_e(filtration* self, VERT_ID v1, EDGE_ID order, VERT_ID l, VERT_ID r, EDGE_ID len){
    int mid = l + (r - l) / 2;
    if (self->g_Neighbors_e[v1][mid].order < order){
        /* If the next entry straddles `order`, it is the answer. */
        if (mid < len-1)
            if (self->g_Neighbors_e[v1][mid+1].order > order)
                return mid+1;
        return search_Neighbors_e(self, v1, order, mid+1, r, len);
    }
    else if (self->g_Neighbors_e[v1][mid].order > order){
        /* Guard: mid == 0 cannot recurse left (unsigned underflow). */
        if (!mid) return 0;
        return search_Neighbors_e(self, v1, order, l, mid-1, len);
    }
    else{
        /* Exact match: return the position after it. */
        return mid+1;
    }
}
//////////////////////////////////////////////////////////
// MERGING ALGORITHMS
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
// END OF MERGING ALGORITHMS
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
// CUSTOM BOUNDARIES
//////////////////////////////////////////////////////////
int simplex1_check(VERT_ID v1, VERT_ID v2, PAR dist, PAR thresh){
    /* Admission test for the edge (v1, v2): accepted iff its distance is
       defined (-1 marks "no edge") and does not exceed the threshold.
       The vertex ids are not consulted. */
    if (dist == -1 || dist > thresh)
        return 0;
    return 1;
}
int simplex2_check(VERT_ID v1, VERT_ID v2, VERT_ID v3){
    /* Hook for custom 2-simplex filtering; currently every triangle
       (v1, v2, v3) is admitted. */
    (void)v1;
    (void)v2;
    (void)v3;
    return 1;
}
int simplex3_check(VERT_ID v1, VERT_ID v2, VERT_ID v3, VERT_ID v4){
    /* Hook for custom 3-simplex filtering; currently every tetrahedron
       (v1, v2, v3, v4) is admitted. */
    (void)v1;
    (void)v2;
    (void)v3;
    (void)v4;
    return 1;
}
//////////////////////////////////////////////////////////
int compare_simplices(simplex* s1, simplex* s2){
    /* Lexicographic comparison on (key1, key2).
       Returns 1 when s1 > s2, 0 when s1 < s2, and -1 when equal. */
    if (s1->key1 != s2->key1)
        return (s1->key1 > s2->key1) ? 1 : 0;
    if (s1->key2 != s2->key2)
        return (s1->key2 > s2->key2) ? 1 : 0;
    return -1;
}
int compare_simplices_keys(EDGE_ID key11, EDGE_ID key12
                           , EDGE_ID key21, EDGE_ID key22
                           ){
    /* Lexicographic comparison of simplex keys (key11, key12) against
       (key21, key22).  Returns 1 when the first is greater, 0 when it is
       smaller, and -1 when both pairs coincide. */
    if (key11 != key21)
        return (key11 > key21) ? 1 : 0;
    if (key12 != key22)
        return (key12 > key22) ? 1 : 0;
    return -1;
}
/*
 * Compute the first ("lowest") coboundary entry of edge o_ab for the coH1
 * reduction.  Triangles (a, b, c) containing the edge ab = (a, b) are
 * enumerated by merging the vertex-sorted neighbor lists of a and b.
 * If some triangle has ab as its maximal edge (both side edges ordered
 * before o_ab), the low is (o_ab, c) for the first such c.  Otherwise the
 * low is the triangle whose maximal side-edge order o_s is smallest
 * (ties broken by the smaller opposite vertex); when no triangle exists
 * at all, low.key1 is the g_n_valid_edges sentinel.
 * Leaves a_ptr/b_ptr positioned for find_H1_cohom_next.
 */
void find_H1_cohom_low(filtration* self, coboundary_H1* V_info){
    /* Endpoints of the edge with filtration order o_ab. */
    VERT_ID a = self->g_edges_list[2*V_info->o_ab];
    VERT_ID b = self->g_edges_list[2*V_info->o_ab+1];
    V_info->a_ptr = 0;
    V_info->b_ptr = 0;
    EDGE_ID o_min = self->g_n_valid_edges;  /* sentinel: no candidate yet */
    EDGE_ID v_min = 0;
    EDGE_ID o_s, v_s;
    while(1){
        if ((V_info->a_ptr < self->g_Neigh_len[a])
            && (V_info->b_ptr < self->g_Neigh_len[b])){
            if (self->g_Neighbors[a][V_info->a_ptr].neighbor < self->g_Neighbors[b][V_info->b_ptr].neighbor){
                V_info->a_ptr++;
            }
            else if (self->g_Neighbors[a][V_info->a_ptr].neighbor > self->g_Neighbors[b][V_info->b_ptr].neighbor){
                V_info->b_ptr++;
            }
            else{
                /* Common neighbor c: triangle (a, b, c) exists. */
                EDGE_ID ac = self->g_Neighbors[a][V_info->a_ptr].order;
                EDGE_ID bc = self->g_Neighbors[b][V_info->b_ptr].order;
                EDGE_ID c = self->g_Neighbors[a][V_info->a_ptr].neighbor;
                /* o_s = larger side-edge order; v_s = endpoint opposite it. */
                o_s = ac;
                v_s = b;
                if (bc > ac){
                    o_s = bc;
                    v_s = a;
                }
                if (o_s < V_info->o_ab){
                    /* ab is the maximal edge of (a, b, c): first low found. */
                    V_info->low.key1 = V_info->o_ab;
                    V_info->low.key2 = c;
                    return;
                }
                /* Reached here means o_s > o_ab: track the best later
                   triangle (smallest o_s, then smallest opposite vertex). */
                if (o_s < o_min){
                    o_min = o_s;
                    v_min = v_s;
                }
                else if ((o_s == o_min) && (v_s < v_min)){
                    v_min = v_s;
                }
                V_info->a_ptr++;
                V_info->b_ptr++;
            }
        }
        else{
            /* One list exhausted: report the best later triangle, or the
               (g_n_valid_edges, 0) sentinel when the column is empty. */
            V_info->low.key1 = o_min;
            V_info->low.key2 = v_min;
            return;
        }
    }
}
// A recursive binary search function. It returns
// location of x in given array arr[l..r] is present,
// otherwise -1
/*
 * Binary search for `key2` in the sorted pivot array arr[l..r].
 * Returns the index of the matching entry, or `max` (the caller's
 * not-found sentinel, g_n_valid_edges at the call sites) when absent.
 *
 * Rewritten iteratively.  The original carried unreachable debug
 * statements (printf/getchar) *after* a `return max;` — dead code,
 * removed here.  The original's guard comment indicates the index type
 * is unsigned, so `mid - 1` is guarded when mid == 0.
 */
EDGE_ID search_H1_cohom_pivots(H1_cohom_pivots* arr, EDGE_ID l, EDGE_ID r, EDGE_ID key2, EDGE_ID max)
{
    while (r >= l) {
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid].key2 == key2)
            return mid;
        if (arr[mid].key2 > key2) {
            /* Target can only be left of mid; mid == 0 would underflow. */
            if (!mid)
                return max;
            r = mid - 1;
        }
        else {
            /* Target can only be right of mid. */
            l = mid + 1;
        }
    }
    /* Not present in the array. */
    return max;
}
//// A recursive binary search function. It returns
//// location of x in given array arr[l..r] is present,
//// otherwise -1
//EDGE_ID bin_search_min_update_V(min_update_V* arr, EDGE_ID l, EDGE_ID r, EDGE_ID mm, EDGE_ID max)
//{
// if (r >= l) {
// EDGE_ID mid = l + (r - l) / 2;
//
// if (arr[mid].mm == mm)
// return mid;
//
// // If element is smaller than mid, then
// // it can only be present in left subarray
// if (arr[mid].mm > mm)
// {
//
// /// PRECAUTIONARY: CAN REMOVE LATER
// if (!mid){
// return max;
// printf("\nMID 0 WILL GIVE ERROR FOR UNSIGNED NEXT");
// getchar();
// }
// ///////////////////
// return bin_search_min_update_V(arr, l, mid - 1, mm, max);
// }
//
// // Else the element can only be present
// // in right subarray
// return bin_search_min_update_V(arr, mid + 1, r, mm, max);
// }
//
// // We reach here when element is not
// // present in array
// //printf("\nNOT FOUND");
// return max;
//}
// A recursive binary search function. It returns
// location of x in given array arr[l..r] is present,
// otherwise -1
/*
 * Binary search for `mm` in the sorted array arr[l..r].
 * Returns the index of the match, or `max` (not-found sentinel) when
 * absent.
 *
 * Rewritten iteratively; the original recursed and contained unreachable
 * debug statements after a `return max;`, removed here.  `mid - 1` is
 * guarded for mid == 0 because the index type is unsigned (per the
 * original's guard comment).
 */
EDGE_ID bin_search_cycle_ops(EDGE_ID* arr, EDGE_ID l, EDGE_ID r, EDGE_ID mm, EDGE_ID max)
{
    while (r >= l) {
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid] == mm)
            return mid;
        if (arr[mid] > mm) {
            /* Only the left half can contain mm; avoid unsigned underflow. */
            if (!mid)
                return max;
            r = mid - 1;
        }
        else {
            l = mid + 1;
        }
    }
    /* Not present in the array. */
    return max;
}
// A recursive binary search function. It returns
// location of x in given array arr[l..r] is present,
// otherwise -1
/*
 * Binary search for cycle id `mm` among the .cj fields of the sorted
 * array arr[l..r].  Returns the index of the match, or `max` (not-found
 * sentinel) when absent.
 *
 * Rewritten iteratively; the original recursed and contained unreachable
 * debug statements after a `return max;`, removed here.  `mid - 1` is
 * guarded for mid == 0 because the index type is unsigned (per the
 * original's guard comment).
 */
EDGE_ID bin_search_cyc_in_cyc(cyc_in_cyc* arr, EDGE_ID l, EDGE_ID r, EDGE_ID mm, EDGE_ID max)
{
    while (r >= l) {
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid].cj == mm)
            return mid;
        if (arr[mid].cj > mm) {
            /* Only the left half can contain mm; avoid unsigned underflow. */
            if (!mid)
                return max;
            r = mid - 1;
        }
        else {
            l = mid + 1;
        }
    }
    /* Not present in the array. */
    return max;
}
// returns greater than or equal to
/*
 * Find the first coboundary entry ("low") of edge V_info->o_ab that is
 * greater than or equal to *pivot in the (edge order, vertex) ordering
 * used by the coH1 reduction.  Three cases on pivot->key1:
 *   < o_ab  : every entry qualifies — delegate to find_H1_cohom_low and,
 *             if the low lies past o_ab, position a_ptr/b_ptr for it;
 *   == o_ab : scan triangles whose maximal edge is ab itself, starting at
 *             apex >= pivot->key2, falling back to the best later triangle;
 *   > o_ab  : scan the order-sorted side-edge lists from pivot->key1 on.
 * When no qualifying entry exists, low.key1 is set to the g_n_valid_edges
 * sentinel.  On return a_ptr/b_ptr are consistent with the reported low so
 * that find_H1_cohom_next can continue the column.
 *
 * FIX: in the == o_ab branch, v_min was declared uninitialized but is
 * unconditionally copied into low.key2 when the neighbor lists run out
 * without any later triangle (o_min still at the sentinel) — reading an
 * uninitialized variable is undefined behavior.  It is now initialized
 * to 0, matching find_H1_cohom_low.  Compound conditions of the form
 * (x > p) || (x == p) were also simplified to x >= p.
 */
void find_H1_cohom_greater(filtration* self, coboundary_H1* V_info, simplex* pivot){
    if (pivot->key1 < V_info->o_ab){
        /* Every entry of the column is >= pivot: the first low works. */
        find_H1_cohom_low(self, V_info);
        /* If it has a low */
        if (V_info->low.key1 < self->g_n_valid_edges){
            /* When the low lies past o_ab, reposition both pointers in the
               order-sorted lists at the first side edge >= low.key1. */
            if (V_info->low.key1 > V_info->o_ab){
                VERT_ID a = self->g_edges_list[2*V_info->o_ab];
                VERT_ID b = self->g_edges_list[2*V_info->o_ab+1];
                V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a]-1
                                    , V_info->low.key1, self->g_Neigh_len[a]);
                V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b]-1
                                    , V_info->low.key1, self->g_Neigh_len[b]);
            }
        }
        return;
    }
    else if (pivot->key1 == V_info->o_ab){
        VERT_ID a = self->g_edges_list[2*V_info->o_ab];
        VERT_ID b = self->g_edges_list[2*V_info->o_ab+1];
        V_info->a_ptr = 0;
        V_info->b_ptr = 0;
        EDGE_ID o_min = self->g_n_valid_edges;  /* sentinel: no later triangle yet */
        EDGE_ID v_min = 0;  /* FIX: was uninitialized — UB when copied below with o_min at the sentinel */
        EDGE_ID o_s;
        EDGE_ID v_s;
        /* Start the merge at the first common-neighbor candidate >= pivot->key2. */
        V_info->a_ptr = bin_search_min_geq_N(self->g_Neighbors[a], 0, self->g_Neigh_len[a]-1
                            , pivot->key2, self->g_Neigh_len[a]);
        V_info->b_ptr = bin_search_min_geq_N(self->g_Neighbors[b], 0, self->g_Neigh_len[b]-1
                            , pivot->key2, self->g_Neigh_len[b]);
        if ((self->g_Neighbors[a][V_info->a_ptr].neighbor == pivot->key2)
            && (self->g_Neighbors[b][V_info->b_ptr].neighbor == pivot->key2)){
            /* The pivot itself is an entry of this column. */
            V_info->low.key1 = V_info->o_ab;
            V_info->low.key2 = pivot->key2;
            return;
        }
        while(1) {
            if ((V_info->a_ptr < self->g_Neigh_len[a])
                && (V_info->b_ptr < self->g_Neigh_len[b])){
                if (self->g_Neighbors[a][V_info->a_ptr].neighbor < self->g_Neighbors[b][V_info->b_ptr].neighbor) {
                    V_info->a_ptr++;
                }
                else if (self->g_Neighbors[a][V_info->a_ptr].neighbor > self->g_Neighbors[b][V_info->b_ptr].neighbor) {
                    V_info->b_ptr++;
                }
                else {
                    /* Common neighbor c: triangle (a, b, c). */
                    EDGE_ID o_ac = self->g_Neighbors[a][V_info->a_ptr].order;
                    EDGE_ID o_bc = self->g_Neighbors[b][V_info->b_ptr].order;
                    EDGE_ID c = self->g_Neighbors[b][V_info->b_ptr].neighbor;
                    o_s = o_ac;
                    v_s = b;
                    if (o_bc > o_s){
                        o_s = o_bc;
                        v_s = a;
                    }
                    if (o_s < V_info->o_ab){
                        /* ab is maximal: entry (o_ab, c) qualifies iff c >= pivot apex. */
                        if (c >= pivot->key2) {
                            V_info->low.key1 = V_info->o_ab;
                            V_info->low.key2 = c;
                            return;
                        }
                    }
                    else{
                        /* Later triangle: track the best (smallest o_s, then
                           smallest opposite vertex). */
                        if (o_s < o_min){
                            o_min = o_s;
                            v_min = v_s;
                        }
                        else if (o_s == o_min){
                            if (v_s < v_min){
                                v_min = v_s;
                            }
                        }
                    }
                    V_info->a_ptr++;
                    V_info->b_ptr++;
                }
            }
            else{
                /* Lists exhausted: reposition pointers for the best later
                   triangle (if any) and report it, else the sentinel. */
                if (o_min != self->g_n_valid_edges){
                    V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a]-1
                                        , o_min, self->g_Neigh_len[a]);
                    V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b]-1
                                        , o_min, self->g_Neigh_len[b]);
                }
                V_info->low.key1 = o_min;
                V_info->low.key2 = v_min;
                return;
            }
        }
    }
    else {
        /* pivot->key1 > o_ab: scan the order-sorted side-edge lists from
           the first edge with order >= pivot->key1. */
        VERT_ID a = self->g_edges_list[2*V_info->o_ab];
        VERT_ID b = self->g_edges_list[2*V_info->o_ab+1];
        V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a]-1
                            , pivot->key1, self->g_Neigh_len[a]);
        V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b]-1
                            , pivot->key1, self->g_Neigh_len[b]);
        while (1) {
            if ((V_info->a_ptr < self->g_Neigh_len[a])
                && (V_info->b_ptr < self->g_Neigh_len[b])){
                if (self->g_Neighbors_e[a][V_info->a_ptr].order < self->g_Neighbors_e[b][V_info->b_ptr].order){
                    EDGE_ID o_ac = self->g_Neighbors_e[a][V_info->a_ptr].order;
                    VERT_ID c = self->g_Neighbors_e[a][V_info->a_ptr].neighbor;
                    VERT_ID idx = search_Neighbors(self, b, c, 0, self->g_Neigh_len[b]-1);
                    if (idx < self->g_n_vert) {
                        EDGE_ID o_bc = self->g_Neighbors[b][idx].order;
                        if (o_bc < o_ac){
                            /* ac is the maximal edge of (a, b, c). */
                            if ((o_ac > pivot->key1)
                                || ((o_ac == pivot->key1) && (b >= pivot->key2))){
                                V_info->low.key1 = o_ac;
                                V_info->low.key2 = b;
                                return;
                            }
                        }
                    }
                    V_info->a_ptr++;
                }
                else {
                    EDGE_ID o_bc = self->g_Neighbors_e[b][V_info->b_ptr].order;
                    VERT_ID c = self->g_Neighbors_e[b][V_info->b_ptr].neighbor;
#ifdef COMBIDX
                    EDGE_ID o_ac = COMB_IDX(a, c);
                    if (o_ac != self->g_n_valid_edges){
#else
                    VERT_ID idx = search_Neighbors(self, a, c, 0, self->g_Neigh_len[a]-1);
                    if (idx < self->g_n_vert) {
                        EDGE_ID o_ac = self->g_Neighbors[a][idx].order;
#endif
                        if (o_ac < o_bc){
                            /* bc is the maximal edge of (a, b, c). */
                            if ((o_bc > pivot->key1)
                                || ((o_bc == pivot->key1) && (a >= pivot->key2))){
                                V_info->low.key1 = o_bc;
                                V_info->low.key2 = a;
                                return;
                            }
                        }
                    }
                    V_info->b_ptr++;
                }
            }
            else if (V_info->a_ptr < self->g_Neigh_len[a]){
                /* b's list exhausted: any completed triangle here has
                   o_bc < o_ac, so ac is necessarily maximal. */
                EDGE_ID o_ac = self->g_Neighbors_e[a][V_info->a_ptr].order;
                VERT_ID c = self->g_Neighbors_e[a][V_info->a_ptr].neighbor;
#ifdef COMBIDX
                EDGE_ID o_bc = COMB_IDX(b, c);
                if (o_bc != self->g_n_valid_edges){
#else
                VERT_ID idx = search_Neighbors(self, b, c, 0, self->g_Neigh_len[b]-1);
                if (idx < self->g_n_vert) {
#endif
                    if ((o_ac > pivot->key1)
                        || ((o_ac == pivot->key1) && (b >= pivot->key2))){
                        V_info->low.key1 = o_ac;
                        V_info->low.key2 = b;
                        return;
                    }
                }
                V_info->a_ptr++;
            }
            else if (V_info->b_ptr < self->g_Neigh_len[b]){
                /* a's list exhausted: symmetric case, bc is maximal. */
                EDGE_ID o_bc = self->g_Neighbors_e[b][V_info->b_ptr].order;
                VERT_ID c = self->g_Neighbors_e[b][V_info->b_ptr].neighbor;
#ifdef COMBIDX
                EDGE_ID o_ac = COMB_IDX(a, c);
                if (o_ac != self->g_n_valid_edges){
#else
                VERT_ID idx = search_Neighbors(self, a, c, 0, self->g_Neigh_len[a]-1);
                if (idx < self->g_n_vert) {
#endif
                    if ((o_bc > pivot->key1)
                        || ((o_bc == pivot->key1) && (a >= pivot->key2))){
                        V_info->low.key1 = o_bc;
                        V_info->low.key2 = a;
                        return;
                    }
                }
                V_info->b_ptr++;
            }
            else{
                break;
            }
        }
        /* No qualifying entry remains. */
        V_info->low.key1 = self->g_n_valid_edges;
        return;
    }
}
/*
 * Advance V_info->low to the next coboundary entry of edge o_ab, resuming
 * from the current low / a_ptr / b_ptr state (established by
 * find_H1_cohom_low or find_H1_cohom_greater).  Entries whose key1 is
 * o_ab itself (triangles with ab maximal) are enumerated first by merging
 * the vertex-sorted neighbor lists of a and b; afterwards triangles whose
 * maximal edge is a side edge (ac or bc) are enumerated via the
 * order-sorted lists g_Neighbors_e.  Sets low.key1 = g_n_valid_edges when
 * the column is exhausted.
 */
void find_H1_cohom_next (filtration* self, coboundary_H1* V_info){
    VERT_ID a = self->g_edges_list[2*V_info->o_ab];
    VERT_ID b = self->g_edges_list[2*V_info->o_ab+1];
    if (V_info->o_ab == V_info->low.key1){
        /* Still inside the block of entries with key1 == o_ab: step past
           the current apex and continue the common-neighbor merge. */
        V_info->a_ptr++;
        V_info->b_ptr++;
        while (1){
            if ((V_info->a_ptr < self->g_Neigh_len[a])
                && (V_info->b_ptr < self->g_Neigh_len[b])){
                if (self->g_Neighbors[a][V_info->a_ptr].neighbor < self->g_Neighbors[b][V_info->b_ptr].neighbor){
                    V_info->a_ptr++;
                }
                else if (self->g_Neighbors[a][V_info->a_ptr].neighbor > self->g_Neighbors[b][V_info->b_ptr].neighbor){
                    V_info->b_ptr++;
                }
                else{
                    /* Common neighbor c: triangle (a, b, c). */
                    EDGE_ID o_ac = self->g_Neighbors[a][V_info->a_ptr].order;
                    EDGE_ID o_bc = self->g_Neighbors[b][V_info->b_ptr].order;
                    EDGE_ID c = self->g_Neighbors[b][V_info->b_ptr].neighbor;
                    EDGE_ID o_s = o_ac;
                    /* NOTE(review): v_s is written but never read — the
                       candidate tracking that used it is disabled. */
                    EDGE_ID v_s = b;
                    if (o_bc > o_ac) {
                        o_s = o_bc;
                        v_s = a;
                    }
                    if (o_s < V_info->low.key1) {
                        /* ab is still the maximal edge: next entry found. */
                        V_info->low.key2 = c;
                        return;
                    }
                    V_info->a_ptr++;
                    V_info->b_ptr++;
                }
            }
            else {
                break;
            }
        }
        /* No more triangles with ab maximal: reposition both pointers in the
           order-sorted lists at the first side edge with order >= o_ab, then
           fall through to the side-edge scan below. */
        V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a] - 1
                            , V_info->o_ab, self->g_Neigh_len[a]);
        V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b] - 1
                            , V_info->o_ab, self->g_Neigh_len[b]);
    }
    /* Here V_info->low.key1 > V_info->o_ab: advance past the side edge that
       produced the current low (the one with the smaller order). */
    if ((V_info->a_ptr < self->g_Neigh_len[a])
        && (V_info->b_ptr < self->g_Neigh_len[b])){
        if ((self->g_Neighbors_e[a][V_info->a_ptr].order < self->g_Neighbors_e[b][V_info->b_ptr].order)){
            V_info->a_ptr++;
        }
        else{
            V_info->b_ptr++;
        }
    }
    else{
        if (V_info->a_ptr < self->g_Neigh_len[a]){
            V_info->a_ptr++;
        }
        else{
            V_info->b_ptr++;
        }
    }
    /* Scan side edges in increasing order for the next triangle whose
       maximal edge is that side edge. */
    while (1){
        if ((V_info->a_ptr < self->g_Neigh_len[a])
            && (V_info->b_ptr < self->g_Neigh_len[b])){
            if (self->g_Neighbors_e[a][V_info->a_ptr].order < self->g_Neighbors_e[b][V_info->b_ptr].order) {
                EDGE_ID o_ac = self->g_Neighbors_e[a][V_info->a_ptr].order;
                EDGE_ID c = self->g_Neighbors_e[a][V_info->a_ptr].neighbor;
#ifdef COMBIDX
                EDGE_ID o_bc = COMB_IDX(b, c);
                if (o_bc != self->g_n_valid_edges){
#else
                VERT_ID idx = search_Neighbors(self, b, c, 0, self->g_Neigh_len[b]-1);
                if (idx < self->g_n_vert) {
                    EDGE_ID o_bc = self->g_Neighbors[b][idx].order;
#endif
                    if (o_bc < o_ac){
                        /* ac is the maximal edge of (a, b, c). */
                        V_info->low.key1 = o_ac;
                        V_info->low.key2 = b;
                        return;
                    }
                }
                V_info->a_ptr++;
            }
            else{
                EDGE_ID o_bc = self->g_Neighbors_e[b][V_info->b_ptr].order;
                EDGE_ID c = self->g_Neighbors_e[b][V_info->b_ptr].neighbor;
#ifdef COMBIDX
                EDGE_ID o_ac = COMB_IDX(a, c);
                if (o_ac != self->g_n_valid_edges){
#else
                VERT_ID idx = search_Neighbors(self, a, c, 0, self->g_Neigh_len[a]-1);
                if (idx < self->g_n_vert) {
                    EDGE_ID o_ac = self->g_Neighbors[a][idx].order;
#endif
                    if (o_ac < o_bc){
                        /* bc is the maximal edge of (a, b, c). */
                        V_info->low.key1 = o_bc;
                        V_info->low.key2 = a;
                        return;
                    }
                }
                V_info->b_ptr++;
            }
        }
        else if (V_info->a_ptr < self->g_Neigh_len[a]){
            /* b's list exhausted: per the original's disabled check, o_bc is
               assumed smaller, so ac is treated as maximal — TODO confirm. */
            EDGE_ID o_ac = self->g_Neighbors_e[a][V_info->a_ptr].order;
            EDGE_ID c = self->g_Neighbors_e[a][V_info->a_ptr].neighbor;
#ifdef COMBIDX
            EDGE_ID o_bc = COMB_IDX(b, c);
            if (o_bc != self->g_n_valid_edges){
#else
            VERT_ID idx = search_Neighbors(self, b, c, 0, self->g_Neigh_len[b]-1);
            if (idx < self->g_n_vert) {
#endif
                V_info->low.key1 = o_ac;
                V_info->low.key2 = b;
                return;
            }
            V_info->a_ptr++;
        }
        else if (V_info->b_ptr < self->g_Neigh_len[b]){
            /* a's list exhausted: symmetric case, bc treated as maximal. */
            EDGE_ID o_bc = self->g_Neighbors_e[b][V_info->b_ptr].order;
            EDGE_ID c = self->g_Neighbors_e[b][V_info->b_ptr].neighbor;
#ifdef COMBIDX
            EDGE_ID o_ac = COMB_IDX(a, c);
            if (o_ac != self->g_n_valid_edges){
#else
            VERT_ID idx = search_Neighbors(self, a, c, 0, self->g_Neigh_len[a]-1);
            if (idx < self->g_n_vert) {
#endif
                V_info->low.key1 = o_bc;
                V_info->low.key2 = a;
                return;
            }
            V_info->b_ptr++;
        }
        else{
            break;
        }
    }
    /* Column exhausted. */
    V_info->low.key1 = self->g_n_valid_edges;
    return;
}
/*
 * Return the smallest index in arr[l..r] whose .order is >= x, or MAX
 * when every entry's order is below x.  Assumes l <= r and that the
 * range is sorted by .order (as at all call sites).
 *
 * BUG FIX: both recursive calls were missing `return`, so control fell
 * off the end of a non-void function — undefined behavior (any correct
 * result was an accident of the calling convention).  The recursion
 * structure is otherwise unchanged.
 *
 * NOTE(review): the search key `x` is typed VERT_ID while callers pass
 * EDGE_ID edge orders; confirm the typedef widths match.
 */
EDGE_ID bin_search_min_geq_Ne(Neighbors* arr, VERT_ID l, VERT_ID r, VERT_ID x, EDGE_ID MAX){
    if (arr[r].order < x){
        /* Entire range below x. */
        return MAX;
    }
    if (arr[l].order > x){
        /* Entire range at or above x: leftmost index wins. */
        return l;
    }
    VERT_ID mid = l + (r-l)/2;
    if (arr[mid].order < x){
        l = mid + 1;
        if (arr[l].order >= x){
            return l;
        }
        return bin_search_min_geq_Ne(arr, l, r, x, MAX);
    }
    else{
        r = mid;
        if (arr[r].order == x) return r;
        return bin_search_min_geq_Ne(arr, l, r, x, MAX);
    }
}
/*
 * Return the smallest index in arr[l..r] whose .neighbor is >= x, or MAX
 * when every entry's neighbor id is below x.  Assumes l <= r and that
 * the range is sorted by .neighbor (as at all call sites).
 *
 * BUG FIX: both recursive calls were missing `return`, so control fell
 * off the end of a non-void function — undefined behavior (any correct
 * result was an accident of the calling convention).  The recursion
 * structure is otherwise unchanged.
 */
EDGE_ID bin_search_min_geq_N(Neighbors* arr, VERT_ID l, VERT_ID r, VERT_ID x, EDGE_ID MAX){
    if (arr[r].neighbor < x){
        /* Entire range below x. */
        return MAX;
    }
    if (arr[l].neighbor > x){
        /* Entire range at or above x: leftmost index wins. */
        return l;
    }
    VERT_ID mid = l + (r-l)/2;
    if (arr[mid].neighbor < x){
        l = mid + 1;
        if (arr[l].neighbor >= x){
            return l;
        }
        return bin_search_min_geq_N(arr, l, r, x, MAX);
    }
    else{
        r = mid;
        if (arr[r].neighbor == x) return r;
        return bin_search_min_geq_N(arr, l, r, x, MAX);
    }
}
void find_H2_cohom_low (filtration* self, coboundary_H2* V_info){
    /* Compute the first coboundary entry of the triangle in V_info for the
       coH2 reduction.  First try the "case 1" cofaces starting from the
       beginning of c's list; if none qualifies, position the edge pointers
       just past the entry for edge key1 in the order-sorted neighbor lists
       of its endpoints and fall through to the "case 2" scan. */
    V_info->c_ptr = 0;
    if (H2_case1(self, V_info))
        return;
    EDGE_ID e_ab = V_info->triangle.key1;
    VERT_ID end_a = self->g_edges_list[2*e_ab];
    VERT_ID end_b = self->g_edges_list[2*e_ab + 1];
    /* +1: step past the side edge located by the geq-search, mirroring the
       original post-increment of a_ptr/b_ptr. */
    V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[end_a], 0, self->g_Neigh_len[end_a]-1
                        , e_ab, self->g_Neigh_len[end_a]) + 1;
    V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[end_b], 0, self->g_Neigh_len[end_b]-1
                        , e_ab, self->g_Neigh_len[end_b]) + 1;
    H2_case2(self, V_info);
}
void find_H2_cohom_next (filtration* self, coboundary_H2* V_info){
    /* Advance V_info to the next coboundary entry of its triangle.
       V_info->vertex records which pointer produced the current entry:
       0 = c's list (case 1), 1 = a's list, 2 = b's list, 3 = c's list in
       case 2.  Step the corresponding pointer, then resume the scan. */
    EDGE_ID o_ab = V_info->triangle.key1;
    VERT_ID a = self->g_edges_list[2*o_ab];
    VERT_ID b = self->g_edges_list[2*o_ab+1];
    switch (V_info->vertex) {
    case 0:
        V_info->c_ptr++;
        if (H2_case1(self, V_info))
            return;
        /* Case 1 exhausted without an entry: reposition a_ptr/b_ptr just
           past edge o_ab before entering the case-2 scan. */
        V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a]-1
                            , V_info->triangle.key1, self->g_Neigh_len[a]) + 1;
        V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b]-1
                            , V_info->triangle.key1, self->g_Neigh_len[b]) + 1;
        break;
    case 1:
        V_info->a_ptr++;
        break;
    case 2:
        V_info->b_ptr++;
        break;
    case 3:
        V_info->c_ptr++;
        break;
    default:
        /* Other vertex values (e.g. the -1 "no coboundary" sentinel) fall
           through unchanged, matching the original flag logic. */
        break;
    }
    H2_case2(self, V_info);
}
/*
 * Position V_info's coboundary cursors so the next entry produced for
 * this triangle's column is the first one >= *pivot in (key1, key2)
 * order, then iterate H2_case2 until that entry — or the end sentinel
 * (low.key1 == g_n_valid_edges) — is reached.
 */
void find_H2_cohom_greater (filtration* self, coboundary_H2* V_info, simplex* pivot){
    //if (self->g_p_flag){
    //    printf("\nfinding H2 greater than (%d, %d) for (%d, %d)", pivot->key1, pivot->key2
    //                , V_info->triangle.key1\
    //                , V_info->triangle.key2);
    //    getchar();
    //}
    //EDGE_ID o_ab;
    VERT_ID c, a, b;
    if (pivot->key1 < V_info->triangle.key1){
        // Find first low of o_ab
        /* The pivot precedes every possible entry of this column: just
           compute the column's first entry from scratch. */
        find_H2_cohom_low(self, V_info);
        return;
    }
    else if (pivot->key1 == V_info->triangle.key1){
        //o_ab = V_info->triangle.key1;
        /* NOTE(review): this inner `c` shadows the function-scope `c`
           declared above; the outer one is only used in the final else. */
        VERT_ID c = V_info->triangle.key2;
        //a = self->g_edges_list[o_ab][0];
        //b = self->g_edges_list[o_ab][1];
        /* Jump the apex cursor to the first neighbor of c whose edge
           order is >= pivot->key2. */
        V_info->c_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[c], 0, self->g_Neigh_len[c]-1\
                , pivot->key2, self->g_Neigh_len[c]);
        /* NOTE(review): if the search returns the "not found" sentinel
           (== g_Neigh_len[c]), this indexes one past the end of the
           neighbor list — confirm the caller guarantees a hit here. */
        if (self->g_Neighbors_e[c][V_info->c_ptr].order == pivot->key2){
            /* Exact hit: the pivot itself is this column's entry. */
            V_info->low = *pivot;
            V_info->vertex= 0;
            return;
        }
        if (H2_case1(self, V_info)){
            return;
        }
        // Here means that we did not return and
        // not a_ptr and b_ptr are at o_ab
        // So, both need to be incremented
        a = self->g_edges_list[2*V_info->triangle.key1];
        b = self->g_edges_list[2*V_info->triangle.key1+1];
        V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a]-1\
                , V_info->triangle.key1, self->g_Neigh_len[a]);
        V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b]-1\
                , V_info->triangle.key1, self->g_Neigh_len[b]);
        V_info->a_ptr++;
        V_info->b_ptr++;
    }
    else{
        /* pivot->key1 beyond the triangle's edge: fast-forward all three
           cursors to the first neighbor with edge order >= pivot->key1. */
        c = V_info->triangle.key2;
        a = self->g_edges_list[2*V_info->triangle.key1];
        b = self->g_edges_list[2*V_info->triangle.key1+1];
        V_info->a_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[a], 0, self->g_Neigh_len[a]-1\
                , pivot->key1, self->g_Neigh_len[a]);
        V_info->b_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[b], 0, self->g_Neigh_len[b]-1\
                , pivot->key1, self->g_Neigh_len[b]);
        V_info->c_ptr = bin_search_min_geq_Ne(self->g_Neighbors_e[c], 0, self->g_Neigh_len[c]-1\
                , pivot->key1, self->g_Neigh_len[c]);
    }
    /* Emit entries until one is >= pivot (first two disjuncts and the
       equality case) or the column is exhausted (sentinel key1). */
    while (1){
        H2_case2(self, V_info);
        if (((V_info->low.key1 == pivot->key1) && (V_info->low.key2 > pivot->key2))\
                ||(V_info->low.key1 > pivot->key1) || (V_info->low.key1 == self->g_n_valid_edges)\
                ||((V_info->low.key1 == pivot->key1) && (V_info->low.key2 == pivot->key2)) ){
            break;
        }
        /* Advance whichever cursor produced the last (too-small) entry. */
        if (V_info->vertex == 1){
            V_info->a_ptr++;
        }
        else if (V_info->vertex == 2){
            V_info->b_ptr++;
        }
        else if (V_info->vertex == 3){
            V_info->c_ptr++;
        }
    }
}
// A recursive binary search function. Returns the location of
// key2 in arr[l..r] when it is present; otherwise returns `max`
// (the caller-supplied "not found" sentinel), not -1.
/*
 * Binary search for key2 in arr[l..r] (sorted ascending by .key2).
 * Returns the index of the match, or `max` when key2 is not present.
 *
 * Fixes: the original left unreachable printf()/getchar() statements
 * after an early `return max;` (dead code) — removed — and recursed
 * where a loop suffices.  The mid==0 guard is preserved: EDGE_ID is
 * unsigned (see the original "UNSIGNED" warning), so mid-1 would wrap
 * around; when mid==0 and arr[0].key2 > key2 the key cannot exist, and
 * `max` is returned.
 */
EDGE_ID search_H2_cohom_pivots(H2_cohom_pivots* arr, EDGE_ID l, EDGE_ID r, EDGE_ID key2, EDGE_ID max)
{
    while (r >= l) {
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid].key2 == key2)
            return mid;
        if (arr[mid].key2 > key2) {
            /* Continue in the left half; guard against unsigned wrap. */
            if (!mid)
                return max;
            r = mid - 1;
        }
        else {
            /* Continue in the right half. */
            l = mid + 1;
        }
    }
    /* Element not present. */
    return max;
}
// Reduces with complex in parallel
/*
 * Worker-thread entry point: reduces a slice of the H0 workspace columns
 * against the committed sparse R matrix (column addition over Z2, i.e.
 * symmetric difference of sorted index lists).
 *
 * Protocol (bookkeeping under g_thread_lock):
 *  - the worker parks on g_start_workers; the last thread to park
 *    signals g_start_boss so the boss knows all workers are asleep;
 *  - on wake-up it exits if g_delete_threads is set, otherwise it
 *    processes workspace columns g_jobs[tid-1] .. g_jobs[tid]-1;
 *  - when the slice is done it re-takes the lock, increments
 *    g_processed_threads, and loops back to park.
 * Never returns normally; terminates via pthread_exit().
 */
void* reduce_with_complex_H0(void* arg){
    filtration* self = arg;
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, 0);
    pthread_mutex_lock(&(self->g_thread_lock));
    /* 1-based thread id; selects this worker's slice of g_jobs. */
    int tid = ++self->g_thread_id;
    for (;;){
        self->g_sleeping_threads++;
        if (self->g_sleeping_threads == self->g_cpu_count)
            pthread_cond_signal(&(self->g_start_boss));
        pthread_cond_wait(&(self->g_start_workers), &(self->g_thread_lock));
        if (self->g_delete_threads){
            //printf("\nexiting from thread %d", tid);
            pthread_mutex_unlock(&(self->g_thread_lock));
            pthread_exit(NULL);
        }
        self->g_sleeping_threads--;
        pthread_mutex_unlock(&(self->g_thread_lock));
        for (int ws_counter = self->g_jobs[tid - 1]; ws_counter < self->g_jobs[tid]; ws_counter++){
            boundary_H0_ws* this_ws = self->g_R_ws_H0_info + ws_counter;
            /* The column lives in a double buffer of size 2*max_len;
               `original` selects the half currently holding live data. */
            EDGE_ID* orig = self->g_R_ws_H0[ws_counter] + this_ws->original*this_ws->max_len;
            this_ws->flag_red_w_complex = 0;
            this_ws->flag_append_to_complex = 1;
            /* Nonzero idx: a committed column already owns this pivot. */
            EDGE_ID idx = self->g_pivots_H0[this_ws->pivot];
            while(idx){
                //reduced_col = self->g_pivots[self->g_dim_now][idx].red_col;
                //reduced_col = self->g_pivots_H0[idx];
                /* [start, finish) of the committed column in g_R_sparse_H0. */
                EDGE_ID red_start_idx = self->g_R_col_indices_H0[idx];
                EDGE_ID red_finish_idx = self->g_R_col_indices_H0[idx+1];
                EDGE_ID red_len = red_finish_idx - red_start_idx;
                /* Grow the double buffer if the merged column might not fit. */
                if ((this_ws->len + red_len) > this_ws->max_len){
                    /* Shift live data to the front half before resizing. */
                    if (this_ws->original){
                        for (EDGE_ID it=0; it < this_ws->len; it++){
                            self->g_R_ws_H0[ws_counter][it] = \
                                                             self->g_R_ws_H0[ws_counter][it + this_ws->max_len];
                        }
                        this_ws->original = 0;
                    }
                    this_ws->max_len = this_ws->len + red_len + 100;
                    /* realloc under the shared lock: the buffer table is
                       visible to other threads. */
                    pthread_mutex_lock(&(self->g_thread_lock));
                    self->g_R_ws_H0[ws_counter] = (EDGE_ID*)realloc(self->g_R_ws_H0[ws_counter]\
                            , 2*this_ws->max_len*sizeof(EDGE_ID));
                    pthread_mutex_unlock(&(self->g_thread_lock));
                    orig = self->g_R_ws_H0[ws_counter];
                }
                EDGE_ID* scratch = self->g_R_ws_H0[ws_counter] + (1-this_ws->original)*this_ws->max_len;
                EDGE_ID orig_ptr = 0;
                EDGE_ID red_ptr = red_start_idx;
                EDGE_ID scratch_ptr = 0;
                /* Merge the two sorted columns into scratch; equal
                   entries cancel (addition mod 2). */
                while ((orig_ptr < this_ws->len) && (red_ptr < red_finish_idx)){
                    if (orig[orig_ptr] < self->g_R_sparse_H0[red_ptr]){
                        scratch[scratch_ptr++] = orig[orig_ptr++];
                    }
                    else if (orig[orig_ptr] > self->g_R_sparse_H0[red_ptr]){
                        scratch[scratch_ptr++] = self->g_R_sparse_H0[red_ptr++];
                    }
                    else{
                        orig_ptr++;
                        red_ptr++;
                    }
                }
                while (orig_ptr < this_ws->len){
                    scratch[scratch_ptr++] = orig[orig_ptr++];
                }
                while (red_ptr < red_finish_idx){
                    scratch[scratch_ptr++] = self->g_R_sparse_H0[red_ptr++];
                }
                this_ws->len = scratch_ptr;
                /* Column reduced to zero: done with it. */
                if (!this_ws->len){
                    //idx = self->g_n_reduced_simplex[self->g_dim_now];
                    //idx = -1;
                    break;
                }
                else{
                    /* Swap halves, read the new pivot (last = largest
                       entry) and keep reducing while it is taken. */
                    this_ws->original = 1 - this_ws->original;
                    orig = self->g_R_ws_H0[ws_counter] + this_ws->original*this_ws->max_len;
                    this_ws->pivot = orig[this_ws->len-1];
                    idx = self->g_pivots_H0[this_ws->pivot];
                }
            }
        }
        /* Report completion; the lock is then held again at loop top. */
        pthread_mutex_lock(&(self->g_thread_lock));
        self->g_processed_threads++;
    }
}
/*
 * Split ws_size work items as evenly as possible across g_cpu_count
 * workers: the first (ws_size % g_cpu_count) workers receive one extra
 * item.  Worker i's half-open range is g_jobs[i-1] .. g_jobs[i].
 */
void allocate_jobs(filtration* self, int ws_size){
    int per_worker = ws_size / self->g_cpu_count;
    int leftover   = ws_size % self->g_cpu_count;
    self->g_jobs[0] = 0;
    for (int w = 1; w <= self->g_cpu_count; w++){
        int extra = (w <= leftover) ? 1 : 0;
        self->g_jobs[w] = self->g_jobs[w-1] + per_worker + extra;
    }
}
/*
 * Boss-side driver of one H0 reduction round: fan the workspace columns
 * out to the worker threads, finish with a sequential self-reduction,
 * then commit fully reduced columns and compact the survivors.
 * NOTE(review): cond-waits on g_thread_lock, so the caller appears to
 * hold that lock on entry — confirm against the thread setup code.
 */
void reduce_ws_H0(filtration* self){
    //if (self->g_n_reduced_simplex_H0 > 0){
    /* Wake every worker (reduce_with_complex_H0) and wait until each
       has bumped g_processed_threads. */
    self->g_processed_threads = 0;
    pthread_cond_broadcast(&(self->g_start_workers));
    while (self->g_processed_threads != self->g_cpu_count){
        pthread_cond_wait(&(self->g_start_boss) \
                ,&(self->g_thread_lock));
    }
    //}
    /* Cancel workspace columns against one another (single-threaded). */
    reduce_with_self_H0( \
            self \
            );
    int count_valid = 0;
    for (int ws_counter=0; ws_counter < self->g_ws_counter; ws_counter++){
        /* Columns reduced to zero are simply dropped. */
        if (!self->g_R_ws_H0_info[ws_counter].len){continue;}
        /* Fully reduced columns are committed to the sparse R matrix. */
        if (self->g_R_ws_H0_info[ws_counter].flag_append_to_complex){
            update_R_H0(self \
                    , ws_counter
                    );
            continue;
        }
        /* Still-unreduced column: compact it toward the front of the
           workspace by swapping buffer pointers and bookkeeping. */
        // Swap R
        EDGE_ID* temp = self->g_R_ws_H0[count_valid];
        self->g_R_ws_H0[count_valid] = self->g_R_ws_H0[ws_counter];
        self->g_R_ws_H0[ws_counter] = temp;
        // Swap R info
        boundary_H0_ws temp2 = self->g_R_ws_H0_info[count_valid];
        self->g_R_ws_H0_info[count_valid] = self->g_R_ws_H0_info[ws_counter];
        self->g_R_ws_H0_info[ws_counter] = temp2;
        // At this point, this has to be a non-zero column
        self->g_R_ws_H0_info[count_valid].flag_non_empty = 1;
        count_valid += 1;
    }
    /* Only the surviving columns stay in the workspace. */
    self->g_ws_counter = count_valid;
    //if (dim)
    //    self->g_H0_MAX = self->g_n_reduced_simplex[dim];
}
/*
 * Pairwise self-reduction of the H0 workspace columns (Gaussian
 * elimination over Z2): for each column, while an EARLIER workspace
 * column shares its pivot, add that column in (symmetric difference of
 * the sorted index lists) and restart the scan.  A column stops when it
 * vanishes, when its new pivot is already owned by the committed
 * complex (it must then be re-reduced against the complex), or when the
 * scan completes.
 *
 * BUGFIX: when growing the double buffer, the live half must be shifted
 * to the front by indexing from the BUFFER BASE.  The previous code did
 *     orig[it] = orig[it + max_len];
 * with `orig` already offset by max_len when original==1, which read
 * past the end of the 2*max_len allocation (heap over-read) and wrote
 * into the wrong half.  The sibling reduce_with_complex_H0 does this
 * correctly; this now matches it.
 */
void reduce_with_self_H0( \
        filtration* self \
        ){
    int m;
    EDGE_ID orig_ptr, scratch_ptr, m_ptr, idx;
    EDGE_ID *orig, *scratch, *original_m;
    for (int ws_counter=0; ws_counter < self->g_ws_counter; ws_counter++){
        boundary_H0_ws* this_ws = self->g_R_ws_H0_info + ws_counter;
        // If the simplex has already been reduced to 0
        // then continue
        if (!this_ws->len){
            this_ws->flag_append_to_complex = 0;
            continue;
        }
        m = 0;
        while (m < ws_counter){
            boundary_H0_ws* m_ws = self->g_R_ws_H0_info + m;
            /* Skip columns that have died. */
            if (!m_ws->len){
                m++;
                continue;
            }
            if (m_ws->pivot > this_ws->pivot){
                /* An earlier column with a larger pivot still has to be
                   reduced against the complex: this column cannot be
                   committed in this round either. */
                if (m_ws->flag_red_w_complex){
                    this_ws->flag_append_to_complex = 0;
                    break;
                }
                m++;
                continue;
            }
            if (m_ws->pivot < this_ws->pivot){
                m++;
                continue;
            }
            if (!m_ws->flag_append_to_complex){
                m++;
                continue;
            }
            /* Columns m and ws_counter share a pivot: add column m into
               this one.  `orig` is the live half of the double buffer. */
            orig = self->g_R_ws_H0[ws_counter] + this_ws->original*this_ws->max_len;
            /* Grow the buffer if the merged column might not fit. */
            if ((this_ws->len + m_ws->len) > this_ws->max_len){
                if (this_ws->original){
                    /* Shift live data to the front half — base-relative
                       indexing (see BUGFIX note above). */
                    for (EDGE_ID it = 0; it < this_ws->len; it++){
                        self->g_R_ws_H0[ws_counter][it] = \
                                self->g_R_ws_H0[ws_counter][it + this_ws->max_len];
                    }
                    this_ws->original = 0;
                }
                this_ws->max_len = this_ws->len + m_ws->len + 100;
                self->g_R_ws_H0[ws_counter] = (EDGE_ID*)realloc(self->g_R_ws_H0[ws_counter]\
                        , 2*this_ws->max_len*sizeof(EDGE_ID));
                orig = self->g_R_ws_H0[ws_counter];
            }
            scratch = self->g_R_ws_H0[ws_counter] + (1-this_ws->original)*this_ws->max_len;
            original_m = self->g_R_ws_H0[m] + m_ws->original*m_ws->max_len;
            // Store the result in scratch
            orig_ptr = 0;
            scratch_ptr = 0;
            m_ptr = 0;
            /* Merge the two sorted columns; equal entries cancel (mod 2). */
            while ((orig_ptr < this_ws->len) && (m_ptr < m_ws->len)){
                if (orig[orig_ptr] < original_m[m_ptr]){
                    scratch[scratch_ptr++] = orig[orig_ptr++];
                }
                else if (orig[orig_ptr] > original_m[m_ptr]){
                    scratch[scratch_ptr++] = original_m[m_ptr++];
                }
                else{
                    orig_ptr++;
                    m_ptr++;
                }
            }
            while (orig_ptr < this_ws->len){
                scratch[scratch_ptr++] = orig[orig_ptr++];
            }
            while (m_ptr < m_ws->len){
                scratch[scratch_ptr++] = original_m[m_ptr++];
            }
            this_ws->len = scratch_ptr;
            /* Column vanished: it will never be committed. */
            if (!scratch_ptr){
                this_ws->flag_append_to_complex = 0;
                break;
            }
            this_ws->pivot = scratch[scratch_ptr - 1];
            this_ws->original = 1 - this_ws->original;
            //if (self->g_n_reduced_simplex_H0){
            idx = self->g_pivots_H0[this_ws->pivot];
            // If the pivot is in red complex, then this has to be reduced w/ complex
            //if (idx != self->g_n_reduced_simplex[self->g_dim_now]){
            if (idx){
                this_ws->flag_red_w_complex = 1;
                this_ws->flag_append_to_complex = 0;
                break;
            }
            //}
            /* Pivot changed: restart the scan over earlier columns. */
            m = 0;
        }//End of m loop
    }
}//End of red_ws_w_self_single
/*
 * Commit a fully reduced workspace column into the persistent sparse R
 * matrix: g_R_sparse_H0 holds the concatenated row indices, and
 * g_R_col_indices_H0 holds each column's [start, end) offsets into it.
 * Also registers the column's pivot and marks its coboundary edge for
 * the clearing optimization.
 * NOTE(review): the reallocs overwrite the pointer directly, so an OOM
 * would leak and then crash — consistent with the rest of the file's
 * abort-on-OOM-by-crash policy, but worth confirming.
 */
void update_R_H0(filtration* self, int ws_counter){
    boundary_H0_ws* this_ws = self->g_R_ws_H0_info + ws_counter;
    /* Live half of the column's double buffer. */
    EDGE_ID* orig = self->g_R_ws_H0[ws_counter] + this_ws->original*this_ws->max_len;
    // Check space for R Sparse
    if ((this_ws->len + self->g_R_sparse_ptr_H0) > self->g_R_sparse_max_H0 ){
        self->g_R_sparse_max_H0 = this_ws->len + self->g_R_sparse_ptr_H0 + 1000;
        self->g_R_sparse_H0 = (EDGE_ID*)realloc(self->g_R_sparse_H0\
                , self->g_R_sparse_max_H0*sizeof(EDGE_ID));
    }
    // Check space for R col indices
    if ((self->g_R_col_indices_ptr_H0 + 3) > self->g_R_col_indices_max_H0){
        self->g_R_col_indices_max_H0 += 100;
        self->g_R_col_indices_H0 = (EDGE_ID*)realloc(self->g_R_col_indices_H0\
                , self->g_R_col_indices_max_H0*sizeof(EDGE_ID));
    }
    /* Map the pivot to this column's slot in g_R_col_indices_H0. */
    self->g_pivots_H0[this_ws->pivot] = self->g_R_col_indices_ptr_H0;
    /* Record the column start, append its entries, record the end. */
    self->g_R_col_indices_H0[self->g_R_col_indices_ptr_H0++] = self->g_R_sparse_ptr_H0;
    for (EDGE_ID j=0; j < this_ws->len; j++){
        self->g_R_sparse_H0[self->g_R_sparse_ptr_H0++] = orig[j];
    }
    self->g_R_col_indices_H0[self->g_R_col_indices_ptr_H0++] = self->g_R_sparse_ptr_H0;
    // Update edges with pivots for H0 to be used in clearing algo
    self->g_edges_with_pivots_H0[this_ws->cob] = 1;
    //#ifdef HOM_CYCLES
    // Update vertex in pivot to edge mapping
    if (self->g_compute_cycles){
        self->g_H0_pivot_of[this_ws->pivot].coface = this_ws->cob;
    }
    //#endif
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort(): arr[l..m] and arr[m+1..r] are each sorted
 * ascending; merge them stably back into arr[l..r], keeping the payload
 * array `aux` (two EDGE_ID slots per arr element) aligned.
 */
void merge(PAR* arr, EDGE_ID* aux, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs (and their payloads) before overwriting. */
    PAR *lrun = (PAR*)malloc(n1*sizeof(PAR));
    PAR *rrun = (PAR*)malloc(n2*sizeof(PAR));
    EDGE_ID *lpay = (EDGE_ID*)malloc(2*n1*sizeof(EDGE_ID));
    EDGE_ID *rpay = (EDGE_ID*)malloc(2*n2*sizeof(EDGE_ID));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
        lpay[2*t] = aux[2*(l + t)];
        lpay[2*t+1] = aux[2*(l + t) + 1];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
        rpay[2*t] = aux[2*(m + 1 + t)];
        rpay[2*t+1] = aux[2*(m + 1 + t) + 1];
    }
    /* Single merge loop; taking left on ties keeps the sort stable. */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2) || (li < n1 && lrun[li] <= rrun[ri]);
        if (from_left){
            arr[w] = lrun[li];
            aux[2*w] = lpay[2*li];
            aux[2*w+1] = lpay[2*li+1];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            aux[2*w] = rpay[2*ri];
            aux[2*w+1] = rpay[2*ri+1];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
    free(lpay);
    free(rpay);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of arr[l..r] ascending, keeping the two-slot
 * payload array `aux` aligned with arr.
 */
void mergeSort(PAR* arr, EDGE_ID* aux, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort(arr, aux, l, mid);
    mergeSort(arr, aux, mid + 1, r);
    merge(arr, aux, l, mid, r);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR V_H0
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_V_H0(): arr[l..m] and arr[m+1..r] are each
 * sorted in DESCENDING order; merge them stably back into arr[l..r],
 * carrying the three parallel payload arrays aux/aux2/aux3 along.
 */
void merge_V_H0(EDGE_ID* arr, EDGE_ID** aux, EDGE_ID* aux2, EDGE_ID* aux3, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs and their payloads before overwriting. */
    EDGE_ID *lrun = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rrun = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    EDGE_ID **lp1 = (EDGE_ID**)malloc(n1*sizeof(EDGE_ID*));
    EDGE_ID **rp1 = (EDGE_ID**)malloc(n2*sizeof(EDGE_ID*));
    EDGE_ID *lp2 = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rp2 = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    EDGE_ID *lp3 = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rp3 = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
        lp1[t] = aux[l + t];
        lp2[t] = aux2[l + t];
        lp3[t] = aux3[l + t];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
        rp1[t] = aux[m + 1 + t];
        rp2[t] = aux2[m + 1 + t];
        rp3[t] = aux3[m + 1 + t];
    }
    /* Single merge loop; >= takes the left run on ties (stable). */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2) || (li < n1 && lrun[li] >= rrun[ri]);
        if (from_left){
            arr[w] = lrun[li];
            aux[w] = lp1[li];
            aux2[w] = lp2[li];
            aux3[w] = lp3[li];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            aux[w] = rp1[ri];
            aux2[w] = rp2[ri];
            aux3[w] = rp3[ri];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
    free(lp1);
    free(rp1);
    free(lp2);
    free(rp2);
    free(lp3);
    free(rp3);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of arr[l..r] in DESCENDING order, keeping the
 * three payload arrays aux/aux2/aux3 aligned with arr.
 */
void mergeSort_V_H0(EDGE_ID* arr, EDGE_ID** aux, EDGE_ID* aux2, EDGE_ID* aux3, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort_V_H0(arr, aux, aux2, aux3, l, mid);
    mergeSort_V_H0(arr, aux, aux2, aux3, mid + 1, r);
    merge_V_H0(arr, aux, aux2, aux3, l, mid, r);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR V_H1
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_V_H1(): arr[l..m] and arr[m+1..r] are each
 * sorted in DESCENDING order; merge them stably back into arr[l..r],
 * carrying the parallel simplex-pointer payload array `aux` along.
 */
void merge_V_H1(EDGE_ID* arr, simplex** aux, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs and their payloads before overwriting. */
    EDGE_ID *lrun = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rrun = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    simplex **lpay = (simplex**)malloc(n1*sizeof(simplex*));
    simplex **rpay = (simplex**)malloc(n2*sizeof(simplex*));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
        lpay[t] = aux[l + t];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
        rpay[t] = aux[m + 1 + t];
    }
    /* Single merge loop; >= takes the left run on ties (stable). */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2) || (li < n1 && lrun[li] >= rrun[ri]);
        if (from_left){
            arr[w] = lrun[li];
            aux[w] = lpay[li];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            aux[w] = rpay[ri];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
    free(lpay);
    free(rpay);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of arr[l..r] in DESCENDING order, keeping the
 * simplex-pointer payload array `aux` aligned with arr.
 */
void mergeSort_V_H1(EDGE_ID* arr, simplex** aux, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort_V_H1(arr, aux, l, mid);
    mergeSort_V_H1(arr, aux, mid + 1, r);
    merge_V_H1(arr, aux, l, mid, r);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR Llen
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_Llen(): arr[l..m] and arr[m+1..r] are each
 * sorted in DESCENDING order; merge them stably back into arr[l..r],
 * carrying the two parallel payload arrays aux/aux2 along.
 */
void merge_Llen(EDGE_ID* arr, EDGE_ID* aux, EDGE_ID* aux2, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs and their payloads before overwriting. */
    EDGE_ID *lrun = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rrun = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    EDGE_ID *lp1 = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rp1 = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    EDGE_ID *lp2 = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rp2 = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
        lp1[t] = aux[l + t];
        lp2[t] = aux2[l + t];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
        rp1[t] = aux[m + 1 + t];
        rp2[t] = aux2[m + 1 + t];
    }
    /* Single merge loop; >= takes the left run on ties (stable). */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2) || (li < n1 && lrun[li] >= rrun[ri]);
        if (from_left){
            arr[w] = lrun[li];
            aux[w] = lp1[li];
            aux2[w] = lp2[li];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            aux[w] = rp1[ri];
            aux2[w] = rp2[ri];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
    free(lp1);
    free(rp1);
    free(lp2);
    free(rp2);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of arr[l..r] in DESCENDING order, keeping the two
 * payload arrays aux/aux2 aligned with arr.
 */
void mergeSort_Llen(EDGE_ID* arr, EDGE_ID* aux, EDGE_ID* aux2, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort_Llen(arr, aux, aux2, l, mid);
    mergeSort_Llen(arr, aux, aux2, mid + 1, r);
    merge_Llen(arr, aux, aux2, l, mid, r);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR temp_par
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_temp_par(): arr[l..m] and arr[m+1..r] are
 * each sorted in DESCENDING order; merge them stably back into
 * arr[l..r], carrying the parallel payload array `aux` along.
 */
void merge_temp_par(PAR* arr, EDGE_ID* aux, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs and their payloads before overwriting. */
    PAR *lrun = (PAR*)malloc(n1*sizeof(PAR));
    PAR *rrun = (PAR*)malloc(n2*sizeof(PAR));
    EDGE_ID *lpay = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rpay = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
        lpay[t] = aux[l + t];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
        rpay[t] = aux[m + 1 + t];
    }
    /* Single merge loop; >= takes the left run on ties (stable). */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2) || (li < n1 && lrun[li] >= rrun[ri]);
        if (from_left){
            arr[w] = lrun[li];
            aux[w] = lpay[li];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            aux[w] = rpay[ri];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
    free(lpay);
    free(rpay);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of arr[l..r] in DESCENDING order, keeping the
 * payload array `aux` aligned with arr.
 */
void mergeSort_temp_par(PAR* arr, EDGE_ID* aux, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort_temp_par(arr, aux, l, mid);
    mergeSort_temp_par(arr, aux, mid + 1, r);
    merge_temp_par(arr, aux, l, mid, r);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR in_cycles_len
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_incycleslen(): arr holds cycle ids, ordered
 * by the key aux[id].len ASCENDING.  Merges the sorted runs arr[l..m]
 * and arr[m+1..r] stably back into arr[l..r]; `aux` itself is not moved.
 */
void merge_incycleslen(EDGE_ID* arr, cyc_info* aux, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs before overwriting. */
    EDGE_ID *lrun = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rrun = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
    }
    /* Single merge loop keyed on aux[id].len; <= keeps it stable. */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2)
                || (li < n1 && aux[lrun[li]].len <= aux[rrun[ri]].len);
        if (from_left){
            arr[w] = lrun[li];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of the cycle ids in arr[l..r], ordered ASCENDING
 * by the key aux[id].len.
 */
void mergeSort_incycleslen(EDGE_ID* arr, cyc_info* aux, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort_incycleslen(arr, aux, l, mid);
    mergeSort_incycleslen(arr, aux, mid + 1, r);
    merge_incycleslen(arr, aux, l, mid, r);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR edges_in_cycles
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_edges_in_cycles(): arr holds cycle ids,
 * ordered by the key aux[id].len DESCENDING.  Merges the sorted runs
 * arr[l..m] and arr[m+1..r] stably back into arr[l..r]; `aux` itself is
 * not moved.
 */
void merge_edges_in_cycles(EDGE_ID* arr, cyc_info* aux, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs before overwriting. */
    EDGE_ID *lrun = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rrun = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
    }
    /* Single merge loop keyed on aux[id].len; >= keeps it stable. */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2)
                || (li < n1 && aux[lrun[li]].len >= aux[rrun[ri]].len);
        if (from_left){
            arr[w] = lrun[li];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of the cycle ids in arr[l..r], ordered DESCENDING
 * by the key aux[id].len.
 */
void mergeSort_edges_in_cycles(EDGE_ID* arr, cyc_info* aux, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort_edges_in_cycles(arr, aux, l, mid);
    mergeSort_edges_in_cycles(arr, aux, mid + 1, r);
    merge_edges_in_cycles(arr, aux, l, mid, r);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR edges_in_cycles by cycid
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_edges_in_cycles_bycycid(): merges the
 * ascending sorted runs arr[l..m] and arr[m+1..r] stably back into
 * arr[l..r] (plain EDGE_ID comparison, no payload).
 */
void merge_edges_in_cycles_bycycid(EDGE_ID* arr, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID n1 = m - l + 1;
    EDGE_ID n2 = r - m;
    /* Snapshot both runs before overwriting. */
    EDGE_ID *lrun = (EDGE_ID*)malloc(n1*sizeof(EDGE_ID));
    EDGE_ID *rrun = (EDGE_ID*)malloc(n2*sizeof(EDGE_ID));
    for (EDGE_ID t = 0; t < n1; t++){
        lrun[t] = arr[l + t];
    }
    for (EDGE_ID t = 0; t < n2; t++){
        rrun[t] = arr[m + 1 + t];
    }
    /* Single merge loop; <= keeps the merge stable. */
    EDGE_ID li = 0, ri = 0, w = l;
    while (li < n1 || ri < n2){
        int from_left = (ri == n2) || (li < n1 && lrun[li] <= rrun[ri]);
        if (from_left){
            arr[w] = lrun[li];
            li++;
        }
        else{
            arr[w] = rrun[ri];
            ri++;
        }
        w++;
    }
    free(lrun);
    free(rrun);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down mergesort of arr[l..r] ascending (plain EDGE_ID values).
 */
void mergeSort_edges_in_cycles_bycycid(EDGE_ID* arr, EDGE_ID l, EDGE_ID r)
{
    if (l >= r)
        return;
    /* l + (r-l)/2 avoids overflow of l + r. */
    EDGE_ID mid = l + (r - l)/2;
    mergeSort_edges_in_cycles_bycycid(arr, l, mid);
    mergeSort_edges_in_cycles_bycycid(arr, mid + 1, r);
    merge_edges_in_cycles_bycycid(arr, l, mid, r);
}
#ifdef COMBIDX
/*
 * Case-1 scan (COMBIDX variant) for the low of the triangle's
 * coboundary column.  Walks the apex c's neighbors whose connecting
 * edge precedes the triangle edge o_ab, looking for a vertex d (other
 * than a and b) whose edges to a and b both have order <= o_ab.
 * On success fills V_info->low = (o_ab, order(cd)), sets vertex = 0 and
 * returns 1 (leaving c_ptr on the hit); returns 0 when exhausted.
 */
int H2_case1(filtration* self, coboundary_H2* V_info){
    EDGE_ID o_ab = V_info->triangle.key1;
    VERT_ID a = self->g_edges_list[2*o_ab];
    VERT_ID b = self->g_edges_list[2*o_ab+1];
    VERT_ID c = V_info->triangle.key2;
    while ((V_info->c_ptr < self->g_Neigh_len[c])
            && (self->g_Neighbors_e[c][V_info->c_ptr].order < o_ab)){
        VERT_ID d = self->g_Neighbors_e[c][V_info->c_ptr].neighbor;
        /* d must be a genuine fourth vertex whose edges ad and bd both
           appear no later than the triangle edge. */
        if ((d != a) && (d != b)
                && (COMB_IDX(a, d) <= o_ab)
                && (COMB_IDX(b, d) <= o_ab)){
            V_info->low.key1 = o_ab;
            V_info->low.key2 = self->g_Neighbors_e[c][V_info->c_ptr].order;
            V_info->vertex = 0;
            return 1;
        }
        V_info->c_ptr++;
    }
    return 0;
}
/*
 * Case-2 scan (COMBIDX variant) for the next low of the triangle's
 * coboundary column.  The neighbor lists of a, b (the triangle edge's
 * endpoints) and c (the apex) are walked in parallel via
 * a_ptr/b_ptr/c_ptr, always advancing the list whose head has the
 * smallest edge order ep.  A head qualifies when its neighbor d,
 * together with the triangle, has both remaining connecting edges with
 * order <= ep (so the cofacet is present at ep — interpretation based on
 * the visible order checks).  On success fills V_info->low = (ep,
 * order of the triangle edge not incident to the advanced vertex) and
 * records the producing list in V_info->vertex (1=a, 2=b, 3=c).  When
 * all three lists are exhausted, low.key1 = g_n_valid_edges (sentinel)
 * and vertex = -1.
 */
void H2_case2 ( filtration* self, coboundary_H2* V_info){
    //if (self->g_p_flag){
    //    printf("\nstarting H2 case 2");
    //    getchar();
    //}
    VERT_ID idxa, idxb, idxc, idx; /* unused in the COMBIDX variant */
    VERT_ID a, b, c;
    EDGE_ID o_ad, o_bd, o_cd;
    c = V_info->triangle.key2;
    a = self->g_edges_list[2*V_info->triangle.key1];
    b = self->g_edges_list[2*V_info->triangle.key1+1];
    while (1){
        /* ep = smallest head edge order among the three lists; flag
           remembers which list supplied it (-1 = all exhausted). */
        EDGE_ID ep = self->g_n_valid_edges;
        VERT_ID d;
        int flag = -1;
        if (V_info->a_ptr < self->g_Neigh_len[a]){
            ep = self->g_Neighbors_e[a][V_info->a_ptr].order;
            flag = 1;
        }
        if (V_info->b_ptr < self->g_Neigh_len[b]){
            if (self->g_Neighbors_e[b][V_info->b_ptr].order < ep){
                ep = self->g_Neighbors_e[b][V_info->b_ptr].order;
                flag = 2;
            }
        }
        if (V_info->c_ptr < self->g_Neigh_len[c]){
            if (self->g_Neighbors_e[c][V_info->c_ptr].order < ep){
                ep = self->g_Neighbors_e[c][V_info->c_ptr].order;
                flag = 3;
            }
        }
        if (flag == -1){
            /* Column exhausted: emit the sentinel. */
            V_info->low.key1 = ep;
            V_info->vertex = -1;
            return;
        }
        else if (flag == 1){
            d = self->g_Neighbors_e[a][V_info->a_ptr].neighbor;
            if ((d == b) || (d == c)){
                V_info->a_ptr++;
                continue;
            }
            o_bd = COMB_IDX(b, d);
            if (o_bd > ep){
                V_info->a_ptr++;
                continue;
            }
            o_cd = COMB_IDX(c, d);
            if (o_cd > ep){
                V_info->a_ptr++;
                continue;
            }
            V_info->low.key1 = ep;
            //o_bc = COMB_IDX(b, c);
            /* key2 = order of edge bc (opposite the advancing vertex a). */
            V_info->low.key2 = COMB_IDX(b, c);
            V_info->vertex = 1;
            return;
        }
        else if (flag == 2){
            d = self->g_Neighbors_e[b][V_info->b_ptr].neighbor;
            if ((d == a) || (d == c)){
                V_info->b_ptr++;
                continue;
            }
            o_ad = COMB_IDX(a, d);
            if (o_ad > ep){
                V_info->b_ptr++;
                continue;
            }
            o_cd = COMB_IDX(c, d);
            if (o_cd > ep){
                V_info->b_ptr++;
                continue;
            }
            V_info->low.key1 = ep;
            //o_ac = COMB_IDX(a, c);
            /* key2 = order of edge ac (opposite the advancing vertex b). */
            V_info->low.key2 = COMB_IDX(a, c);
            V_info->vertex = 2;
            return;
        }
        else if (flag == 3){
            d = self->g_Neighbors_e[c][V_info->c_ptr].neighbor;
            if ((d == a) || (d == b)){
                V_info->c_ptr++;
                continue;
            }
            o_ad = COMB_IDX(a, d);
            if (o_ad > ep){
                V_info->c_ptr++;
                continue;
            }
            o_bd = COMB_IDX(b, d);
            if (o_bd > ep){
                V_info->c_ptr++;
                continue;
            }
            V_info->low.key1 = ep;
            /* key2 = the triangle edge ab (opposite the apex c). */
            V_info->low.key2 = V_info->triangle.key1;
            V_info->vertex = 3;
            return;
        }
    }
    //V_info->low.key1 = self->g_n_valid_edges;
    //V_info->vertex = -1;
}
#else
/*
 * Case-1 scan (binary-search variant, no COMBIDX) for the low of the
 * triangle's coboundary column: walk the apex c's neighbors whose
 * connecting edge precedes the triangle edge, looking for a vertex d
 * adjacent to both a and b through edges of order <= the triangle edge.
 * search_Neighbors returns the sentinel g_n_vert when d is not a
 * neighbor.  On success fills V_info->low = (o_ab, order(cd)), sets
 * vertex = 0 and returns 1 (c_ptr stays on the hit); returns 0 when the
 * candidates are exhausted.
 */
int H2_case1(filtration* self, coboundary_H2* V_info){
    //if (self->g_p_flag){
    //    printf("\nstarting H2 case 1");
    //    getchar();
    //}
    //EDGE_ID o_ab = V_info->triangle.key1;
    VERT_ID a = self->g_edges_list[2*V_info->triangle.key1];
    VERT_ID b = self->g_edges_list[2*V_info->triangle.key1+1];
    VERT_ID c = V_info->triangle.key2;
    VERT_ID idxa, idxb, idxc; /* idxc unused in this variant */
    while ((V_info->c_ptr < self->g_Neigh_len[c])\
            && (self->g_Neighbors_e[c][V_info->c_ptr].order < V_info->triangle.key1)){
        VERT_ID d = self->g_Neighbors_e[c][V_info->c_ptr].neighbor;
        /* Is d a neighbor of a at all? */
        idxa = search_Neighbors(self, a, d, 0, self->g_Neigh_len[a] - 1);
        if (idxa == self->g_n_vert){
            V_info->c_ptr++;
            continue;
        }
        /* Edge ad must not appear after the triangle edge. */
        if (self->g_Neighbors[a][idxa].order > V_info->triangle.key1){
            V_info->c_ptr++;
            continue;
        }
        /* Same two checks for edge bd. */
        idxb = search_Neighbors(self, b, d, 0, self->g_Neigh_len[b] - 1);
        if (idxb == self->g_n_vert){
            V_info->c_ptr++;
            continue;
        }
        if (self->g_Neighbors[b][idxb].order > V_info->triangle.key1){
            V_info->c_ptr++;
            continue;
        }
        V_info->low.key1 = V_info->triangle.key1;
        V_info->low.key2 = self->g_Neighbors_e[c][V_info->c_ptr].order;
        V_info->vertex = 0;
        return 1;
    }
    return 0;
}
// Case 2 of the H2 coboundary search for triangle (a, b, c), where
// triangle.key1 is the order of edge ab and c = triangle.key2.
// Performs a three-way merge over the edge-ordered neighbor lists of
// a, b and c: on each pass `ep` is the smallest unconsumed edge order
// among the three lists and `flag` records which list supplied it
// (1 = a, 2 = b, 3 = c).  The candidate d from that list is accepted
// only if it neighbors the other two vertices with edge orders <= ep;
// otherwise the winning pointer advances and the merge continues.
// On success, low = (ep, order of the remaining triangle edge) and
// vertex identifies which of a/b/c was the winning list.
// vertex == -1 (with low.key1 == g_n_valid_edges) signals exhaustion.
// (Variant compiled when binary neighbor search is enabled.)
void H2_case2 ( filtration* self, coboundary_H2* V_info){
//if (self->g_p_flag){
// printf("\nstarting H2 case 2");
// getchar();
//}
VERT_ID idxa, idxb, idxc, idx;
VERT_ID a, b, c;
EDGE_ID o_ad, o_bd, o_cd;
c = V_info->triangle.key2;
a = self->g_edges_list[2*V_info->triangle.key1];
b = self->g_edges_list[2*V_info->triangle.key1+1];
while (1){
// ep starts at the sentinel (one past the last valid edge order)
EDGE_ID ep = self->g_n_valid_edges;
VERT_ID d;
int flag = -1;
// Pick the smallest next edge order among the three neighbor lists.
if (V_info->a_ptr < self->g_Neigh_len[a]){
ep = self->g_Neighbors_e[a][V_info->a_ptr].order;
flag = 1;
}
if (V_info->b_ptr < self->g_Neigh_len[b]){
if (self->g_Neighbors_e[b][V_info->b_ptr].order < ep){
ep = self->g_Neighbors_e[b][V_info->b_ptr].order;
flag = 2;
}
}
if (V_info->c_ptr < self->g_Neigh_len[c]){
if (self->g_Neighbors_e[c][V_info->c_ptr].order < ep){
ep = self->g_Neighbors_e[c][V_info->c_ptr].order;
flag = 3;
}
}
// All three lists exhausted: no further cofacet.
if (flag == -1){
V_info->low.key1 = ep;
V_info->vertex = -1;
return;
}
else if (flag == 1){
// Candidate d reached from a; verify it neighbors b and c
// with edge orders not exceeding ep.
d = self->g_Neighbors_e[a][V_info->a_ptr].neighbor;
idxb = search_Neighbors(self, b, d, 0, self->g_Neigh_len[b]-1);
if (idxb == self->g_n_vert){
V_info->a_ptr++;
continue;
}
o_bd = self->g_Neighbors[b][idxb].order;
if (o_bd > ep){
V_info->a_ptr++;
continue;
}
idxc = search_Neighbors(self, c, d, 0, self->g_Neigh_len[c]-1);
if (idxc == self->g_n_vert){
V_info->a_ptr++;
continue;
}
o_cd = self->g_Neighbors[c][idxc].order;
if (o_cd > ep){
V_info->a_ptr++;
continue;
}
V_info->low.key1 = ep;
// low.key2 is the order of the opposite edge bc
idx = search_Neighbors(self, b, c, 0, self->g_Neigh_len[b]-1);
V_info->low.key2 = self->g_Neighbors[b][idx].order;
V_info->vertex = 1;
return;
}
else if (flag == 2){
// Candidate d reached from b; verify against a and c.
d = self->g_Neighbors_e[b][V_info->b_ptr].neighbor;
idxa = search_Neighbors(self, a, d, 0, self->g_Neigh_len[a]-1);
if (idxa == self->g_n_vert){
V_info->b_ptr++;
continue;
}
o_ad = self->g_Neighbors[a][idxa].order;
if (o_ad > ep){
V_info->b_ptr++;
continue;
}
idxc = search_Neighbors(self, c, d, 0, self->g_Neigh_len[c]-1);
if (idxc == self->g_n_vert){
V_info->b_ptr++;
continue;
}
o_cd = self->g_Neighbors[c][idxc].order;
if (o_cd > ep){
V_info->b_ptr++;
continue;
}
V_info->low.key1 = ep;
// low.key2 is the order of the opposite edge ac
idx = search_Neighbors(self, a, c, 0, self->g_Neigh_len[a]-1);
V_info->low.key2 = self->g_Neighbors[a][idx].order;
V_info->vertex = 2;
return;
}
else if (flag == 3){
// Candidate d reached from c; verify against a and b.
d = self->g_Neighbors_e[c][V_info->c_ptr].neighbor;
idxb = search_Neighbors(self, b, d, 0, self->g_Neigh_len[b]-1);
if (idxb == self->g_n_vert){
V_info->c_ptr++;
continue;
}
o_bd = self->g_Neighbors[b][idxb].order;
if (o_bd > ep){
V_info->c_ptr++;
continue;
}
idxa = search_Neighbors(self, a, d, 0, self->g_Neigh_len[a]-1);
if (idxa == self->g_n_vert){
V_info->c_ptr++;
continue;
}
o_ad = self->g_Neighbors[a][idxa].order;
if (o_ad > ep){
V_info->c_ptr++;
continue;
}
V_info->low.key1 = ep;
//idx = search_Neighbors(self, a, c, 0, self->g_Neigh_len[a]-1);
// The opposite edge here is ab itself, whose order is triangle.key1.
V_info->low.key2 = V_info->triangle.key1;
V_info->vertex = 3;
return;
}
}
//V_info->low.key1 = self->g_n_valid_edges;
//V_info->vertex = -1;
}
#endif
// Commit the finished workspace column `ws_counter` to the global state:
//  1. flush its accumulated v_edges into the sparse V matrix
//     (with VREDUCE1, duplicates are cancelled mod 2 after sorting),
//     recording the column's start/end offsets in g_V_col_indices;
//  2. insert its pivot (low) into the per-key1 sorted pivot list
//     g_H1_cohom_pivots, keeping entries sorted by key2;
//  3. emit a persistence pair (birth, death) when the pair is
//     non-trivial (birth != death).
void update_V_coH1(filtration* self, int ws_counter){
EDGE_ID red_col = 0;
coboundary_H1_ws* this_ws = self->g_V_ws_H1 + ws_counter;
//if (self->g_new_debug){
//
// printf("\n ADDDDDING %d, %d, %d", this_ws->edge\
// , this_ws->pivot.key1\
// , this_ws->pivot.key2\
// );
// getchar();
//}
//printf("\n%d, %d, %d", this_ws->edge\
// , this_ws->pivot.key1\
// , this_ws->pivot.key2\
// );
self->g_V_sparse_beg_ptr = self->g_V_sparse_ptr;
if (this_ws->v_edges.last){
// Ensure capacity for all pending v_edges entries.
if ((this_ws->v_edges.last + self->g_V_sparse_ptr) + 1 > self->g_V_sparse_max){
self->g_V_sparse_max = self->g_V_sparse_ptr + this_ws->v_edges.last + 10000;
self->g_V_sparse_H1 = (EDGE_ID*)realloc(self->g_V_sparse_H1\
, self->g_V_sparse_max*sizeof(EDGE_ID));
}
if (this_ws->v_edges.last > 1){
#ifdef VREDUCE1
// Sort, then keep only entries that occur an odd number of times
// (coefficients are in Z/2, so equal adjacent pairs cancel).
sorter8_tim_sort(this_ws->v_edges.o_ab, this_ws->v_edges.last);
int coeff = 1;
for (EDGE_ID vv = 0; vv < this_ws->v_edges.last-1; vv++){
if (this_ws->v_edges.o_ab[vv] == this_ws->v_edges.o_ab[vv+1])
{
coeff = 1 - coeff;
}
else{
if (coeff){
self->g_V_sparse_H1[self->g_V_sparse_ptr++] = this_ws->v_edges.o_ab[vv];
}
coeff = 1;
}
}
// Last element survives iff its run had odd parity.
if (coeff){
self->g_V_sparse_H1[self->g_V_sparse_ptr++] = this_ws->v_edges.o_ab[this_ws->v_edges.last-1];
}
#else
for (EDGE_ID vv = 0; vv < this_ws->v_edges.last; vv++){
self->g_V_sparse_H1[self->g_V_sparse_ptr++] = this_ws->v_edges.o_ab[vv];
}
#endif
}
else if (this_ws->v_edges.last == 1){
self->g_V_sparse_H1[self->g_V_sparse_ptr++] = this_ws->v_edges.o_ab[0];
}
//if (this_ws->edge == self->g_debug_edge){
// printf("\nAfter adding to V sparse ");
// for (EDGE_ID bb = self->g_V_sparse_beg_ptr; bb < self->g_V_sparse_ptr; bb++){
// printf("%d, ", self->g_V_sparse_H1[bb]);
// }
// getchar();
//}
// All have been added
this_ws->v_edges.last = 0;
// If anything survived cancellation, register the column's extent.
if ((self->g_V_sparse_ptr - self->g_V_sparse_beg_ptr) > 0){
red_col = self->g_V_col_indices_ptr;
if (self->g_V_col_indices_ptr+1 == self->g_V_col_indices_max){
self->g_V_col_indices_max += 1000;
self->g_V_col_indices = (EDGE_ID*)realloc(self->g_V_col_indices
, self->g_V_col_indices_max*sizeof(EDGE_ID));
}
self->g_V_col_indices[self->g_V_col_indices_ptr] = self->g_V_sparse_beg_ptr;
self->g_V_col_indices[self->g_V_col_indices_ptr+1] = self->g_V_sparse_ptr;
self->g_V_col_indices_ptr++;
}
}
#ifdef COH1DEBUG
if (this_ws->edge == self->g_debug_edge){
printf("\n%d, %d, %d, %d: "\
, this_ws->pivot.key1\
, this_ws->pivot.key2\
, this_ws->edge\
, self->g_V_sparse_ptr - self->g_V_sparse_beg_ptr\
);
for (EDGE_ID mm = self->g_V_sparse_beg_ptr; mm < self->g_V_sparse_ptr; mm++){
printf("%d, ", self->g_V_sparse_H1[mm]);
}
getchar();
}
#endif
#ifdef VDEBUG
if (self->g_V_sparse_ptr - self->g_V_sparse_beg_ptr > 0){
printf("\n%d, %d, %d, %d"\
, this_ws->pivot.key1\
, this_ws->pivot.key2\
, this_ws->edge\
, self->g_V_sparse_ptr - self->g_V_sparse_beg_ptr\
);
//getchar();
}
#endif
// ADDING THE LOW
// Lazily allocate, then grow, the pivot bucket for this key1.
if (!self->g_H1_cohom_pivots_max_len[this_ws->pivot.key1]){
self->g_H1_cohom_pivots_max_len[this_ws->pivot.key1] = 2;
self->g_H1_cohom_pivots[this_ws->pivot.key1] = \
(H1_cohom_pivots*)malloc(self->g_H1_cohom_pivots_max_len[this_ws->pivot.key1]*sizeof(H1_cohom_pivots));
}
if (self->g_H1_cohom_pivots_len[this_ws->pivot.key1]\
== self->g_H1_cohom_pivots_max_len[this_ws->pivot.key1]){
self->g_H1_cohom_pivots_max_len[this_ws->pivot.key1] += 5;
self->g_H1_cohom_pivots[this_ws->pivot.key1] = (H1_cohom_pivots*)realloc( \
self->g_H1_cohom_pivots[this_ws->pivot.key1] \
, self->g_H1_cohom_pivots_max_len[this_ws->pivot.key1]*sizeof(H1_cohom_pivots));
//self->g_cohom_ALL_pivots_len += 5;
}
// Insertion sort from the tail: shift entries with larger key2 right
// so the bucket stays sorted by key2.
EDGE_ID old_ptr = self->g_H1_cohom_pivots_len[this_ws->pivot.key1];
EDGE_ID new_ptr = self->g_H1_cohom_pivots_len[this_ws->pivot.key1];
while (old_ptr){
old_ptr--;
if (self->g_H1_cohom_pivots[this_ws->pivot.key1][old_ptr].key2 > this_ws->pivot.key2){
self->g_H1_cohom_pivots[this_ws->pivot.key1][new_ptr--] =\
self->g_H1_cohom_pivots[this_ws->pivot.key1][old_ptr];
continue;
}
break;
}
//printf("\nAdding pivot (%d, %d) for edge %d"\
// , this_ws->pivot.key1\
// , this_ws->pivot.key2\
// , this_ws->edge\
// );
// red_col == 0 means "no recorded V column" (see V_col_idx checks
// elsewhere); otherwise it indexes into g_V_col_indices.
self->g_H1_cohom_pivots[this_ws->pivot.key1][new_ptr].key2 = this_ws->pivot.key2;
self->g_H1_cohom_pivots[this_ws->pivot.key1][new_ptr].col_idx = red_col;
self->g_H1_cohom_pivots[this_ws->pivot.key1][new_ptr].bndry = this_ws->edge;
self->g_H1_cohom_pivots_len[this_ws->pivot.key1]++;
// PERS PAIRS
// Add non-zero barcodes
PAR birth = self->g_edge_parameter[this_ws->edge];
PAR death = self->g_edge_parameter[this_ws->pivot.key1];
if (birth != death){
//printf("\nNon trivial pers pair (%f, %f)", birth, death);
#ifdef DEBUGPIVOTS
printf("\nBirth, death (%lf, %lf)", birth, death);
printf("\n%d at pair (%d, %d)", this_ws->edge\
, this_ws->pivot.key1\
, this_ws->pivot.key2);
getchar();
#endif
//if (birth > death){
//
//}
if (self->g_H1_pers_pairs_len+2 == self->g_H1_pers_pairs_max_len){
self->g_H1_pers_pairs_max_len += 1000;
self->g_H1_pers_pairs = (PAR*)realloc(self->g_H1_pers_pairs\
, self->g_H1_pers_pairs_max_len*sizeof(PAR));
}
self->g_H1_pers_pairs[self->g_H1_pers_pairs_len++] = birth;
self->g_H1_pers_pairs[self->g_H1_pers_pairs_len++] = death;
}
}
// Tear down a filtration object: releases every buffer owned by `self`
// (edge data, neighbor lists, H0 reduction state, H1/H2 cohomology
// pivots, persistence pairs, undead-class lists) and finally `self`
// itself.  Optionally reports wall-clock time spent deallocating.
//
// Bug fixed: the original dereferenced self->g_suppress_output AFTER
// free(self) (use-after-free, undefined behavior).  The flag is now
// captured before any frees and `self` is not touched once released.
void deallocator(filtration* self){
struct timespec start_wall_clock;
struct timespec finish_wall_clock;
double timer;
// Capture before freeing: `self` must not be read after free(self).
int suppress_output = self->g_suppress_output;
if (!suppress_output){
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
}
free(self->filename);
//free(self->g_homH1_cycles_file);
// Deallocate edges
free(self->g_edge_parameter);
free(self->g_edges_list);
// Per-edge pivot buckets exist only for dimensions that were computed.
for (EDGE_ID mm = 0; mm < self->g_n_valid_edges; mm++){
if (self->g_dim_lim > 0){
if (self->g_H1_cohom_pivots_max_len[mm]){
free(self->g_H1_cohom_pivots[mm]);
}
if (self->g_dim_lim > 1){
if (self->g_H2_cohom_pivots_max_len[mm]){
free(self->g_H2_cohom_pivots[mm]);
}
}
}
}
// Deallocate Neighbors
for (EDGE_ID mm = 0; mm < self->g_n_vert; mm++){
if (self->g_Neigh_len[mm]){
free(self->g_Neighbors[mm]);
free(self->g_Neighbors_e[mm]);
}
}
free(self->g_Neighbors);
free(self->g_Neighbors_e);
free(self->g_Neigh_len);
// Deallocate R0
free(self->g_pivots_H0);
free(self->g_R_sparse_H0);
free(self->g_R_col_indices_H0);
free(self->g_edges_with_pivots_H0);
#ifdef SAVEPD
free(self->g_H0_pers_file);
#endif
if (self->g_dim_lim > 0){
free(self->g_coH1_all_lows);
free(self->g_H1_cohom_pivots);
free(self->g_H1_cohom_pivots_len);
free(self->g_H1_cohom_pivots_max_len);
#ifdef SAVEPD
free(self->g_H1_pers_file);
#endif
#ifdef SAVEV
free(self->g_coH1_V_file);
#endif
free(self->g_H1_pers_pairs);
free(self->g_V_col_indices);
if (self->g_dim_lim > 1){
free(self->g_H2_cohom_pivots);
free(self->g_H2_cohom_pivots_len);
free(self->g_H2_cohom_pivots_max_len);
#ifdef SAVEPD
free(self->g_H2_pers_file);
#endif
#ifdef SAVEV
free(self->g_coH2_V_file);
#endif
free(self->g_H2_pers_pairs);
}
}
if (self->g_compute_cycles){
free(self->g_H1_undead);
if (self->g_dim_lim > 1){
free(self->g_H2_undead);
}
}
// Last use of `self`; only the captured flag may be read afterwards.
free(self);
if (!suppress_output){
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
timer = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
timer += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
printf("\nTime taken to deallocate: %lf", timer);
}
}
// Insert cofacet `phi` into the implicit-V hash table of workspace
// column `ws_counter`, keyed by phi->low.key1 (bucket) and kept sorted
// by (key2, o_ab) inside the currently-scanned bucket.
//  - low.key1 == g_n_valid_edges is the exhausted sentinel: ignore.
//  - If phi falls in the bucket currently being scanned (k1_ptr),
//    insertion-sort it in place (shifting larger entries right),
//    stopping at the scan cursor k2_ptr.
//  - Otherwise append it to an existing bucket with matching k1,
//    or open a new bucket (growing the bucket array if needed).
// `flag_next` marks whether the entry still has further cofacets to
// enumerate.
//
// Bug fixed: the in-place insertion branch assigned keys2[mm].k2 twice
// (duplicated line); the redundant assignment was removed.
void insert_in_implicit_v(filtration* self, int ws_counter, coboundary_H1* phi, int flag_next){
if (phi->low.key1 == self->g_n_valid_edges){
return;
}
coboundary_H1_ws* this_ws = self->g_V_ws_H1 + ws_counter;
// Case 1: phi belongs to the bucket currently under reduction.
if (phi->low.key1 == this_ws->keys1[this_ws->k1_ptr].k1){
if (this_ws->keys1[this_ws->k1_ptr].last ==\
this_ws->keys1[this_ws->k1_ptr].max_len){
self->g_V_ws_H1[ws_counter].keys1[self->g_V_ws_H1[ws_counter].k1_ptr].max_len += 10;
self->g_V_ws_H1[ws_counter].keys1[self->g_V_ws_H1[ws_counter].k1_ptr].keys2 = \
(implicit_keys2*)realloc\
(self->g_V_ws_H1[ws_counter].keys1[self->g_V_ws_H1[ws_counter].k1_ptr].keys2\
, self->g_V_ws_H1[ws_counter].keys1[self->g_V_ws_H1[ws_counter].k1_ptr].max_len\
*sizeof(implicit_keys2));
}
EDGE_ID mm = this_ws->keys1[this_ws->k1_ptr].last;
int compare;
while (1){
// compare == 1: entry at mm-1 sorts after phi, shift it right.
if (this_ws->keys1[this_ws->k1_ptr].keys2[mm-1].k2 < phi->low.key2) compare = 0;
else if (this_ws->keys1[this_ws->k1_ptr].keys2[mm-1].k2 > phi->low.key2) compare = 1;
else{
if (this_ws->keys1[this_ws->k1_ptr].keys2[mm-1].o_ab < phi->o_ab) compare = 0;
else compare = 1;
}
if (compare){
this_ws->keys1[this_ws->k1_ptr].keys2[mm] =\
this_ws->keys1[this_ws->k1_ptr].keys2[mm-1];
}
else{
// Found the slot: write phi here.
this_ws->keys1[this_ws->k1_ptr].keys2[mm].k2 = phi->low.key2;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].o_ab = phi->o_ab;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].a_ptr = phi->a_ptr;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].b_ptr = phi->b_ptr;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].flag_next = flag_next;
this_ws->keys1[this_ws->k1_ptr].last++;
return;
}
mm--;
// Do not shift past the scan cursor: phi lands right at k2_ptr.
if (mm == this_ws->k2_ptr){
this_ws->keys1[this_ws->k1_ptr].keys2[mm].k2 = phi->low.key2;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].o_ab = phi->o_ab;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].a_ptr = phi->a_ptr;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].b_ptr = phi->b_ptr;
this_ws->keys1[this_ws->k1_ptr].keys2[mm].flag_next = flag_next;
this_ws->keys1[this_ws->k1_ptr].last++;
return;
}
}
}
// Case 2: append to an existing bucket with matching k1 (unsorted;
// buckets are sorted lazily when they become the scan target).
for (EDGE_ID mm = 0; mm < this_ws->last; mm++){
if (this_ws->keys1[mm].k1 == phi->low.key1){
if (this_ws->keys1[mm].last ==\
this_ws->keys1[mm].max_len){
self->g_V_ws_H1[ws_counter].keys1[mm].max_len += 10;
self->g_V_ws_H1[ws_counter].keys1[mm].keys2 = (implicit_keys2*)realloc\
(self->g_V_ws_H1[ws_counter].keys1[mm].keys2\
, self->g_V_ws_H1[ws_counter].keys1[mm].max_len*sizeof(implicit_keys2));
}
this_ws->keys1[mm].flag_empty = 0;
this_ws->keys1[mm].keys2[this_ws->keys1[mm].last].k2 = phi->low.key2;
this_ws->keys1[mm].keys2[this_ws->keys1[mm].last].o_ab = phi->o_ab;
this_ws->keys1[mm].keys2[this_ws->keys1[mm].last].a_ptr = phi->a_ptr;
this_ws->keys1[mm].keys2[this_ws->keys1[mm].last].b_ptr = phi->b_ptr;
this_ws->keys1[mm].keys2[this_ws->keys1[mm].last].flag_next = flag_next;
this_ws->keys1[mm].last++;
return;
}
}
// Case 3: open a new bucket, growing the bucket array (and
// pre-allocating keys2 storage for the new slots) if it is full.
if (self->g_V_ws_H1[ws_counter].last == self->g_V_ws_H1[ws_counter].max_len){
EDGE_ID mm = self->g_V_ws_H1[ws_counter].max_len;
self->g_V_ws_H1[ws_counter].max_len += 10;
self->g_V_ws_H1[ws_counter].keys1 = (implicit_keys1*)realloc(self->g_V_ws_H1[ws_counter].keys1\
, self->g_V_ws_H1[ws_counter].max_len*sizeof(implicit_keys1));
while (mm < self->g_V_ws_H1[ws_counter].max_len){
this_ws->keys1[mm].flag_empty = 1;
this_ws->keys1[mm].max_len = 10;
this_ws->keys1[mm].last = 0;
self->g_V_ws_H1[ws_counter].keys1[mm].keys2 = (implicit_keys2*)malloc(10*sizeof(implicit_keys2));
mm++;
}
}
this_ws->keys1[this_ws->last].flag_empty = 0;
this_ws->keys1[this_ws->last].k1 = phi->low.key1;
this_ws->keys1[this_ws->last].keys2[0].k2 = phi->low.key2;
this_ws->keys1[this_ws->last].keys2[0].o_ab = phi->o_ab;
this_ws->keys1[this_ws->last].keys2[0].a_ptr = phi->a_ptr;
this_ws->keys1[this_ws->last].keys2[0].b_ptr = phi->b_ptr;
this_ws->keys1[this_ws->last].keys2[0].flag_next = flag_next;
this_ws->keys1[this_ws->last].last = 1;
this_ws->last++;
return;
}
// Debug helper: dump the implicit-V hash table of workspace column
// `ws_counter`, but only when that column's edge matches g_debug_edge.
// Each non-empty bucket prints its entries as o_ab:(k1, k2):flag_next.
//
// Bug fixed: the "empty" printf passed two arguments to a format string
// with no conversion specifiers, so they were silently discarded; the
// format now actually prints the bucket index and its k1.
void print_v_implicit(filtration* self, int ws_counter){
if (self->g_V_ws_H1[ws_counter].edge == self->g_debug_edge){
EDGE_ID k1_ptr = 0;
if (k1_ptr == self->g_V_ws_H1[ws_counter].last){
printf("\nv implicit is empty");
return;
}
EDGE_ID k2_ptr = 0;
while (k1_ptr < self->g_V_ws_H1[ws_counter].last){
if (self->g_V_ws_H1[ws_counter].keys1[k1_ptr].flag_empty){
printf("\nidx %d (k1 %d): empty", k1_ptr, self->g_V_ws_H1[ws_counter].keys1[k1_ptr].k1);
k1_ptr++;
continue;
}
printf("\n");
printf("idx %d, last %d:: ", k1_ptr, self->g_V_ws_H1[ws_counter].keys1[k1_ptr].last);
while (k2_ptr < self->g_V_ws_H1[ws_counter].keys1[k1_ptr].last){
printf("%d:(%d, %d):%d, ", self->g_V_ws_H1[ws_counter].keys1[k1_ptr].keys2[k2_ptr].o_ab\
, self->g_V_ws_H1[ws_counter].keys1[k1_ptr].k1\
, self->g_V_ws_H1[ws_counter].keys1[k1_ptr].keys2[k2_ptr].k2\
, self->g_V_ws_H1[ws_counter].keys1[k1_ptr].keys2[k2_ptr].flag_next\
);
k2_ptr++;
}
k2_ptr = 0;
k1_ptr++;
}
}
}
// Debug helper: dump the implicit-V hash table of H2 workspace column
// `ws_counter` (unconditionally, unlike the H1 variant).  Each
// non-empty bucket prints its entries as (o_abc.key1, o_abc.key2):flag.
//
// Bug fixed: the "empty" printf passed two arguments to a format string
// with no conversion specifiers, so they were silently discarded; the
// format now actually prints the bucket index and its k1.
void coH2_print_v_implicit(filtration* self, int ws_counter){
printf("\nk1ptr is %d, k2ptr is %d", self->g_V_ws_H2[ws_counter].k1_ptr\
, self->g_V_ws_H2[ws_counter].k2_ptr);
EDGE_ID k1_ptr = 0;
if (k1_ptr == self->g_V_ws_H2[ws_counter].last){
printf("\nv implicit is empty");
return;
}
EDGE_ID k2_ptr = 0;
while (k1_ptr < self->g_V_ws_H2[ws_counter].last){
if (self->g_V_ws_H2[ws_counter].keys1[k1_ptr].flag_empty){
printf("\nidx %d (k1 %d): empty", k1_ptr, self->g_V_ws_H2[ws_counter].keys1[k1_ptr].k1);
k1_ptr++;
continue;
}
printf("\n");
printf("idx %d, last %d:: ", k1_ptr, self->g_V_ws_H2[ws_counter].keys1[k1_ptr].last);
while (k2_ptr < self->g_V_ws_H2[ws_counter].keys1[k1_ptr].last){
printf("(%d, %d):%d, "\
, self->g_V_ws_H2[ws_counter].keys1[k1_ptr].keys2[k2_ptr].o_abc.key1\
, self->g_V_ws_H2[ws_counter].keys1[k1_ptr].keys2[k2_ptr].o_abc.key2\
, self->g_V_ws_H2[ws_counter].keys1[k1_ptr].keys2[k2_ptr].flag_next\
);
k2_ptr++;
}
k2_ptr = 0;
k1_ptr++;
}
}
// Worker-thread entry point for the parallel H1 reduction phase.
// Each worker claims a 1-based id, then loops forever: it parks on the
// g_start_workers condition variable (the last worker to go to sleep
// wakes the boss via g_start_boss), and on wake-up reduces its slice of
// workspace columns [g_jobs[tid-1], g_jobs[tid]) against the already
// committed complex and trivial pairs.  A column keeps being reduced
// (adding boundary columns, recorded V columns, and re-inserting
// advanced cofacets into its implicit-V table) until its pivot matches
// neither a trivial pair nor a committed pivot, at which point it is
// marked for appending to the complex.  g_delete_threads terminates
// the worker.
void* reduce_with_complex_coH1(void* arg){
filtration* self = arg;
pthread_mutex_lock(&(self->g_thread_lock));
int tid = ++self->g_thread_id;
pthread_mutex_unlock(&(self->g_thread_lock));
for (;;){
// Park until the boss broadcasts; last sleeper signals the boss.
pthread_mutex_lock(&(self->g_thread_lock));
self->g_sleeping_threads++;
self->g_processed_threads++;
if (self->g_sleeping_threads == self->g_cpu_count){
pthread_cond_signal(&(self->g_start_boss));
}
pthread_cond_wait(&(self->g_start_workers), &(self->g_thread_lock));
if (self->g_delete_threads){
pthread_mutex_unlock(&(self->g_thread_lock));
pthread_exit(NULL);
}
self->g_sleeping_threads--;
pthread_mutex_unlock(&(self->g_thread_lock));
// Process this worker's contiguous slice of workspace columns.
for (int ws_counter = self->g_jobs[tid - 1]; ws_counter < self->g_jobs[tid]; ws_counter++){
coboundary_H1_ws* this_ws = self->g_V_ws_H1 + ws_counter;
if (!this_ws->flag_non_empty){
// We are sure that we will exit only if there is no reduction
// required with existing complex or with trivial pair
this_ws->flag_red_w_complex = 0;
this_ws->flag_red_w_trivial = 0;
this_ws->flag_append_to_complex = 0;
continue;
}
if (this_ws->flag_append_to_complex){
continue;
}
// If being processed for the first time...
if (this_ws->flag_first){
this_ws->flag_first = 0;
// Trivial pair: the column's low coincides with its own
// apparent pair.
if ((self->g_coH1_all_lows[this_ws->pivot.key1].low.key1 == this_ws->pivot.key1)\
&& (self->g_coH1_all_lows[this_ws->pivot.key1].low.key2 == this_ws->pivot.key2)){
this_ws->reduce_w_bndry = this_ws->pivot.key1;
this_ws->V_col_idx = 0;
this_ws->flag_red_w_trivial = 1;
}
else{
// If this low is not a pivot
if (!self->g_H1_cohom_pivots_len[this_ws->pivot.key1]){
#ifdef COH1DEBUG
if (this_ws->edge == self->g_debug_edge ){
printf("\n(%d, %d) pivot of %d is not a pivot 1"\
, this_ws->pivot.key1\
, this_ws->pivot.key2\
, this_ws->edge\
);
}
#endif
this_ws->flag_append_to_complex = 1;
continue;
}
else{
EDGE_ID idx = search_H1_cohom_pivots(self->g_H1_cohom_pivots[this_ws->pivot.key1]\
, 0 \
, self->g_H1_cohom_pivots_len[this_ws->pivot.key1] - 1\
, this_ws->pivot.key2 \
, self->g_n_valid_edges);
// If this low is not a pivot
if (idx == self->g_n_valid_edges){
#ifdef COH1DEBUG
if (this_ws->edge == self->g_debug_edge ){
printf("\n(%d, %d) pivot of %d is not a pivot 2"\
, this_ws->pivot.key1\
, this_ws->pivot.key2\
, this_ws->edge\
);
}
#endif
this_ws->flag_append_to_complex = 1;
continue;
}
else{
this_ws->flag_red_w_complex = 1;
this_ws->reduce_w_bndry = self->g_H1_cohom_pivots[this_ws->pivot.key1][idx].bndry;
this_ws->V_col_idx = self->g_H1_cohom_pivots[this_ws->pivot.key1][idx].col_idx;
}
}
}
}
if ((!this_ws->flag_red_w_trivial) && (!this_ws->flag_red_w_complex)){
this_ws->flag_append_to_complex = 1;
continue;
}
// We know that parallel will end only when there are no more red. to be with trivial and complex
this_ws->flag_red_w_complex = 0;
this_ws->flag_red_w_trivial = 0;
this_ws->flag_append_to_complex = 1;
while(1){
// Reserve room for the boundary column plus, if recorded, its
// stored V column entries.
EDGE_ID check_len = this_ws->v_edges.last + 1;
if (this_ws->V_col_idx){
check_len += self->g_V_col_indices[this_ws->V_col_idx+1] -\
self->g_V_col_indices[this_ws->V_col_idx];
}
if (check_len > this_ws->v_edges.max_len){
this_ws->v_edges.max_len = check_len + 100;
self->g_V_ws_H1[ws_counter].v_edges.o_ab =\
(EDGE_ID*)realloc(self->g_V_ws_H1[ws_counter].v_edges.o_ab\
, this_ws->v_edges.max_len*sizeof(EDGE_ID));
}
this_ws->v_edges.o_ab[this_ws->v_edges.last++] = this_ws->reduce_w_bndry;
coboundary_H1 ttemp;
ttemp.o_ab = this_ws->reduce_w_bndry;
//if (this_ws->edge == self->g_debug_edge){
// printf("\n%d: Appending to v edge in parallel %d", this_ws->edge, ttemp.o_ab);
// getchar();
//}
// Seed the reducer column's first cofacet at/above our pivot.
find_H1_cohom_greater(self, &(ttemp), &(this_ws->pivot));
insert_in_implicit_v(self, ws_counter, &(ttemp), 1);
// IF the V was recorded, add the bndries
if (this_ws->V_col_idx){
// We have to cycle through the col in V and add all the other boundary columns for reduction
EDGE_ID start = self->g_V_col_indices[this_ws->V_col_idx];
EDGE_ID end = self->g_V_col_indices[this_ws->V_col_idx+1];
for (EDGE_ID mm = start; mm < end; mm++){
this_ws->v_edges.o_ab[this_ws->v_edges.last++] = self->g_V_sparse_H1[mm];
ttemp.o_ab = self->g_V_sparse_H1[mm];
//if (this_ws->edge == self->g_debug_edge){
// printf(", %d", ttemp.o_ab);
//}
// Find the first low greater than or equal pivot
find_H1_cohom_greater(self, &(ttemp), &(this_ws->pivot));
insert_in_implicit_v(self, ws_counter, &(ttemp), 1);
}
}
//if (this_ws->edge == self->g_debug_edge){
// getchar();
//}
// Recompute this column's pivot after the merge.
reduce_hash_table_coH1(self, ws_counter);
//if (this_ws->edge == self->g_debug_edge){
// printf("\nPivot after reduction in parallel is (%d, %d)", this_ws->pivot.key1\
// , this_ws->pivot.key2);
//}
if (!this_ws->flag_non_empty){
break;
}
// Check with trivial pair
if ((self->g_coH1_all_lows[this_ws->pivot.key1].low.key1 == this_ws->pivot.key1)\
&& (self->g_coH1_all_lows[this_ws->pivot.key1].low.key2 == this_ws->pivot.key2)){
this_ws->reduce_w_bndry = this_ws->pivot.key1;
this_ws->V_col_idx = 0;
continue;
}
// If this low is not a pivot
if (!self->g_H1_cohom_pivots_len[this_ws->pivot.key1]){
break;
}
EDGE_ID idx = search_H1_cohom_pivots(self->g_H1_cohom_pivots[this_ws->pivot.key1]\
, 0 \
, self->g_H1_cohom_pivots_len[this_ws->pivot.key1] - 1\
, this_ws->pivot.key2 \
, self->g_n_valid_edges);
if (idx == self->g_n_valid_edges){
break;
}
this_ws->reduce_w_bndry = self->g_H1_cohom_pivots[this_ws->pivot.key1][idx].bndry;
this_ws->V_col_idx = self->g_H1_cohom_pivots[this_ws->pivot.key1][idx].col_idx;
}
}
}
}
// Serial phase: reduce workspace columns against each other (pairwise,
// within the current batch).  For each non-empty column ws_counter, scan
// earlier columns m:
//  - m's pivot sorts after ours (compare == 1): irrelevant, skip;
//  - m's pivot sorts before ours (compare == 0): if m itself still
//    needs reduction, hold this column back for another round;
//  - pivots are equal (compare == -1): merge m into this column
//    (v_edges and implicit-V hash table), recompute the pivot, then
//    re-check trivial pairs / committed pivots; restart the scan at
//    m = 0 after each successful merge.
void reduce_with_self_coH1(filtration* self){
// Now we have to reduce
for (int ws_counter = 0; ws_counter < self->g_ws_counter; ws_counter++){
coboundary_H1_ws* this_ws = self->g_V_ws_H1 + ws_counter;
// If empty, then continue and don't append to complex
if (!this_ws->flag_non_empty){
//this_ws->flag_append_to_complex = 0;
continue;
}
int m = 0;
// Keep reducing if reduce with complex flag is 0 and reduce with trivial flag is 0
while((m < ws_counter)\
&& (!this_ws->flag_red_w_complex)\
&& (!this_ws->flag_red_w_trivial)){
coboundary_H1_ws* m_ws = self->g_V_ws_H1 + m;
// If m is empty, continue
if (!m_ws->flag_non_empty){
m++;
continue;
}
// Three-way pivot comparison: 1 = m after us, 0 = m before us,
// -1 = same pivot.
int compare;
if (m_ws->pivot.key1 > this_ws->pivot.key1) compare = 1;
else if (m_ws->pivot.key1 < this_ws->pivot.key1) compare = 0;
else{
if (m_ws->pivot.key2 > this_ws->pivot.key2) compare = 1;
else if (m_ws->pivot.key2 < this_ws->pivot.key2) compare = 0;
else compare = -1;
}
// If pivot of m is higher than pivot of ws_counter
// then we don't care
if (compare == 1){
m++;
continue;
}
// If pivot of m is lower than pivot of ws_counter
// then if m has to be reduced, we have to hold ws_counter
if (compare == 0){
if (m_ws->flag_red_w_complex || m_ws->flag_red_w_trivial){
this_ws->flag_append_to_complex = 0;
break;
}
m++;
continue;
}
// At this point they have same low
if (m_ws->flag_red_w_complex || m_ws->flag_red_w_trivial){
this_ws->flag_append_to_complex = 0;
//m++;
break;
//continue;
}
//printf("\nin serial reducing %d with %d", this_ws->edge, m_ws->edge);
//getchar();
// Merge m and this_ws
//
// Merge v_edges
if (this_ws->v_edges.last + m_ws->v_edges.last > this_ws->v_edges.max_len - 1){
this_ws->v_edges.max_len += m_ws->v_edges.last + 100;
self->g_V_ws_H1[ws_counter].v_edges.o_ab = (EDGE_ID*)realloc(\
self->g_V_ws_H1[ws_counter].v_edges.o_ab\
, this_ws->v_edges.max_len*sizeof(EDGE_ID));
}
// Add the original edge
//if (this_ws->edge == self->g_debug_edge){
// printf("\nAdding to v edge in serial %d", m_ws->edge);
//}
this_ws->v_edges.o_ab[this_ws->v_edges.last++] = m_ws->edge;
for (EDGE_ID bb = 0; bb < m_ws->v_edges.last; bb++){
//if (this_ws->edge == self->g_debug_edge){
// printf(", %d", m_ws->v_edges.o_ab[bb]);
//}
this_ws->v_edges.o_ab[this_ws->v_edges.last++] =\
m_ws->v_edges.o_ab[bb];
}
// Merge hash tables
// Entries of m's currently-scanned bucket before its cursor have
// already been consumed, so copying starts at m's k2_ptr there.
coboundary_H1 ttemp;
for (EDGE_ID bb = 0; bb < m_ws->last; bb++){
EDGE_ID m_start = 0;
if (bb == m_ws->k1_ptr){
m_start = m_ws->k2_ptr;
}
ttemp.low.key1 = m_ws->keys1[bb].k1;
for (EDGE_ID mm = m_start; mm < m_ws->keys1[bb].last; mm++){
ttemp.low.key2 = m_ws->keys1[bb].keys2[mm].k2;
ttemp.o_ab = m_ws->keys1[bb].keys2[mm].o_ab;
ttemp.a_ptr = m_ws->keys1[bb].keys2[mm].a_ptr;
ttemp.b_ptr = m_ws->keys1[bb].keys2[mm].b_ptr;
insert_in_implicit_v(self, ws_counter, &(ttemp)\
, m_ws->keys1[bb].keys2[mm].flag_next);
}
}
// Now reduce
reduce_hash_table_coH1(self, ws_counter);
//if (this_ws->edge == self->g_debug_edge){
// printf("\nPivot after reduction in serial is (%d, %d)", this_ws->pivot.key1\
// , this_ws->pivot.key2);
//}
if (!this_ws->flag_non_empty){
break;
}
// Check with trivial pair
if ((self->g_coH1_all_lows[this_ws->pivot.key1].low.key1 == this_ws->pivot.key1)\
&& (self->g_coH1_all_lows[this_ws->pivot.key1].low.key2 == this_ws->pivot.key2)){
this_ws->flag_red_w_trivial = 1;
this_ws->flag_red_w_complex = 0;
this_ws->flag_append_to_complex = 0;
this_ws->reduce_w_bndry = this_ws->pivot.key1;
this_ws->V_col_idx = 0;
break;
}
// If this low is not a pivot
if (self->g_H1_cohom_pivots_len[this_ws->pivot.key1]){
EDGE_ID idx = search_H1_cohom_pivots(self->g_H1_cohom_pivots[this_ws->pivot.key1]\
, 0 \
, self->g_H1_cohom_pivots_len[this_ws->pivot.key1] - 1\
, this_ws->pivot.key2 \
, self->g_n_valid_edges);
if (idx != self->g_n_valid_edges){
this_ws->flag_red_w_trivial = 0;
this_ws->flag_red_w_complex = 1;
this_ws->flag_append_to_complex = 0;
this_ws->reduce_w_bndry = self->g_H1_cohom_pivots[this_ws->pivot.key1][idx].bndry;
this_ws->V_col_idx = self->g_H1_cohom_pivots[this_ws->pivot.key1][idx].col_idx;
break;
}
}
// Reset m after single reduction
m = 0;
}
}
}
// Scan the implicit-V hash table of workspace column `ws_counter` for
// its current pivot (smallest surviving (key1, key2) with odd mod-2
// coefficient).  The scan walks the current key1 bucket (k1_ptr) via
// k2_ptr: adjacent entries with equal key2 cancel in pairs (coeff flips);
// entries flagged flag_next are advanced to their next cofacet via
// find_H1_cohom_next and re-inserted before the cursor passes them.
// When a bucket is fully cancelled it is marked empty, buckets are
// compacted to the front, the bucket with smallest key1 becomes the new
// scan target (its keys2 sorted lazily), and the scan restarts.  Sets
// this_ws->pivot on success, or flag_non_empty = 0 if the whole column
// cancels to zero.
void reduce_hash_table_coH1(filtration* self, int ws_counter){
// Now we have to reduce
int coeff = 1;
coboundary_H1_ws* this_ws = self->g_V_ws_H1 + ws_counter;
coboundary_H1 ttemp;
EDGE_ID* k1_ptr = &(this_ws->k1_ptr);
EDGE_ID* k2_ptr = &(this_ws->k2_ptr);
while (1){
// A singleton bucket cannot cancel: it is the pivot.
if (this_ws->keys1[*k1_ptr].last == 1){
this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
break;
}
if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2 ==\
this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].k2){
// Equal key2: the pair cancels mod 2.
coeff = 1 - coeff;
// Identical source entries also cancel their "advance" flags.
if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].o_ab ==\
this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].o_ab){
if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next==\
this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].flag_next){
this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next = 0;
this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].flag_next = 0;
}
}
}
else{
// Run of equal key2 ends: odd parity means this is the pivot.
if (coeff){
this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
break;
}
else{
coeff = 1;
}
}
// Consumed entry still has further cofacets: advance and re-insert.
if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next){
ttemp.o_ab = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].o_ab;
ttemp.a_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].a_ptr;
ttemp.b_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].b_ptr;
ttemp.low.key1 = this_ws->keys1[*k1_ptr].k1;
ttemp.low.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
//if (this_ws->edge == self->g_debug_edge){
// printf("\nFinding next of %d:(%d, %d)", ttemp.o_ab\
// , ttemp.low.key1\
// , ttemp.low.key2\
// );
//}
find_H1_cohom_next(self, &(ttemp));
this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next = 0;
insert_in_implicit_v(self, ws_counter, &(ttemp), 1);
// It is possible that last key1 and last key2 changed. Make sure last is consistent
}
*k2_ptr = *k2_ptr + 1;
// Cursor reached the bucket's final entry.
if (*k2_ptr == this_ws->keys1[*k1_ptr].last-1){
if (coeff){
this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
break;
}
// Last entry cancelled: advance it too if it has more cofacets.
if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next){
ttemp.o_ab = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].o_ab;
ttemp.a_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].a_ptr;
ttemp.b_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].b_ptr;
ttemp.low.key1 = this_ws->keys1[*k1_ptr].k1;
ttemp.low.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
//if (this_ws->edge == self->g_debug_edge){
// printf("\nFinding next of %d:(%d, %d)", ttemp.o_ab\
// , ttemp.low.key1\
// , ttemp.low.key2\
// );
//}
find_H1_cohom_next(self, &(ttemp));
this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next = 0;
insert_in_implicit_v(self, ws_counter, &(ttemp), 1);
}
// Re-insertion may have grown this bucket past the cursor.
if (*k2_ptr == this_ws->keys1[*k1_ptr].last-2){
*k2_ptr = *k2_ptr + 1;
this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
break;
}
else{
// Mark this key1 as empty
this_ws->keys1[*k1_ptr].flag_empty = 1;
// Reallocate to prune space
if (this_ws->keys1[*k1_ptr].max_len > 5){
this_ws->keys1[*k1_ptr].max_len = 5;
self->g_V_ws_H1[ws_counter].keys1[*k1_ptr].keys2 = \
(implicit_keys2*)realloc\
(self->g_V_ws_H1[ws_counter].keys1[*k1_ptr].keys2\
, self->g_V_ws_H1[ws_counter].keys1[*k1_ptr].max_len\
*sizeof(implicit_keys2));
}
// Compact non-empty buckets to the front and find the one with
// the smallest key1; it becomes the next scan target.
EDGE_ID current_ptr = 0;
EDGE_ID minn = self->g_n_valid_edges;
for (EDGE_ID mm = 0; mm < this_ws->last; mm++){
if (this_ws->keys1[mm].flag_empty){
continue;
}
implicit_keys1 ttemp = this_ws->keys1[current_ptr];
this_ws->keys1[current_ptr] = this_ws->keys1[mm];
this_ws->keys1[mm] = ttemp;
if (this_ws->keys1[current_ptr].k1 < minn){
minn = this_ws->keys1[current_ptr].k1;
*k1_ptr = current_ptr;
}
current_ptr++;
}
this_ws->last = current_ptr;
// No buckets left: the column reduced to zero.
if (minn == self->g_n_valid_edges){
this_ws->flag_non_empty = 0;
break;
}
coeff = 1;
*k2_ptr = 0;
// Buckets are appended unsorted; sort before scanning.
sorter7_tim_sort(this_ws->keys1[*k1_ptr].keys2\
, this_ws->keys1[*k1_ptr].last);
}
}
}
}
// Boss-side driver for one H1 workspace round.  Wakes the worker
// threads (parallel reduction against the committed complex), waits for
// all of them via the g_start_boss handshake, runs the serial
// self-reduction, then sweeps the workspace:
//  - columns that reduced to zero produce an undead (essential) H1
//    class: an open persistence pair (birth, -1) and, if cycle
//    computation is on, an entry in g_H1_undead;
//  - finished columns (flag_append_to_complex) are committed via
//    update_V_coH1;
//  - still-active columns are swapped to the front; g_ws_counter is
//    shrunk to the surviving count for the next round.
// NOTE(review): the condvar wait loop appears to rely on g_thread_lock
// being held by the caller — confirm against the call sites.
void reduce_ws_coH1(filtration* self){
//printf("\nBefore parallel");
//for (EDGE_ID mm = 0; mm < self->g_cohom_ws_size; mm++){
// printf("\n%d:(%d, %d) flag %d", self->g_V_ws_H1[mm].edge\
// , self->g_V_ws_H1[mm].pivot.key1\
// , self->g_V_ws_H1[mm].pivot.key2\
// , self->g_V_ws_H1[mm].flag_append_to_complex);
// printf(" v: ");
// for (EDGE_ID bb = 0; bb < self->g_V_ws_H1[mm].v_edges.last; bb++){
// printf("%d, ", self->g_V_ws_H1[mm].v_edges.o_ab[bb]);
// }
//}
//
// PARALLEL
self->g_processed_threads = 0;
pthread_cond_broadcast(&(self->g_start_workers));
// Wait until every worker has woken, processed its slice, and parked.
while (self->g_processed_threads != self->g_cpu_count){
pthread_cond_wait(&(self->g_start_boss) \
,&(self->g_thread_lock));
}
//printf("\nAfter parallel");
//for (EDGE_ID mm = 0; mm < self->g_cohom_ws_size; mm++){
// printf("\n%d:(%d, %d) flag %d", self->g_V_ws_H1[mm].edge\
// , self->g_V_ws_H1[mm].pivot.key1\
// , self->g_V_ws_H1[mm].pivot.key2\
// , self->g_V_ws_H1[mm].flag_append_to_complex);
// printf(" v: ");
// for (EDGE_ID bb = 0; bb < self->g_V_ws_H1[mm].v_edges.last; bb++){
// printf("%d, ", self->g_V_ws_H1[mm].v_edges.o_ab[bb]);
// }
//}
// SERIAL
reduce_with_self_coH1(self);
//printf("\nAfter serial");
//for (EDGE_ID mm = 0; mm < self->g_cohom_ws_size; mm++){
// printf("\n%d:(%d, %d) flag %d", self->g_V_ws_H1[mm].edge\
// , self->g_V_ws_H1[mm].pivot.key1\
// , self->g_V_ws_H1[mm].pivot.key2\
// , self->g_V_ws_H1[mm].flag_append_to_complex);
// printf(" v: ");
// for (EDGE_ID bb = 0; bb < self->g_V_ws_H1[mm].v_edges.last; bb++){
// printf("%d, ", self->g_V_ws_H1[mm].v_edges.o_ab[bb]);
// }
//}
//getchar();
// CLEARANCE
int count_valid = 0;
for (int ws_counter = 0; ws_counter < self->g_ws_counter; ws_counter++){
if (!self->g_V_ws_H1[ws_counter].flag_non_empty){
// Add the undead H1
// Open-ended bar: death recorded as the sentinel -1.
if (self->g_H1_pers_pairs_len+2 == self->g_H1_pers_pairs_max_len){
self->g_H1_pers_pairs_max_len += 1000;
self->g_H1_pers_pairs = (PAR*)realloc(self->g_H1_pers_pairs\
, self->g_H1_pers_pairs_max_len*sizeof(PAR));
}
self->g_H1_pers_pairs[self->g_H1_pers_pairs_len++] = \
self->g_edge_parameter[self->g_V_ws_H1[ws_counter].edge];
self->g_H1_pers_pairs[self->g_H1_pers_pairs_len++] = -1;
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
self->g_H1_undead[self->g_H1_undead_ptr++] = self->g_V_ws_H1[ws_counter].edge;
if (self->g_H1_undead_ptr == self->g_H1_undead_max){
self->g_H1_undead_max += 100;
self->g_H1_undead = (EDGE_ID*)realloc(self->g_H1_undead\
, self->g_H1_undead_max*sizeof(EDGE_ID));
}
}
//#endif
continue;
}
if (self->g_V_ws_H1[ws_counter].flag_append_to_complex){
update_V_coH1(self, ws_counter);
continue;
}
// Swap V
// Column is still active: move it into the compacted prefix.
coboundary_H1_ws temp = self->g_V_ws_H1[count_valid];
self->g_V_ws_H1[count_valid] = self->g_V_ws_H1[ws_counter];
self->g_V_ws_H1[ws_counter] = temp;
// At this point, this has to be a non-zero column
self->g_V_ws_H1[count_valid].flag_non_empty = 1;
// Run through parallel at least once
self->g_V_ws_H1[count_valid].flag_append_to_complex = 0;
count_valid++;
}
self->g_ws_counter = count_valid;
}
/*
 * Serial phase of the H2 (cohomology) reduction: reduce each workspace
 * column against the columns that precede it in the workspace.
 *
 * For column ws_counter, scan columns m < ws_counter. Whenever m shares
 * the same pivot (low), merge m's boundary triangles and implicit hash
 * table into this column, re-reduce, and re-derive this column's next
 * reduction target (trivial pair or recorded complex pivot). The scan
 * restarts from m = 0 after every merge. A column bails out of the scan
 * as soon as it acquires a reduce-with-trivial or reduce-with-complex
 * flag, or when it depends on a smaller-pivot column that is itself
 * still pending (flag_append_to_complex is then cleared so it is retried
 * next round).
 */
void reduce_with_self_coH2(filtration* self){
    // Now we have to reduce
    for (int ws_counter = 0; ws_counter < self->g_ws_counter; ws_counter++){
        coboundary_H2_ws* this_ws = self->g_V_ws_H2 + ws_counter;
        // If empty, then continue and don't append to complex
        if (!this_ws->flag_non_empty){
            //this_ws->flag_append_to_complex = 0;
            continue;
        }
        int m = 0;
        // Keep reducing if reduce with complex flag is 0 and reduce with trivial flag is 0
        while((m < ws_counter)
              && (!this_ws->flag_red_w_complex)
              && (!this_ws->flag_red_w_trivial)){
            coboundary_H2_ws* m_ws = self->g_V_ws_H2 + m;
            // If m is empty, continue
            if (!m_ws->flag_non_empty){
                m++;
                continue;
            }
            // Lexicographic pivot comparison:
            //   1  => m's pivot is higher than ours
            //   0  => m's pivot is lower than ours
            //  -1  => pivots are equal
            int compare;
            if (m_ws->pivot.key1 > this_ws->pivot.key1) compare = 1;
            else if (m_ws->pivot.key1 < this_ws->pivot.key1) compare = 0;
            else{
                if (m_ws->pivot.key2 > this_ws->pivot.key2) compare = 1;
                else if (m_ws->pivot.key2 < this_ws->pivot.key2) compare = 0;
                else compare = -1;
            }
            // If pivot of m is higher than pivot of ws_counter
            // then we don't care
            if (compare == 1){
                m++;
                continue;
            }
            // If pivot of m is lower than pivot of ws_counter
            // then if m has to be reduced, we have to hold ws_counter
            if (compare == 0){
                if (m_ws->flag_red_w_complex || m_ws->flag_red_w_trivial){
                    this_ws->flag_append_to_complex = 0;
                    break;
                }
                m++;
                continue;
            }
            // At this point they have same low.
            // If m itself still has pending reductions, hold this column.
            if (m_ws->flag_red_w_complex || m_ws->flag_red_w_trivial){
                this_ws->flag_append_to_complex = 0;
                break;
            }
            // Merge m into this_ws.
            //
            // Merge v_triangles (we add m's own triangle plus its V part,
            // hence the +1 headroom in the capacity check).
            if (this_ws->v_triangles.last + m_ws->v_triangles.last > this_ws->v_triangles.max_len - 1){
                this_ws->v_triangles.max_len += m_ws->v_triangles.last + 100;
                self->g_V_ws_H2[ws_counter].v_triangles.o_abc = (simplex*)realloc(
                        self->g_V_ws_H2[ws_counter].v_triangles.o_abc
                        , this_ws->v_triangles.max_len*sizeof(simplex));
            }
            // Add the original triangle of m
            this_ws->v_triangles.o_abc[this_ws->v_triangles.last++] = m_ws->triangle;
            for (EDGE_ID bb = 0; bb < m_ws->v_triangles.last; bb++){
                this_ws->v_triangles.o_abc[this_ws->v_triangles.last++] =
                        m_ws->v_triangles.o_abc[bb];
            }
            // Merge hash tables: insert every live entry of m's implicit
            // table (starting at m's current scan position in its pivot
            // bucket) into this column's table.
            coboundary_H2 ttemp;
            for (EDGE_ID bb = 0; bb < m_ws->last; bb++){
                EDGE_ID m_start = 0;
                if (bb == m_ws->k1_ptr){
                    m_start = m_ws->k2_ptr;
                }
                ttemp.low.key1 = m_ws->keys1[bb].k1;
                for (EDGE_ID mm = m_start; mm < m_ws->keys1[bb].last; mm++){
                    ttemp.low.key2 = m_ws->keys1[bb].keys2[mm].k2;
                    ttemp.triangle = m_ws->keys1[bb].keys2[mm].o_abc;
                    ttemp.a_ptr = m_ws->keys1[bb].keys2[mm].a_ptr;
                    ttemp.b_ptr = m_ws->keys1[bb].keys2[mm].b_ptr;
                    ttemp.c_ptr = m_ws->keys1[bb].keys2[mm].c_ptr;
                    ttemp.vertex = m_ws->keys1[bb].keys2[mm].vertex;
                    coH2_insert_in_implicit_v(self, ws_counter, &(ttemp)
                            , m_ws->keys1[bb].keys2[mm].flag_next);
                }
            }
            // Now reduce the merged table to find the new pivot.
            reduce_hash_table_coH2(self, ws_counter);
            if (!this_ws->flag_non_empty){
                break;
            }
            coboundary_H2 temptemp;
            // CHECK FOR TRIVIAL PAIR:
            // Build the maximal triangle <ab, d> over the new pivot edge
            // and compute its low.
            temptemp.triangle.key1 = this_ws->pivot.key1;
            temptemp.triangle.key2 = self->g_edges_list[2*this_ws->pivot.key2+1];
            find_H2_cohom_low(self, &temptemp);
            // If that low equals our pivot, we have a trivial pair to reduce with.
            if ((temptemp.low.key1 == this_ws->pivot.key1)
                && (temptemp.low.key2 == this_ws->pivot.key2)){
                this_ws->flag_red_w_trivial = 1;
                this_ws->flag_red_w_complex = 0;
                this_ws->flag_append_to_complex = 0;
                this_ws->reduce_w_bndry = temptemp.triangle;
                this_ws->V_col_idx = 0;
                break;
            }
            // Otherwise, check whether the new pivot is already recorded
            // in the complex; if so, schedule reduction with that column.
            if (self->g_H2_cohom_pivots_len[this_ws->pivot.key1]){
                EDGE_ID idx = search_H2_cohom_pivots(self->g_H2_cohom_pivots[this_ws->pivot.key1]
                        , 0
                        , self->g_H2_cohom_pivots_len[this_ws->pivot.key1] - 1
                        , this_ws->pivot.key2
                        , self->g_n_valid_edges);
                if (idx != self->g_n_valid_edges){
                    this_ws->flag_red_w_trivial = 0;
                    this_ws->flag_red_w_complex = 1;
                    this_ws->flag_append_to_complex = 0;
                    this_ws->reduce_w_bndry = self->g_H2_cohom_pivots[this_ws->pivot.key1][idx].bndry;
                    this_ws->V_col_idx = self->g_H2_cohom_pivots[this_ws->pivot.key1][idx].col_idx;
                    break;
                }
            }
            // Restart the scan after a successful merge: the new pivot may
            // now clash with an earlier column.
            m = 0;
        }
    }
}
/*
 * Worker-thread entry point for the parallel phase of the H2 reduction.
 *
 * Each worker claims a unique 1-based thread id, then loops forever:
 * rendezvous with the boss (sleep on g_start_workers, signalling
 * g_start_boss once all workers are asleep), exit if g_delete_threads is
 * set, otherwise reduce its slice of workspace columns
 * [g_jobs[tid-1], g_jobs[tid]) against the already-recorded complex and
 * against trivial pairs. Columns whose pivot cannot be matched are
 * flagged flag_append_to_complex; actual appending happens later in the
 * serial clearance phase.
 */
void* reduce_with_complex_coH2(void* arg){
    filtration* self = arg;
    /* Hand out a unique thread id under the lock. */
    pthread_mutex_lock(&(self->g_thread_lock));
    int tid = ++self->g_thread_id;
    coboundary_H2_ws* this_ws;
    coboundary_H2 temp;
    EDGE_ID idx, check_len, start, end;
    pthread_mutex_unlock(&(self->g_thread_lock));
    for (;;){
        /* Rendezvous: count ourselves asleep/processed, wake the boss when
           the last worker arrives, then wait for the next round. */
        pthread_mutex_lock(&(self->g_thread_lock));
        self->g_sleeping_threads++;
        self->g_processed_threads++;
        if (self->g_sleeping_threads == self->g_cpu_count){
            pthread_cond_signal(&(self->g_start_boss));
        }
        pthread_cond_wait(&(self->g_start_workers), &(self->g_thread_lock));
        if (self->g_delete_threads){
            pthread_mutex_unlock(&(self->g_thread_lock));
            pthread_exit(NULL);
        }
        self->g_sleeping_threads--;
        pthread_mutex_unlock(&(self->g_thread_lock));
        /* Process this thread's slice of the workspace. */
        for (int ws_counter = self->g_jobs[tid - 1]; ws_counter < self->g_jobs[tid]; ws_counter++){
            this_ws = self->g_V_ws_H2 + ws_counter;
            if (!this_ws->flag_non_empty){
                // We are sure that we will exit only if there is no reduction
                // required with existing complex or with trivial pair
                this_ws->flag_red_w_complex = 0;
                this_ws->flag_red_w_trivial = 0;
                this_ws->flag_append_to_complex = 0;
                continue;
            }
            if (this_ws->flag_append_to_complex){
                /* Already settled; nothing to do this round. */
                continue;
            }
            if (this_ws->flag_first){
                /* First time this column is seen: derive its initial
                   reduction target. */
                this_ws->flag_first = 0;
                // CHECK WITH TRIVIAL: build the maximal triangle over the
                // pivot edge and see if its low equals our pivot.
                temp.triangle.key1 = this_ws->pivot.key1;
                temp.triangle.key2 = self->g_edges_list[2*this_ws->pivot.key2+1];
                find_H2_cohom_low(self, &temp);
                if ((temp.low.key1 == this_ws->pivot.key1)
                    && (temp.low.key2 == this_ws->pivot.key2)){
                    this_ws->flag_red_w_trivial = 1;
                    this_ws->reduce_w_bndry = temp.triangle;
                    this_ws->V_col_idx = 0;
                }
                else{
                    if (!self->g_H2_cohom_pivots_len[this_ws->pivot.key1]){
                        /* Pivot not in the complex: schedule for append. */
                        this_ws->flag_red_w_complex = 0;
                        this_ws->flag_red_w_trivial = 0;
                        this_ws->flag_append_to_complex = 1;
                        continue;
                    }
                    else{
                        idx = search_H2_cohom_pivots(self->g_H2_cohom_pivots[this_ws->pivot.key1]
                                , 0
                                , self->g_H2_cohom_pivots_len[this_ws->pivot.key1] - 1
                                , this_ws->pivot.key2
                                , self->g_n_valid_edges);
                        // If this low is not a pivot
                        if (idx == self->g_n_valid_edges){
                            this_ws->flag_red_w_complex = 0;
                            this_ws->flag_red_w_trivial = 0;
                            this_ws->flag_append_to_complex = 1;
                            continue;
                        }
                        else{
                            this_ws->flag_red_w_complex = 1;
                            this_ws->reduce_w_bndry = self->g_H2_cohom_pivots[this_ws->pivot.key1][idx].bndry;
                            this_ws->V_col_idx = self->g_H2_cohom_pivots[this_ws->pivot.key1][idx].col_idx;
                        }
                    }
                }
            }
            if ((!this_ws->flag_red_w_trivial) && (!this_ws->flag_red_w_complex)){
                /* No reduction target: schedule for append. */
                this_ws->flag_append_to_complex = 1;
                continue;
            }
            // Presume that this will be flagged to be added to complex
            this_ws->flag_red_w_complex = 0;
            this_ws->flag_red_w_trivial = 0;
            this_ws->flag_append_to_complex = 1;
            int flag = 0;  /* debug leftover; only referenced by removed debug prints */
            /* Reduction loop: repeatedly add reduce_w_bndry (and its
               recorded V column, if any) into this column's implicit
               table, re-reduce, and look up the next target for the new
               pivot. Exits when the column zeroes out or its pivot has no
               match (leaving flag_append_to_complex set). */
            while(1){
                /* Ensure capacity for the boundary triangle plus its V column. */
                check_len = this_ws->v_triangles.last + 1;
                if (this_ws->V_col_idx){
                    check_len += self->g_V_col_indices[this_ws->V_col_idx+1] -
                            self->g_V_col_indices[this_ws->V_col_idx];
                }
                if (check_len > this_ws->v_triangles.max_len){
                    this_ws->v_triangles.max_len = check_len + 100;
                    self->g_V_ws_H2[ws_counter].v_triangles.o_abc =
                            (simplex*)realloc(self->g_V_ws_H2[ws_counter].v_triangles.o_abc
                            , this_ws->v_triangles.max_len*sizeof(simplex));
                }
                this_ws->v_triangles.o_abc[this_ws->v_triangles.last++] = this_ws->reduce_w_bndry;
                temp.triangle = this_ws->reduce_w_bndry;
                /* Only cofaces with low >= our pivot can matter. */
                find_H2_cohom_greater(self, &(temp), &(this_ws->pivot));
                coH2_insert_in_implicit_v(self, ws_counter, &(temp), 1);
                // IF the V was recorded, add the bndries
                if (this_ws->V_col_idx){
                    // We have to cycle through the col in V and add all the other boundary columns for reduction
                    start = self->g_V_col_indices[this_ws->V_col_idx];
                    end = self->g_V_col_indices[this_ws->V_col_idx+1];
                    for (EDGE_ID mm = start; mm < end; mm++){
                        this_ws->v_triangles.o_abc[this_ws->v_triangles.last++] = self->g_V_sparse_H2[mm];
                        temp.triangle = self->g_V_sparse_H2[mm];
                        // Find the first low greater than or equal pivot
                        find_H2_cohom_greater(self, &(temp), &(this_ws->pivot));
                        coH2_insert_in_implicit_v(self, ws_counter, &(temp), 1);
                    }
                }
                reduce_hash_table_coH2(self, ws_counter);
                if (!this_ws->flag_non_empty){
                    break;
                }
                // Check with trivial pair for the new pivot
                temp.triangle.key1 = this_ws->pivot.key1;
                temp.triangle.key2 = self->g_edges_list[2*this_ws->pivot.key2+1];
                find_H2_cohom_low(self, &temp);
                if ((temp.low.key1 == this_ws->pivot.key1)
                    && (temp.low.key2 == this_ws->pivot.key2)){
                    /* Reduce with the trivial pair next iteration. */
                    this_ws->reduce_w_bndry = temp.triangle;
                    this_ws->V_col_idx = 0;
                    continue;
                }
                // If this low is not a pivot of the recorded complex, stop
                // (flag_append_to_complex remains set).
                if (!self->g_H2_cohom_pivots_len[this_ws->pivot.key1]){
                    break;
                }
                idx = search_H2_cohom_pivots(self->g_H2_cohom_pivots[this_ws->pivot.key1]
                        , 0
                        , self->g_H2_cohom_pivots_len[this_ws->pivot.key1] - 1
                        , this_ws->pivot.key2
                        , self->g_n_valid_edges
                        );
                if (idx == self->g_n_valid_edges){
                    break;
                }
                /* Reduce with the recorded complex column next iteration. */
                this_ws->reduce_w_bndry = self->g_H2_cohom_pivots[this_ws->pivot.key1][idx].bndry;
                this_ws->V_col_idx = self->g_H2_cohom_pivots[this_ws->pivot.key1][idx].col_idx;
            }
        }
    }
}
/*
 * Commit workspace column ws_counter to the recorded H2 complex.
 *
 * Flushes the column's accumulated V triangles into the global sparse V
 * matrix (g_V_sparse_H2), records the new column's [begin, end) range in
 * g_V_col_indices, and registers the column's pivot via add_coH2_pivot.
 * red_col stays 0 (meaning "no stored V column") when nothing was
 * flushed. Under VREDUCE2 the triangle list is sorted and mod-2
 * cancelled (pairs of identical triangles are dropped) before flushing.
 */
void update_V_coH2(filtration* self, int ws_counter){
    EDGE_ID red_col = 0;
    coboundary_H2_ws* this_ws = self->g_V_ws_H2 + ws_counter;
    /* Remember where this column's entries start in the sparse store. */
    self->g_V_sparse_beg_ptr = self->g_V_sparse_ptr;
    if (this_ws->v_triangles.last){
        if ((this_ws->v_triangles.last + self->g_V_sparse_ptr) + 1 > self->g_V_sparse_max){
            self->g_V_sparse_max = self->g_V_sparse_ptr + this_ws->v_triangles.last + 100000;
            self->g_V_sparse_H2 = (simplex*)realloc(self->g_V_sparse_H2
                    , self->g_V_sparse_max*sizeof(simplex));
        }
        if (this_ws->v_triangles.last > 1){
#ifdef VREDUCE2
            /* Sort, then emit each triangle only if it appears an odd
               number of times (Z/2 coefficients: pairs cancel). */
            sorter4_tim_sort(this_ws->v_triangles.o_abc, this_ws->v_triangles.last);
            int coeff = 1;
            for (EDGE_ID vv = 0; vv < this_ws->v_triangles.last-1; vv++){
                if ((this_ws->v_triangles.o_abc[vv].key1 == this_ws->v_triangles.o_abc[vv+1].key1) &&
                    (this_ws->v_triangles.o_abc[vv].key2 == this_ws->v_triangles.o_abc[vv+1].key2))
                {
                    coeff = 1 - coeff;
                }
                else{
                    if (coeff){
                        self->g_V_sparse_H2[self->g_V_sparse_ptr++] = this_ws->v_triangles.o_abc[vv];
                    }
                    coeff = 1;
                }
            }
            /* The final run's survivor (if any). */
            if (coeff){
                self->g_V_sparse_H2[self->g_V_sparse_ptr++] = this_ws->v_triangles.o_abc[this_ws->v_triangles.last-1];
            }
#else
            /* No cancellation: copy everything verbatim. */
            for (EDGE_ID vv = 0; vv < this_ws->v_triangles.last; vv++){
                self->g_V_sparse_H2[self->g_V_sparse_ptr++] = this_ws->v_triangles.o_abc[vv];
            }
#endif
        }
        else if (this_ws->v_triangles.last == 1){
            self->g_V_sparse_H2[self->g_V_sparse_ptr++] = this_ws->v_triangles.o_abc[0];
        }
        // All have been added
        this_ws->v_triangles.last = 0;
        /* Only record a V column if something survived the flush. */
        if ((self->g_V_sparse_ptr - self->g_V_sparse_beg_ptr) > 0){
            red_col = self->g_V_col_indices_ptr;
            if (self->g_V_col_indices_ptr+1 == self->g_V_col_indices_max){
                self->g_V_col_indices_max += 10000;
                self->g_V_col_indices = (EDGE_ID*)realloc(self->g_V_col_indices
                        , self->g_V_col_indices_max*sizeof(EDGE_ID));
            }
            self->g_V_col_indices[self->g_V_col_indices_ptr] = self->g_V_sparse_beg_ptr;
            self->g_V_col_indices[self->g_V_col_indices_ptr+1] = self->g_V_sparse_ptr;
            self->g_V_col_indices_ptr++;
        }
    }
    // ADDING THE LOW (pivot) to the complex, with the V column index (or 0).
    add_coH2_pivot(self, this_ws->triangle, this_ws->pivot, red_col);
}
/*
 * Record `pivot` as a new H2 cohomology pivot, keeping the per-key1 row
 * g_H2_cohom_pivots[pivot.key1] sorted in ascending key2 order, and emit
 * the persistence pair (birth, death) when the bar has non-zero length.
 *
 * triangle : the boundary column whose reduction produced this pivot
 * pivot    : the (key1, key2) low of that column
 * red_col  : index of the stored V column (0 = no V column recorded)
 */
void add_coH2_pivot (filtration* self, simplex triangle, simplex pivot, EDGE_ID red_col){
    H2_cohom_pivots** row = &(self->g_H2_cohom_pivots[pivot.key1]);
    /* Lazily allocate the row on first use, then grow it when full. */
    if (!self->g_H2_cohom_pivots_max_len[pivot.key1]){
        self->g_H2_cohom_pivots_max_len[pivot.key1] = 2;
        *row = (H2_cohom_pivots*)malloc(self->g_H2_cohom_pivots_max_len[pivot.key1]*sizeof(H2_cohom_pivots));
    }
    if (self->g_H2_cohom_pivots_len[pivot.key1]
            == self->g_H2_cohom_pivots_max_len[pivot.key1]){
        self->g_H2_cohom_pivots_max_len[pivot.key1] += 5;
        *row = (H2_cohom_pivots*)realloc(*row
                , self->g_H2_cohom_pivots_max_len[pivot.key1]*sizeof(H2_cohom_pivots));
    }
    /* Insertion-sort shift: slide entries with larger key2 one slot to
       the right, then drop the new pivot into the gap. */
    EDGE_ID pos = self->g_H2_cohom_pivots_len[pivot.key1];
    while (pos && ((*row)[pos-1].key2 > pivot.key2)){
        (*row)[pos] = (*row)[pos-1];
        pos--;
    }
    (*row)[pos].key2 = pivot.key2;
    (*row)[pos].col_idx = red_col;
    (*row)[pos].bndry = triangle;
    self->g_H2_cohom_pivots_len[pivot.key1]++;
    // PERS PAIRS
    /* Emit the bar; zero-length bars (birth == death) are skipped. */
    PAR birth = self->g_edge_parameter[triangle.key1];
    PAR death = self->g_edge_parameter[pivot.key1];
    if (birth != death){
        if (birth > death){
            /* Invariant violation: a class cannot die before it is born. */
            printf("\nBirth, death (%lf, %lf)", birth, death);
            printf("\nError (%d, %d) at pair (%d, %d)", triangle.key1
                                                      , triangle.key2
                                                      , pivot.key1
                                                      , pivot.key2);
            getchar();
        }
        if (self->g_H2_pers_pairs_len+2 == self->g_H2_pers_pairs_max_len){
            self->g_H2_pers_pairs_max_len += 1000;
            self->g_H2_pers_pairs = (PAR*)realloc(self->g_H2_pers_pairs
                    , self->g_H2_pers_pairs_max_len*sizeof(PAR));
        }
        self->g_H2_pers_pairs[self->g_H2_pers_pairs_len++] = birth;
        self->g_H2_pers_pairs[self->g_H2_pers_pairs_len++] = death;
    }
}
/*
 * Drive one full reduction round over the H2 coboundary workspace.
 * Mirrors reduce_ws_coH1 one dimension up.
 *
 * Phase 1 (parallel):  wake the worker threads and block until every
 *                      worker has reported back via g_processed_threads.
 * Phase 2 (serial):    reduce the workspace columns against each other.
 * Phase 3 (clearance): emit undead H2 classes for zeroed columns, append
 *                      settled columns to the complex, and compact the
 *                      still-active columns to the front.
 *
 * NOTE(review): assumes the caller holds g_thread_lock; pthread_cond_wait
 * releases it while sleeping — confirm against the call site.
 */
void reduce_ws_coH2(filtration* self){
    /* ---- Phase 1: parallel reduction against the recorded complex ---- */
    self->g_processed_threads = 0;
    pthread_cond_broadcast(&(self->g_start_workers));
    while (self->g_processed_threads != self->g_cpu_count){
        pthread_cond_wait(&(self->g_start_boss), &(self->g_thread_lock));
    }
    /* ---- Phase 2: serial reduction of the columns against each other ---- */
    reduce_with_self_coH2(self);
    /* ---- Phase 3: clearance ---- */
    int kept = 0;
    for (int idx = 0; idx < self->g_ws_counter; idx++){
        coboundary_H2_ws* col = self->g_V_ws_H2 + idx;
        if (!col->flag_non_empty){
            /* Column reduced to zero: its triangle creates a class that
               never dies. Record an open bar (birth, -1). */
            if (self->g_H2_pers_pairs_len + 2 == self->g_H2_pers_pairs_max_len){
                self->g_H2_pers_pairs_max_len += 1000;
                self->g_H2_pers_pairs = (PAR*)realloc(self->g_H2_pers_pairs
                                      , self->g_H2_pers_pairs_max_len*sizeof(PAR));
            }
            self->g_H2_pers_pairs[self->g_H2_pers_pairs_len++] =
                    self->g_edge_parameter[col->triangle.key1];
            self->g_H2_pers_pairs[self->g_H2_pers_pairs_len++] = -1;
            if (self->g_compute_cycles){
                /* Keep the representative triangle for cycle extraction. */
                self->g_H2_undead[self->g_H2_undead_ptr++] = col->triangle;
                if (self->g_H2_undead_ptr == self->g_H2_undead_max){
                    self->g_H2_undead_max += 100;
                    self->g_H2_undead = (simplex*)realloc(self->g_H2_undead
                                      , self->g_H2_undead_max*sizeof(simplex));
                }
            }
        }
        else if (col->flag_append_to_complex){
            /* Pivot is final: commit the column (and its V part) to the complex. */
            update_V_coH2(self, idx);
        }
        else{
            /* Still active: swap it forward to slot `kept` and reset its
               flags so it goes through the parallel phase at least once
               more next round. */
            coboundary_H2_ws spare = self->g_V_ws_H2[kept];
            self->g_V_ws_H2[kept] = *col;
            *col = spare;
            /* At this point the compacted column must be non-zero. */
            self->g_V_ws_H2[kept].flag_non_empty = 1;
            self->g_V_ws_H2[kept].flag_append_to_complex = 0;
            kept++;
        }
    }
    self->g_ws_counter = kept;
}
/*
 * Reduce workspace column ws_counter's implicit hash table until a pivot
 * (lowest surviving entry, Z/2 coefficients) is found, or the column is
 * found to be zero (flag_non_empty cleared).
 *
 * The table is keyed by low.key1 (buckets in keys1[]) and, within the
 * current bucket (*k1_ptr), scanned in sorted key2 order from *k2_ptr.
 * Adjacent equal key2 values cancel mod 2 (coeff toggles); entries with
 * flag_next set are lazily expanded by inserting their next implicit
 * coboundary element before being consumed. When the current bucket is
 * exhausted, it is marked empty, the buckets are compacted, the bucket
 * with the smallest key1 becomes current, and scanning restarts.
 */
void reduce_hash_table_coH2(filtration* self, int ws_counter){
    // Now we have to reduce
    int coeff = 1;
    coboundary_H2_ws* this_ws = self->g_V_ws_H2 + ws_counter;
    coboundary_H2 ttemp;
    EDGE_ID* k1_ptr = &(this_ws->k1_ptr);
    EDGE_ID* k2_ptr = &(this_ws->k2_ptr);
    while (1){
        /* Single entry in the bucket: it is the pivot. */
        if (this_ws->keys1[*k1_ptr].last == 1){
            this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
            this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
            break;
        }
        if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2 ==
                this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].k2){
            /* Equal lows cancel mod 2. */
            coeff = 1 - coeff;
            if ((this_ws->keys1[*k1_ptr].keys2[*k2_ptr].o_abc.key1 ==
                    this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].o_abc.key1) &&
                (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].o_abc.key2 ==
                    this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].o_abc.key2))
            {
                /* Identical source triangle on both entries: their lazy
                   expansions would also cancel, so drop both flag_next. */
                if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next ==
                        this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].flag_next){
                    this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next = 0;
                    this_ws->keys1[*k1_ptr].keys2[*k2_ptr+1].flag_next = 0;
                }
            }
        }
        else{
            /* Distinct low ahead: if the current entry survived mod 2,
               it is the pivot. */
            if (coeff){
                this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
                this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
                break;
            }
            else{
                coeff = 1;
            }
        }
        /* Lazy expansion: before consuming this entry, insert its next
           implicit coboundary element into the table. */
        if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next){
            ttemp.triangle = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].o_abc;
            ttemp.a_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].a_ptr;
            ttemp.b_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].b_ptr;
            ttemp.c_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].c_ptr;
            ttemp.vertex = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].vertex;
            ttemp.low.key1 = this_ws->keys1[*k1_ptr].k1;
            ttemp.low.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
            find_H2_cohom_next(self, &(ttemp));
            this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next = 0;
            coH2_insert_in_implicit_v(self, ws_counter, &(ttemp), 1);
            // It is possible that last key1 and last key2 changed. Make sure last is consistent
        }
        *k2_ptr = *k2_ptr + 1;
        /* Reached the final entry of the current bucket. */
        if (*k2_ptr == this_ws->keys1[*k1_ptr].last-1){
            if (coeff){
                /* The previous entry survived: it is the pivot. */
                this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
                this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
                break;
            }
            /* Expand the last entry too before closing the bucket. */
            if (this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next){
                ttemp.triangle = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].o_abc;
                ttemp.a_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].a_ptr;
                ttemp.b_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].b_ptr;
                ttemp.c_ptr = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].c_ptr;
                ttemp.vertex = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].vertex;
                ttemp.low.key1 = this_ws->keys1[*k1_ptr].k1;
                ttemp.low.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
                find_H2_cohom_next(self, &(ttemp));
                this_ws->keys1[*k1_ptr].keys2[*k2_ptr].flag_next = 0;
                coH2_insert_in_implicit_v(self, ws_counter, &(ttemp), 1);
            }
            /* The insert above may have grown this bucket by one; in that
               case the new final entry is the pivot. */
            if (*k2_ptr == this_ws->keys1[*k1_ptr].last-2){
                *k2_ptr = *k2_ptr + 1;
                this_ws->pivot.key1 = this_ws->keys1[*k1_ptr].k1;
                this_ws->pivot.key2 = this_ws->keys1[*k1_ptr].keys2[*k2_ptr].k2;
                break;
            }
            else{
                // Mark this key1 as empty
                this_ws->keys1[*k1_ptr].flag_empty = 1;
                // Reallocate to prune space
                if (this_ws->keys1[*k1_ptr].max_len > 5){
                    this_ws->keys1[*k1_ptr].max_len = 5;
                    self->g_V_ws_H2[ws_counter].keys1[*k1_ptr].keys2 =
                            (coH2_implicit_keys2*)realloc
                            (self->g_V_ws_H2[ws_counter].keys1[*k1_ptr].keys2
                            , self->g_V_ws_H2[ws_counter].keys1[*k1_ptr].max_len
                            *sizeof(coH2_implicit_keys2));
                }
                /* Compact the non-empty buckets to the front and select
                   the one with the smallest key1 as the new current bucket. */
                EDGE_ID current_ptr = 0;
                EDGE_ID minn = self->g_n_valid_edges;
                for (EDGE_ID mm = 0; mm < this_ws->last; mm++){
                    if (this_ws->keys1[mm].flag_empty){
                        continue;
                    }
                    coH2_implicit_keys1 ttemp = this_ws->keys1[current_ptr];
                    this_ws->keys1[current_ptr] = this_ws->keys1[mm];
                    this_ws->keys1[mm] = ttemp;
                    if (this_ws->keys1[current_ptr].k1 < minn){
                        minn = this_ws->keys1[current_ptr].k1;
                        *k1_ptr = current_ptr;
                    }
                    current_ptr++;
                }
                this_ws->last = current_ptr;
                /* No bucket left: the column is zero. */
                if (minn == self->g_n_valid_edges){
                    this_ws->flag_non_empty = 0;
                    break;
                }
                // Otherwise reset coefficient and begin reduction
                coeff = 1;
                *k2_ptr = 0;
                sorter9_tim_sort(this_ws->keys1[*k1_ptr].keys2
                        , this_ws->keys1[*k1_ptr].last);
            }
        }
    }
}
void coH2_insert_in_implicit_v(filtration* self, int ws_counter, coboundary_H2* phi, int flag_next){
if (phi->low.key1 == self->g_n_valid_edges){
return;
}
coboundary_H2_ws* this_ws = self->g_V_ws_H2 + ws_counter;
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nINSERTING (%d, %d):(%d, %d)"\
// , phi->triangle.key1\
// , phi->triangle.key2\
// , phi->low.key1\
// , phi->low.key2\
// );
//}
if (phi->low.key1 == this_ws->keys1[this_ws->k1_ptr].k1){
if (this_ws->keys1[this_ws->k1_ptr].last ==\
this_ws->keys1[this_ws->k1_ptr].max_len){
self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].max_len += 10;
self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].keys2 = \
(coH2_implicit_keys2*)realloc\
(self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].keys2\
, self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].max_len\
*sizeof(coH2_implicit_keys2));
}
EDGE_ID mm = this_ws->keys1[this_ws->k1_ptr].last;
coH2_implicit_keys2* this_key2 = &(this_ws->keys1[this_ws->k1_ptr].keys2[mm-1]);
int compare;
while (1){
//int compare = coH2_compare_implicit(this_ws->keys1[this_ws->k1_ptr].keys2[mm-1], *phi);
//if (this_ws->keys1[this_ws->k1_ptr].keys2[mm-1].k2 < phi->low.key2) compare = 0;
//else if (this_ws->keys1[this_ws->k1_ptr].keys2[mm-1].k2 > phi->low.key2) compare = 1;
if (this_key2->k2 < phi->low.key2) compare = 0;
else if (this_key2->k2 > phi->low.key2) compare = 1;
else{
if (this_key2->o_abc.key1 < phi->triangle.key1) compare = 0;
else if (this_key2->o_abc.key1 > phi->triangle.key1) compare = 1;
else{
if (this_key2->o_abc.key2 < phi->triangle.key2) compare = 0;
else compare = 1;
}
}
if (compare){
this_ws->keys1[this_ws->k1_ptr].keys2[mm] =\
this_ws->keys1[this_ws->k1_ptr].keys2[mm-1];
}
else{
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c1");
// coH2_print_v_implicit(self, ws_counter);
//}
coH2_implicit_keys1* this_key1 = &(this_ws->keys1[this_ws->k1_ptr]);
coH2_implicit_keys2* this_key2 = &(this_key1->keys2[mm]);
this_key2->k2 = phi->low.key2;
this_key2->o_abc = phi->triangle;
this_key2->a_ptr = phi->a_ptr;
this_key2->b_ptr = phi->b_ptr;
this_key2->c_ptr = phi->c_ptr;
this_key2->vertex = phi->vertex;
this_key2->flag_next = flag_next;
this_ws->keys1[this_ws->k1_ptr].last++;
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c2");
// coH2_print_v_implicit(self, ws_counter);
//}
return;
}
this_key2--;
mm--;
//// ERROR CHECKING, REMOVE LATER
//if (!mm){
// printf("\nk2_ptr %d", v_implicit->k2_ptr);
// printf("\nADDING %d:(%d, %d) to ", phi->o_ab, phi->low.key1, phi->low.key2);
// print_v_implicit(self);
// exit(0);
//
//}
if (mm == this_ws->k2_ptr){
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c3, idx %d, last is %d"\
// , self->g_V_ws_H2[ws_counter].k1_ptr\
// , self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].last);
// coH2_print_v_implicit(self, ws_counter);
//}
coH2_implicit_keys1* this_key1 = &(this_ws->keys1[this_ws->k1_ptr]);
coH2_implicit_keys2* this_key2 = &(this_key1->keys2[mm]);
this_key2->k2 = phi->low.key2;
this_key2->o_abc = phi->triangle;
this_key2->a_ptr = phi->a_ptr;
this_key2->b_ptr = phi->b_ptr;
this_key2->c_ptr = phi->c_ptr;
this_key2->vertex = phi->vertex;
this_key2->flag_next = flag_next;
this_ws->keys1[this_ws->k1_ptr].last++;
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c4, idx %d, last is %d"\
// , self->g_V_ws_H2[ws_counter].k1_ptr\
// , self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].last);
// coH2_print_v_implicit(self, ws_counter);
//}
return;
}
}
}
for (EDGE_ID mm = 0; mm < self->g_V_ws_H2[ws_counter].last; mm++){
if (self->g_V_ws_H2[ws_counter].keys1[mm].k1 == phi->low.key1){
//check_space_implicit_keys2(&(v_implicit->keys1[mm]));
if (self->g_V_ws_H2[ws_counter].keys1[mm].last ==\
self->g_V_ws_H2[ws_counter].keys1[mm].max_len){
self->g_V_ws_H2[ws_counter].keys1[mm].max_len += 10;
self->g_V_ws_H2[ws_counter].keys1[mm].keys2 = (coH2_implicit_keys2*)realloc\
(self->g_V_ws_H2[ws_counter].keys1[mm].keys2\
, self->g_V_ws_H2[ws_counter].keys1[mm].max_len*sizeof(coH2_implicit_keys2));
}
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c5, idx %d, last is %d"\
// , self->g_V_ws_H2[ws_counter].k1_ptr\
// , self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].last);
// coH2_print_v_implicit(self, ws_counter);
//}
coH2_implicit_keys1* this_key1 = &(this_ws->keys1[mm]);
coH2_implicit_keys2* this_key2 = &(this_key1->keys2[this_key1->last]);
this_ws->keys1[mm].flag_empty = 0;
this_key2->k2 = phi->low.key2;
this_key2->o_abc = phi->triangle;
this_key2->a_ptr = phi->a_ptr;
this_key2->b_ptr = phi->b_ptr;
this_key2->c_ptr = phi->c_ptr;
this_key2->vertex = phi->vertex;
this_key2->flag_next = flag_next;
this_key1->last++;
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c6, idx %d, last is %d"\
// , self->g_V_ws_H2[ws_counter].k1_ptr\
// , self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].last);
// coH2_print_v_implicit(self, ws_counter);
//}
return;
}
}
if (self->g_V_ws_H2[ws_counter].last == self->g_V_ws_H2[ws_counter].max_len){
EDGE_ID mm = self->g_V_ws_H2[ws_counter].max_len;
self->g_V_ws_H2[ws_counter].max_len += 10;
self->g_V_ws_H2[ws_counter].keys1 = (coH2_implicit_keys1*)realloc(self->g_V_ws_H2[ws_counter].keys1\
, self->g_V_ws_H2[ws_counter].max_len*sizeof(coH2_implicit_keys1));
while (mm < self->g_V_ws_H2[ws_counter].max_len){
this_ws->keys1[mm].flag_empty = 1;
this_ws->keys1[mm].max_len = 10;
this_ws->keys1[mm].last = 0;
self->g_V_ws_H2[ws_counter].keys1[mm].keys2 = (coH2_implicit_keys2*)malloc(10*sizeof(coH2_implicit_keys2));
mm++;
}
}
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c7, idx %d, last is %d"\
// , self->g_V_ws_H2[ws_counter].k1_ptr\
// , self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].last);
// coH2_print_v_implicit(self, ws_counter);
//}
coH2_implicit_keys1* this_key1 = &(this_ws->keys1[this_ws->last]);
coH2_implicit_keys2* this_key2 = &(this_key1->keys2[0]);
this_key1->flag_empty = 0;
this_key1->k1 = phi->low.key1;
this_key2->k2 = phi->low.key2;
this_key2->o_abc = phi->triangle;
this_key2->a_ptr = phi->a_ptr;
this_key2->b_ptr = phi->b_ptr;
this_key2->c_ptr = phi->c_ptr;
this_key2->vertex = phi->vertex;
this_key2->flag_next = flag_next;
this_key1->last = 1;
this_ws->last++;
//if ((this_ws->triangle.key1 == self->g_debug_triangle.key1) &&\
// (this_ws->triangle.key2 == self->g_debug_triangle.key2)){
// printf("\nin c7, idx %d, last is %d"\
// , self->g_V_ws_H2[ws_counter].k1_ptr\
// , self->g_V_ws_H2[ws_counter].keys1[self->g_V_ws_H2[ws_counter].k1_ptr].last);
// coH2_print_v_implicit(self, ws_counter);
//}
return;
}
/*
 * Counts all simplices present in the filtration and stores the total in
 * self->g_n_all_simp: vertices + valid edges, plus every triangle (a,b,c)
 * and tetrahedron (a,b,c,d) whose edges all enter the filtration no later
 * than the current edge o_ab (i.e. o_ab is the maximal edge of the simplex).
 *
 * Triangles are found by intersecting the sorted neighbor lists of the two
 * endpoints of each edge; tetrahedra by extending each triangle with a
 * fourth vertex d found via search_Neighbors.
 *
 * Returns the computed total, self->g_n_all_simp.
 * (Fix: the original fell off the end of this non-void function — undefined
 * behavior whenever the caller uses the result.)
 *
 * NOTE(review): assumes self->g_n_all_simp (BIGINT) is unsigned long long
 * compatible, per the "%llu" format below — confirm the typedef.
 */
BIGINT compute_num_simplices(filtration* self){
    // Start with the 0- and 1-simplices.
    self->g_n_all_simp = (BIGINT)(self->g_n_vert) + (BIGINT)(self->g_n_valid_edges);
    printf("\n");
    for (EDGE_ID o_ab = 0; o_ab < self->g_n_valid_edges; o_ab++){
        printf("\redge%d", o_ab);
        VERT_ID a = self->g_edges_list[2*o_ab];
        VERT_ID b = self->g_edges_list[2*o_ab+1];
        // Walk the two sorted neighbor lists in lockstep to find common
        // neighbors c of a and b.
        VERT_ID a_ptr = 0;
        VERT_ID b_ptr = 0;
        while ((a_ptr < self->g_Neigh_len[a])\
                && (b_ptr < self->g_Neigh_len[b])){
            if (self->g_Neighbors[a][a_ptr].neighbor < self->g_Neighbors[b][b_ptr].neighbor){
                a_ptr++;
            }
            else if (self->g_Neighbors[a][a_ptr].neighbor > self->g_Neighbors[b][b_ptr].neighbor){
                b_ptr++;
            }
            else{
                VERT_ID c = self->g_Neighbors[a][a_ptr].neighbor;
                // Only count the triangle when o_ab is its maximal edge,
                // so each triangle is counted exactly once.
                EDGE_ID o_ac = self->g_Neighbors[a][a_ptr].order;
                if (o_ac > o_ab){
                    a_ptr++;
                    b_ptr++;
                    continue;
                }
                EDGE_ID o_bc = self->g_Neighbors[b][b_ptr].order;
                if (o_bc > o_ab){
                    a_ptr++;
                    b_ptr++;
                    continue;
                }
                // This is a valid triangle
                self->g_n_all_simp++;
                // Extend to tetrahedra: try every neighbor d > c of c whose
                // edges to a, b, and c all precede o_ab.
                for (VERT_ID mm = 0; mm < self->g_Neigh_len[c]; mm++){
                    if (self->g_Neighbors[c][mm].neighbor < c){
                        continue;
                    }
                    VERT_ID d = self->g_Neighbors[c][mm].neighbor;
                    // search_Neighbors returns g_n_vert when d is not a
                    // neighbor of the queried vertex.
                    VERT_ID idx = search_Neighbors(self, a, d, 0, self->g_Neigh_len[a]-1);
                    if (idx == self->g_n_vert) continue;
                    EDGE_ID o_ad = self->g_Neighbors[a][idx].order;
                    if (o_ad > o_ab) continue;
                    idx = search_Neighbors(self, b, d, 0, self->g_Neigh_len[b]-1);
                    if (idx == self->g_n_vert) continue;
                    EDGE_ID o_bd = self->g_Neighbors[b][idx].order;
                    if (o_bd > o_ab) continue;
                    idx = search_Neighbors(self, c, d, 0, self->g_Neigh_len[c]-1);
                    if (idx == self->g_n_vert) continue;
                    EDGE_ID o_cd = self->g_Neighbors[c][idx].order;
                    if (o_cd > o_ab) continue;
                    self->g_n_all_simp++;
                    //printf("\n %d, %d, %d, %d", a, b, c, d);
                }
                a_ptr++;
                b_ptr++;
            }
        }
    }
    printf("\nNumber of simplices %llu\n", self->g_n_all_simp);
    return self->g_n_all_simp;
}
/*
 * Computes H1 persistent homology: reduces the boundary columns of the
 * triangles flagged by the cohomology pass (g_H1_cohom_pivots), records the
 * resulting persistence pairs, writes homology and birth cycles to
 * g_homH1_cycles_file, and finally runs the cycle-minimization step.
 *
 * The reduction itself is batched: up to g_workspace_size columns are filled,
 * then reduce_ws_H1 runs a parallel pass (worker threads created here) plus a
 * serial pass. Allocates and later frees the R matrix scaffolding, workspace
 * buffers, and per-pair bookkeeping.
 */
void compute_H1_homology_cycles(filtration* self){
    ///////////////////////////////////////////////////////////////////////
    ///////////////////////////////////////////////////////////////////////
    //
    // STEP H1.1: Find homology now for the triangles
    //
    ///////////////////////////////////////////////////////////////////////
    ///////////////////////////////////////////////////////////////////////
    if (!self->g_suppress_output){
        printf("\n\n---------------");
        printf("\nComputing H1...");
        printf("\n---------------\n");
    }
    struct timespec start_wall_clock, finish_wall_clock;
    clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
    // Reduced matrix R for H1: columns stored back-to-back in g_R_H1, with
    // g_R_col_idx_H1 holding the start offset of each column.
    self->g_R_max_len_H1 = 100;
    self->g_R_H1 = (EDGE_ID*)malloc(self->g_R_max_len_H1*sizeof(EDGE_ID));
    self->g_R_len_H1 = 0;
    self->g_R_col_idx_max_len_H1 = 100;
    self->g_R_col_idx_H1 = (EDGE_ID*)malloc(self->g_R_col_idx_max_len_H1*sizeof(EDGE_ID));
    self->g_R_col_idx_H1_ptr = 0;
    self->g_workspace_size = 1000;
    self->g_ws_pre_alloc = 1000;
    // Initialize ws counter
    self->g_ws_counter = 0;
    // H1 workspace structures
    self->g_workspace_H1 = (EDGE_ID**)malloc(self->g_workspace_size*sizeof(EDGE_ID*));
    // H1 workspace info
    self->g_workspace_H1_info = (boundary_H1_ws*)malloc(self->g_workspace_size*sizeof(boundary_H1_ws));
    for (int i = 0; i < self->g_workspace_size; i++){
        self->g_workspace_H1_info[i].max_len = self->g_ws_pre_alloc;
        // Each workspace buffer is double-width: two halves of max_len, used
        // as original/scratch during the merge-based reduction.
        self->g_workspace_H1[i] = (EDGE_ID*)malloc(2*self->g_workspace_H1_info[i].max_len*sizeof(EDGE_ID));
        // Fixed 3-entry buffer for a triangle boundary used in trivial pairs.
        self->g_workspace_H1_info[i].trivial_boundary = (EDGE_ID*)malloc(3*sizeof(EDGE_ID));
    }
    // Pivots
    self->g_pivots_H1 = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
    // Convenient info for pers pairs
    self->g_homH1_pers_len = 0;
    self->g_homH1_pers_max_len = 100;
    self->g_homH1_pers = (homH1_pers*)malloc(self->g_homH1_pers_max_len*sizeof(homH1_pers));
    // Temporary space for birth cycles
    self->g_temp_V_primary.max_len = 10;
    self->g_temp_V_primary.VV = (EDGE_ID*)malloc(self->g_temp_V_primary.max_len*sizeof(EDGE_ID));
    self->g_temp_V_primary.len = 0;
    // Temporary space for birth cycles
    self->g_temp_R_birth_cycles.max_len = 100;
    self->g_temp_R_birth_cycles.RR = (EDGE_ID*)malloc(2*self->g_temp_R_birth_cycles.max_len*sizeof(EDGE_ID));
    self->g_temp_R_birth_cycles.original = 0;
    self->g_temp_R_birth_cycles.len = 0;
    //#ifdef HOM_CYCLES
    // Need this info for birth voids
    if (self->g_compute_cycles){
        self->g_H1_pivot_of = (V_H1*)malloc(self->g_n_valid_edges*sizeof(V_H1));
    }
    //#endif
#ifdef ADAPTIVE_V_STORAGE
    // Create pointers to store V
    if (self->g_compute_cycles){
        for (EDGE_ID mm = 0; mm < self->g_n_vert; mm++){
            self->g_H0_pivot_of[mm].V_usage = 0;
            self->g_H0_pivot_of[mm].V_stored = 0;
            self->g_H0_pivot_of[mm].V_len = 0;
            self->g_H0_pivot_of[mm].VV = NULL;
        }
    }
    // Create pointers to store V per extraction call
    self->g_store_V_for_len = 0;
    self->g_store_V_for_max_len = 10;
    self->g_store_V_for = (EDGE_ID*)malloc(self->g_store_V_for_max_len*sizeof(EDGE_ID));
#endif
    //#ifdef MINIMIZE_BIRTH_CYCLES
    //if (self->g_reduce_cyc_lengths){
    self->g_all_V_stored_num = 0;
    self->g_all_V_stored_max_num = 10;
    //self->g_all_V_stored_len = (EDGE_ID*)calloc(self->g_all_V_stored_max_num, sizeof(EDGE_ID));
    //self->g_all_V_H0_stored = (EDGE_ID**)malloc(self->g_all_V_stored_max_num*sizeof(EDGE_ID*));
    self->g_all_V_H0_stored = (cyc_info*)malloc(self->g_all_V_stored_max_num*sizeof(cyc_info));
    self->g_edges_in_cycles = (EDGE_ID**)malloc(self->g_n_valid_edges*sizeof(EDGE_ID*));
    self->g_edges_in_cycles_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
    //}
    //#endif
#ifdef MINIMIZE_HOM_CYCLES
    // NOTE(review): this block initializes g_all_R_hom_* but the code further
    // down under the same #ifdef uses g_all_V_hom_* names — looks stale;
    // verify before enabling MINIMIZE_HOM_CYCLES.
    self->g_all_R_hom_stored_num = 0;
    self->g_all_R_hom_stored_max_num = 10;
    //self->g_all_R_hom_stored_len = (EDGE_ID*)calloc(self->g_all_V_hom_stored_max_num, sizeof(EDGE_ID));
    //self->g_all_V_hom_H1_stored = (EDGE_ID**)malloc(self->g_all_V_hom_stored_max_num*sizeof(EDGE_ID*));
    self->g_all_R_hom_H1_stored = (cyc_info*)malloc(self->g_all_R_hom_stored_max_num*sizeof(cyc_info));
#endif
    ////////////////////////////////////////////////////////////////
    //
    // Allocate jobs for parallel H1
    //
    ////////////////////////////////////////////////////////////////
    self->g_jobs = (int*)malloc((self->g_cpu_count + 1)*sizeof(int));
    allocate_jobs(self, self->g_workspace_size);
    self->g_threads = (pthread_t *)malloc(self->g_cpu_count*sizeof(pthread_t));
    int rtn;
    if ((rtn = pthread_mutex_init(&(self->g_thread_lock), NULL)) !=0)
        fprintf(stderr, "pthread_mutex_init %s", strerror(rtn)), exit(-1);
    if ((rtn = pthread_cond_init(&(self->g_start_boss), NULL)) !=0)
        fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
    if ((rtn = pthread_cond_init(&(self->g_start_workers), NULL)) !=0)
        fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
    // Initialize thread creation
    self->g_thread_id = 0;
    self->g_sleeping_threads = 0;
    self->g_delete_threads = 0;
    for (int i = 0; i < self->g_cpu_count; i++){
        // NOTE(review): `!= 0` binds inside the assignment here, so rtn gets
        // 0/1 rather than the pthread_create error code; the failure check
        // still fires, but the printed code will be 1 — confirm intent.
        if ((rtn = pthread_create( \
                &(self->g_threads[i]) \
                , NULL \
                , reduce_with_complex_H1 \
                , (void*)self)!= 0))
            fprintf(stderr, "pthread_create %d", rtn), exit(-1);
    }
    // Wait for threads to be initialized
    pthread_mutex_lock(&(self->g_thread_lock));
    while(self->g_sleeping_threads != self->g_cpu_count){
        pthread_cond_wait(&(self->g_start_boss) \
                , &(self->g_thread_lock));
    }
    ////////////////////////////////
    // STEP 1: Compute R (Note: Do not include trivial pairs)
    for (EDGE_ID o_ab = 0; o_ab < self->g_n_valid_edges; o_ab++){
        if (!self->g_H1_cohom_pivots_len[o_ab]){
            continue;
        }
        // NOTE(review): a and b are not referenced below in this loop.
        VERT_ID a = self->g_edges_list[2*o_ab];
        VERT_ID b = self->g_edges_list[2*o_ab+1];
        for (VERT_ID mm = 0; mm < self->g_H1_cohom_pivots_len[o_ab]; mm++){
            //This triangle is in a non-trivial persistence pair
            // Workspace attributes
            boundary_H1_ws* this_ws = self->g_workspace_H1_info + self->g_ws_counter;
            // Initially, the original is at 0
            this_ws->original = 0;
            this_ws->flag_first = 1;
            // Parallel control flags
            this_ws->flag_empty = 0;
            this_ws->flag_red_w_complex = 0;
            this_ws->flag_append_to_complex = 1;
            this_ws->triangle.key1 = o_ab;
            this_ws->triangle.key2 = self->g_H1_cohom_pivots[o_ab][mm].key2;
            // Initial length of boundary
            this_ws->len = 3;
            compute_boundary_triangle(self, this_ws->triangle, self->g_workspace_H1[self->g_ws_counter]);
            this_ws->pivot = o_ab;
            //printf("\nOutside the boundary is (%d, %d, %d)"\
            //    , self->g_workspace_H1[self->g_ws_counter][0]\
            //    , self->g_workspace_H1[self->g_ws_counter][1]\
            //    , self->g_workspace_H1[self->g_ws_counter][2]\
            //    );
            self->g_ws_counter++;
            // Batch is full: run one parallel+serial reduction pass.
            if (self->g_ws_counter == self->g_workspace_size){
                reduce_ws_H1(self);
            }
        }
    }
    //printf("\n press key for the last batch");
    //self->g_new_debug2 = 1;
    //getchar();
    // Reduction of final batch
    while (self->g_ws_counter){
        allocate_jobs(self, self->g_ws_counter);
        reduce_ws_H1(self);
    }
    //printf("\nComputed H1.");
    //getchar();
    /////////////////////////
    // Cancel the threads
    /////////////////////////
    self->g_delete_threads = 1;
    pthread_cond_broadcast(&(self->g_start_workers));
    pthread_mutex_unlock(&(self->g_thread_lock));
    for (int i = 0; i < self->g_cpu_count; i++){
        pthread_join(self->g_threads[i], NULL);
    }
    free(self->g_jobs);
    free(self->g_threads);
    // CANNOT FREE THIS IS H2 CYCLES ARE NEEDED
    //free(self->g_R_H1);
    //free(self->g_R_col_idx_H1);
    //free(self->g_pivots_H1);
    for (int i = 0; i < self->g_workspace_size; i++){
        free(self->g_workspace_H1_info[i].trivial_boundary);
        free(self->g_workspace_H1[i]);
    }
    free(self->g_workspace_H1);
    free(self->g_workspace_H1_info);
    if (!self->g_suppress_output){
        clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
        self->g_timer_computeH1 = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
        self->g_timer_computeH1 += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
        printf("\nComputed H1.");
        ////////////////////////
        // HOMOLOGY CYCLES
        ////////////////////////
        clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
    }
    // NOTE(review): fp2 is used without a NULL check — confirm the output
    // path is always writable before this point.
    FILE* fp2 = fopen(self->g_homH1_cycles_file, "w");
    PAR birth, death;
    self->g_n_H1_birth_cycles = 0;
    self->g_n_H0_stored_V = 0;
    // NOTE(review): add_flag is only referenced in commented-out code below.
    int add_flag = 0;
    // Go over the pers pairs of features that died and compute the cycles
    for (EDGE_ID mm = 0; mm < self->g_homH1_pers_len; mm++){
        //if (n_cycles%1000 == 0) printf("\r Computing cycle num %llu", n_cycles);
        self->g_n_H1_birth_cycles++;
        // filetype 1 stores squared distances; report sqrt in that case.
        if (self->g_filetype == 1){
            birth = sqrt(self->g_edge_parameter[self->g_homH1_pers[mm].birth_edge]);
            death = sqrt(self->g_edge_parameter[self->g_homH1_pers[mm].death_triangle_key1]);
        }
        else{
            birth = self->g_edge_parameter[self->g_homH1_pers[mm].birth_edge];
            death = self->g_edge_parameter[self->g_homH1_pers[mm].death_triangle_key1];
        }
        fprintf(fp2, "%lf, %lf", birth , death);
        fprintf(fp2, "\nhomology cycle");
#ifdef MINIMIZE_HOM_CYCLES
        self->g_all_V_hom_stored_len[self->g_all_V_hom_stored_num] =\
            self->g_R_col_idx_H1[self->g_homH1_pers[mm].R_col_idx + 1]\
            - self->g_R_col_idx_H1[self->g_homH1_pers[mm].R_col_idx];
        self->g_all_V_hom_H1_stored[self->g_all_V_hom_stored_num] =\
            (EDGE_ID*)\
            malloc(self->g_all_V_hom_stored_len[self->g_all_V_hom_stored_num]\
                    *sizeof(EDGE_ID));
        EDGE_ID bbb = 0;
#endif
        // The homology cycle is the stored R column for this pair: dump its
        // edges as vertex pairs.
        for (EDGE_ID bb = self->g_R_col_idx_H1[self->g_homH1_pers[mm].R_col_idx]\
                ; bb < self->g_R_col_idx_H1[self->g_homH1_pers[mm].R_col_idx + 1]\
                ; bb++){
            fprintf(fp2, ", %d, %d", self->g_edges_list[2*self->g_R_H1[bb]]\
                    , self->g_edges_list[2*self->g_R_H1[bb]+1]);
#ifdef MINIMIZE_HOM_CYCLES
            self->g_all_V_hom_H1_stored[self->g_all_V_hom_stored_num][bbb++] = self->g_R_H1[bb];
#endif
        }
#ifdef MINIMIZE_HOM_CYCLES
        self->g_all_V_hom_stored_num++;
        if (self->g_all_V_hom_stored_num == self->g_all_V_hom_stored_max_num){
            self->g_all_V_hom_stored_max_num += 100;
            self->g_all_V_hom_H1_stored = (EDGE_ID**)realloc(self->g_all_V_hom_H1_stored\
                    , self->g_all_V_hom_stored_max_num*sizeof(EDGE_ID*));
            self->g_all_V_hom_stored_len = (EDGE_ID*)realloc(self->g_all_V_hom_stored_len\
                    , self->g_all_V_hom_stored_max_num*sizeof(EDGE_ID));
        }
#endif
        // Always store the birth cycles for now
        fprintf(fp2, "\nbirth cycle");
        //printf("\nGetting birth cycle %d out of %d", mm, self->g_homH1_pers_len);
        //getchar();
#ifdef ADAPTIVE_V_STORAGE
        self->g_store_V_for_len = 0;
#endif
        // Fills g_temp_V_primary with the birth cycle of this pair.
        get_birth_cycle(self, self->g_homH1_pers[mm].birth_edge);
        //#ifdef MINIMIZE_BIRTH_CYCLES
        //if (self->g_reduce_cyc_lengths){
        ///self->g_all_V_stored_len[self->g_all_V_stored_num] = self->g_temp_V_primary.len;
        ///self->g_all_V_H0_stored[self->g_all_V_stored_num] =\
        ///    (EDGE_ID*)malloc(self->g_temp_V_primary.len*sizeof(EDGE_ID));
        //add_flag = 1;
        //if (death - birth > 4){
        //    add_flag = 1;
        //}
        //if (add_flag){
        // Keep a copy of the birth cycle for the minimization step below.
        self->g_all_V_H0_stored[self->g_all_V_stored_num].boundary =\
            (EDGE_ID*)malloc(self->g_temp_V_primary.len*sizeof(EDGE_ID));
        self->g_all_V_H0_stored[self->g_all_V_stored_num].len = self->g_temp_V_primary.len;
        //self->g_all_V_H0_stored[self->g_all_V_stored_num].ops_len = 1;
        //self->g_all_V_H0_stored[self->g_all_V_stored_num].ops = (EDGE_ID*)malloc(sizeof(EDGE_ID));
        //self->g_all_V_H0_stored[self->g_all_V_stored_num].ops[0] = self->g_all_V_stored_num;
        self->g_all_V_H0_stored[self->g_all_V_stored_num].perspair[0] = birth;
        self->g_all_V_H0_stored[self->g_all_V_stored_num].perspair[1] = death;
        self->g_all_V_H0_stored[self->g_all_V_stored_num].updated_birth = birth;
        //printf("\n%d idx is pers pair (%lf, %lf) has len %d"\
        //    , self->g_all_V_stored_num\
        //    , birth\
        //    , death\
        //    , self->g_temp_V_primary.len);
        //}
        //}
        //#endif
        for (EDGE_ID nn = 0; nn < self->g_temp_V_primary.len; nn++){
            fprintf(fp2, ", %d, %d", self->g_edges_list[2*self->g_temp_V_primary.VV[nn]]\
                    , self->g_edges_list[2*self->g_temp_V_primary.VV[nn]+1]);
            //#ifdef MINIMIZE_BIRTH_CYCLES
            //if (self->g_reduce_cyc_lengths){
            self->g_all_V_H0_stored[self->g_all_V_stored_num].boundary[nn] = self->g_temp_V_primary.VV[nn];
            //}
            //#endif
        }
        fprintf(fp2, "\n");
        //#ifdef MINIMIZE_BIRTH_CYCLES
        //if (add_flag){
        //if (self->g_reduce_cyc_lengths){
        self->g_all_V_stored_num++;
        if (self->g_all_V_stored_num == self->g_all_V_stored_max_num){
            self->g_all_V_stored_max_num += 100;
            self->g_all_V_H0_stored = (cyc_info*)realloc(self->g_all_V_H0_stored\
                    , self->g_all_V_stored_max_num*sizeof(cyc_info));
            //self->g_all_V_stored_len = (EDGE_ID*)realloc(self->g_all_V_stored_len
            //        , self->g_all_V_stored_max_num*sizeof(EDGE_ID));
        }
        //}
        //#endif
#ifdef ADAPTIVE_V_STORAGE
        store_V_H0(self);
#endif
    }
    // Go over the pers pairs of undead features and compute the birth cycles
    for (EDGE_ID mm = 0; mm < self->g_H1_undead_ptr; mm++){
        self->g_n_H1_birth_cycles++;
        if (self->g_filetype == 1){
            birth = sqrt(self->g_edge_parameter[self->g_H1_undead[mm]]);
        }
        else{
            birth = self->g_edge_parameter[self->g_H1_undead[mm]];
        }
        // Death is reported as -1 for features that never die.
        fprintf(fp2, "%lf, -1", birth);
#ifdef ADAPTIVE_V_STORAGE
        self->g_store_V_for_len = 0;
#endif
        get_birth_cycle(self, self->g_H1_undead[mm]);
        //#ifdef MINIMIZE_BIRTH_CYCLES
        //if (self->g_reduce_cyc_lengths){
        //self->g_all_V_stored_len[self->g_all_V_stored_num] = self->g_temp_V_primary.len;
        //self->g_all_V_H0_stored[self->g_all_V_stored_num] =\
        //    (EDGE_ID*)malloc(self->g_temp_V_primary.len*sizeof(EDGE_ID));
        self->g_all_V_H0_stored[self->g_all_V_stored_num].boundary =\
            (EDGE_ID*)malloc(self->g_temp_V_primary.len*sizeof(EDGE_ID));
        self->g_all_V_H0_stored[self->g_all_V_stored_num].len = self->g_temp_V_primary.len;
        //self->g_all_V_H0_stored[self->g_all_V_stored_num].ops_len = 1;
        //self->g_all_V_H0_stored[self->g_all_V_stored_num].ops = (EDGE_ID*)malloc(sizeof(EDGE_ID));
        //self->g_all_V_H0_stored[self->g_all_V_stored_num].ops[0] = self->g_all_V_stored_num;
        self->g_all_V_H0_stored[self->g_all_V_stored_num].perspair[0] = birth;
        self->g_all_V_H0_stored[self->g_all_V_stored_num].perspair[1] = -1;
        self->g_all_V_H0_stored[self->g_all_V_stored_num].updated_birth = birth;
        //}
        //#endif
        fprintf(fp2, "\nbirth cycle");
        for (EDGE_ID nn = 0; nn < self->g_temp_V_primary.len; nn++){
            fprintf(fp2, ", %d, %d", self->g_edges_list[2*self->g_temp_V_primary.VV[nn]]\
                    , self->g_edges_list[2*self->g_temp_V_primary.VV[nn]+1]);
            //#ifdef MINIMIZE_BIRTH_CYCLES
            //if (self->g_reduce_cyc_lengths){
            self->g_all_V_H0_stored[self->g_all_V_stored_num].boundary[nn] = self->g_temp_V_primary.VV[nn];
            //}
            //#endif
        }
        fprintf(fp2, "\n");
        //#ifdef MINIMIZE_BIRTH_CYCLES
        //if (self->g_reduce_cyc_lengths){
        self->g_all_V_stored_num++;
        if (self->g_all_V_stored_num == self->g_all_V_stored_max_num){
            self->g_all_V_stored_max_num += 100;
            self->g_all_V_H0_stored = (cyc_info*)realloc(self->g_all_V_H0_stored\
                    , self->g_all_V_stored_max_num*sizeof(cyc_info));
            //self->g_all_V_stored_len = (EDGE_ID*)realloc(self->g_all_V_stored_len
            //        , self->g_all_V_stored_max_num*sizeof(EDGE_ID));
        }
        //}
        //#endif
#ifdef ADAPTIVE_V_STORAGE
        store_V_H0(self);
#endif
    }
    fclose(fp2);
    free(self->g_homH1_pers);
    free(self->g_temp_V_primary.VV);
    free(self->g_temp_R_birth_cycles.RR);
#ifdef RECORD_V_USAGE
    FILE* fp3 = fopen(self->g_V_H0_usage_file, "w");
#endif
    // Release the stored H0 V columns (no longer needed after the cycles
    // above have been extracted).
    for (EDGE_ID mm = 0; mm < self->g_n_vert; mm++){
        if (self->g_H0_pivot_of[mm].V_len){
#ifdef RECORD_V_USAGE
            fprintf(fp3, "%d, %d\n"\
                    , self->g_H0_pivot_of[mm].V_usage\
                    , self->g_H0_pivot_of[mm].V_depth);
#endif
            //printf("\n%d is used %d times with depth %d"\
            //    , mm\
            //    , self->g_H0_pivot_of[mm].V_usage\
            //    , self->g_H0_pivot_of[mm].V_depth\
            //    );
            free(self->g_H0_pivot_of[mm].VV);
        }
    }
#ifdef RECORD_V_USAGE
    fclose(fp3);
#endif
    free(self->g_H0_pivot_of);
    //printf("\nPress key to continue...");
    //getchar();
#ifdef ADAPTIVE_V_STORAGE
    free(self->g_store_V_for);
#endif
    if (!self->g_suppress_output){
        clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
        self->g_timer_H1cycles = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
        self->g_timer_H1cycles += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
    }
    //if (self->g_reduce_cyc_lengths){
    struct timespec start_wall_clock2, finish_wall_clock2;
    if (!self->g_suppress_output){
        printf("\nMinimizing birth cycles...");
        clock_gettime(CLOCK_MONOTONIC, &start_wall_clock2);
    }
    minimize_birth_cycles_H0_v3(self\
            , self->g_all_V_H0_stored\
            , self->g_all_V_stored_num\
            , self->g_minimal_V_H0_file\
            , self->g_V_H0_birthcyc_lens_file\
            , self->g_minimal_V_H0_birthcyc_lens_file\
            , self->g_birth_subset_points_file_H0\
            );
    //, self->g_minimal_V_H0_in_cycles_file
    if (!self->g_suppress_output){
        clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock2);
        self->g_timer_minimize_H1cycles = (finish_wall_clock2.tv_sec - start_wall_clock2.tv_sec);
        self->g_timer_minimize_H1cycles += (finish_wall_clock2.tv_nsec - start_wall_clock2.tv_nsec) / 1000000000.0;
    }
    //}
    //else{
    //    for (EDGE_ID ci = 0; ci < self->g_all_V_stored_num; ci++){
    //        free(self->g_all_V_H0_stored[ci].boundary);
    //    }
    //}
#ifdef MINIMIZE_HOM_CYCLES
    // NOTE(review): argument list here differs from the v3 call above and
    // uses g_all_V_hom_* names not initialized in this function — confirm
    // before enabling MINIMIZE_HOM_CYCLES.
    if (!self->g_suppress_output){
        printf("\nMinimizing hom cycles...");
        clock_gettime(CLOCK_MONOTONIC, &start_wall_clock2);
    }
    minimize_birth_cycles_H0_v3(self, self->g_all_V_hom_H1_stored\
            , self->g_all_V_hom_stored_len\
            , self->g_all_V_hom_stored_num\
            , self->g_minimal_V_hom_H1_file\
            , self->g_birth_subset_points_file_H0\
            );
    if (!self->g_suppress_output){
        clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock2);
        self->g_timer_minimize_H1_homcycles = (finish_wall_clock2.tv_sec - start_wall_clock2.tv_sec);
        self->g_timer_minimize_H1_homcycles += (finish_wall_clock2.tv_nsec - start_wall_clock2.tv_nsec) / 1000000000.0;
    }
#endif
    if (!self->g_suppress_output){
        printf("\nComputed homology and birth cycles for H1.");
    }
    //getchar();
}
/*
 * Runs one reduction pass over the current batch of H1 workspace columns:
 * a parallel pass against the stored complex (worker threads woken via
 * g_start_workers, each reducing its g_jobs slice in reduce_with_complex_H1),
 * then a serial pass of the columns against each other (reduce_with_self_H1),
 * and finally a compaction step that appends finished columns to R and keeps
 * the still-unresolved ones for the next round.
 *
 * Caller must hold g_thread_lock (compute_H1_homology_cycles acquires it
 * before the first batch); pthread_cond_wait releases/reacquires it here.
 */
void reduce_ws_H1(filtration* self){
    if (self->g_new_debug2){
        for (int kk = 0; kk < self->g_ws_counter; kk++){
            printf("\n%d has triangle (%d, %d) with pivot %d", kk\
                , self->g_workspace_H1_info[kk].triangle.key1\
                , self->g_workspace_H1_info[kk].triangle.key2\
                , self->g_workspace_H1_info[kk].pivot\
                );
        }
        printf("\nbefore parallel. press key to start parallel");
        //getchar();
    }
    // Wake all workers; each increments g_processed_threads when its slice
    // is done, and the boss sleeps on g_start_boss until all have finished.
    self->g_processed_threads = 0;
    //printf("\npress key to reduce with complex");
    //getchar();
    pthread_cond_broadcast(&(self->g_start_workers));
    while (self->g_processed_threads != self->g_cpu_count){
        pthread_cond_wait(&(self->g_start_boss) \
            ,&(self->g_thread_lock));
    }
    //printf("\npress key to reduce with self");
    //getchar();
    if (self->g_new_debug2){
        for (int kk = 0; kk < self->g_ws_counter; kk++){
            printf("\n%d has triangle (%d, %d) with pivot %d", kk\
                , self->g_workspace_H1_info[kk].triangle.key1\
                , self->g_workspace_H1_info[kk].triangle.key2\
                , self->g_workspace_H1_info[kk].pivot\
                );
        }
        printf("\nafter parallel. press key to start serial");
        //getchar();
    }
    // Serial pass: eliminate duplicate pivots within the batch itself.
    reduce_with_self_H1( \
        self \
        );
    if (self->g_new_debug2){
        for (int kk = 0; kk < self->g_ws_counter; kk++){
            printf("\n%d has triangle (%d, %d) with pivot %d", kk\
                , self->g_workspace_H1_info[kk].triangle.key1\
                , self->g_workspace_H1_info[kk].triangle.key2\
                , self->g_workspace_H1_info[kk].pivot\
                );
        }
        printf("\nafter serial. press key to update ");
        //getchar();
    }
    // Compaction: append fully reduced columns to R; drop zero columns;
    // swap the remaining (still unresolved) columns to the front so the
    // next round operates on a dense prefix of the workspace.
    int count_valid = 0;
    for (int ws_counter=0; ws_counter < self->g_ws_counter; ws_counter++){
        if (self->g_workspace_H1_info[ws_counter].flag_append_to_complex){
            update_R_H1(self \
                , ws_counter\
                );
            continue;
        }
        //if (!self->g_workspace_H1_info[ws_counter].len){continue;}
        if (self->g_workspace_H1_info[ws_counter].flag_empty){
            continue;
        }
        // Swap R
        EDGE_ID* temp = self->g_workspace_H1[count_valid];
        self->g_workspace_H1[count_valid] = self->g_workspace_H1[ws_counter];
        self->g_workspace_H1[ws_counter] = temp;
        // Swap R info
        boundary_H1_ws temp2 = self->g_workspace_H1_info[count_valid];
        self->g_workspace_H1_info[count_valid] = self->g_workspace_H1_info[ws_counter];
        self->g_workspace_H1_info[ws_counter] = temp2;
        // At this point, this has to be a non-zero column
        self->g_workspace_H1_info[count_valid].flag_empty = 0;
        count_valid += 1;
    }
    self->g_ws_counter = count_valid;
    if (self->g_new_debug2){
        for (int kk = 0; kk < self->g_ws_counter; kk++){
            printf("\n%d has triangle (%d, %d) with pivot %d", kk\
                , self->g_workspace_H1_info[kk].triangle.key1\
                , self->g_workspace_H1_info[kk].triangle.key2\
                , self->g_workspace_H1_info[kk].pivot\
                );
        }
        printf("\nafter update. press key to continue ");
        //getchar();
    }
    //if (dim)
    //    self->g_H0_MAX = self->g_n_reduced_simplex[dim];
}
/*
 * Serial pass of the H1 reduction: reduces each workspace column against the
 * columns that precede it in the batch, so that after this call no two
 * surviving columns share a pivot edge.
 *
 * For each column i (skipping columns already reduced to zero), earlier
 * columns m < i are scanned:
 *   - if a column with the same pivot is itself still scheduled for a
 *     reduction against the stored complex (flag_red_w_complex), column i
 *     must wait for the next parallel pass: flag_append_to_complex is
 *     cleared and the scan stops;
 *   - otherwise column m is added into column i (symmetric difference,
 *     i.e. Z/2 addition of sorted columns), the pivot is recomputed, and
 *     the scan restarts from m = 0.
 * If the new pivot is a trivial-pair edge or already a pivot of R, the
 * column is flagged for reduction with the complex in the next parallel
 * pass (mirroring the bookkeeping in reduce_with_complex_H1). A column
 * that cancels completely is marked empty.
 *
 * Called from reduce_ws_H1 while the worker threads are parked.
 * (Fix over the original: removed the unused local `compare`.)
 */
void reduce_with_self_H1( \
    filtration* self \
    ){
    int idx;
    EDGE_ID count, j, k;
    count = 0;
    for (int i = 0; i < self->g_ws_counter; i++){
        boundary_H1_ws* this_ws = self->g_workspace_H1_info + i;
        this_ws->flag_reduce = 0;
        // If the simplex has already been reduced to 0
        // then continue
        if (this_ws->flag_empty){
            this_ws->flag_append_to_complex = 0;
            continue;
        }
        // Each workspace buffer holds two halves of max_len entries; the
        // `original` flag selects the half that currently holds the column.
        EDGE_ID* orig = self->g_workspace_H1[i] + this_ws->original*this_ws->max_len;
        int m = 0;
        while (m < i){
            boundary_H1_ws* m_ws = self->g_workspace_H1_info + m;
            if (m_ws->flag_empty){
                m++;
                continue;
            }
            EDGE_ID* original_m = self->g_workspace_H1[m] + m_ws->original*m_ws->max_len;
            orig = self->g_workspace_H1[i] + this_ws->original*this_ws->max_len;
            if (m_ws->pivot > this_ws->pivot){
                if (m_ws->flag_red_w_complex){
                    // An earlier column must first be reduced against the
                    // stored complex; defer this column to the next pass.
                    this_ws->flag_append_to_complex = 0;
                    break;
                }
                m++;
                continue;
            }
            if (m_ws->pivot < this_ws->pivot){
                m++;
                continue;
            }
            // Same pivot as column m.
            if (m_ws->flag_red_w_complex){
                this_ws->flag_append_to_complex = 0;
                break;
            }
            // Grow the double buffer if the merged column might not fit;
            // the column is compacted into the low half first so realloc
            // preserves it.
            if (this_ws->len + m_ws->len > this_ws->max_len){
                if (this_ws->original){
                    for (EDGE_ID mm = 0; mm < this_ws->len; mm++){
                        self->g_workspace_H1[i][mm] = self->g_workspace_H1[i][mm + this_ws->max_len];
                    }
                    this_ws->original = 0;
                }
                this_ws->max_len = this_ws->len + m_ws->len + 1000;
                self->g_workspace_H1[i] = (EDGE_ID*)realloc(self->g_workspace_H1[i]\
                        , 2*this_ws->max_len*sizeof(EDGE_ID));
                orig = self->g_workspace_H1[i];
            }
            EDGE_ID* scratch = self->g_workspace_H1[i] + (1-this_ws->original)*this_ws->max_len;
            // Merge the two sorted columns into scratch, dropping entries
            // common to both (they cancel mod 2).
            count = 0;
            j = 0;
            k = 0;
            while ((j < this_ws->len) && (k < m_ws->len)){
                if (orig[j] < original_m[k]){
                    scratch[count++] = orig[j++];
                }
                else if (orig[j] > original_m[k]){
                    scratch[count++] = original_m[k++];
                }
                else{
                    j++;
                    k++;
                }
            }
            while (j < this_ws->len){
                scratch[count++] = orig[j++];
            }
            while (k < m_ws->len){
                scratch[count++] = original_m[k++];
            }
            this_ws->len = count;
            this_ws->original = 1 - this_ws->original;
            if (!count){
                // Column cancelled completely.
                this_ws->flag_append_to_complex = 0;
                this_ws->flag_empty = 1;
                break;
            }
            this_ws->pivot = scratch[this_ws->len-1];
            // Check if pivot is trivial or if it is pivot in H1
            if (self->g_coH1_all_lows[this_ws->pivot].low.key1 == this_ws->pivot){
                // Trivial pair: schedule a reduction with the paired
                // triangle's 3-edge boundary (R_col_idx 0 encodes this).
                compute_boundary_triangle(self, self->g_coH1_all_lows[this_ws->pivot].low, this_ws->trivial_boundary);
                this_ws->R_col_idx = 0;
                this_ws->reduce_with_len = 3;
                this_ws->flag_reduce = 1;
                this_ws->flag_red_w_complex = 1;
                this_ws->flag_append_to_complex = 0;
                break;
            }
            else{
                // Pivot already present in R: schedule a reduction with
                // that stored column.
                idx = self->g_pivots_H1[this_ws->pivot];
                if (idx){
                    this_ws->R_col_idx = idx;
                    this_ws->reduce_with_len = self->g_R_col_idx_H1[idx+1] - self->g_R_col_idx_H1[idx];
                    this_ws->flag_reduce = 1;
                    this_ws->flag_red_w_complex = 1;
                    this_ws->flag_append_to_complex = 0;
                    break;
                }
            }
            // Pivot changed: restart the scan over the earlier columns.
            m = 0;
        }//End of m loop
    }
}//End of red_ws_w_self_single
void* reduce_with_complex_H1(void* arg){
filtration* self = arg;
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, 0);
pthread_mutex_lock(&(self->g_thread_lock));
int tid = ++self->g_thread_id;
EDGE_ID *simp, *original_simp, *scratch_simp, *red_start;
EDGE_ID j, k ,count;
//EDGE_ID reduced_col;
int i ,reduced_col ,idx;
for (;;){
self->g_sleeping_threads++;
if (self->g_sleeping_threads == self->g_cpu_count)
pthread_cond_signal(&(self->g_start_boss));
pthread_cond_wait(&(self->g_start_workers), &(self->g_thread_lock));
if (self->g_delete_threads){
//printf("\nexiting from thread %d", tid);
pthread_mutex_unlock(&(self->g_thread_lock));
pthread_exit(NULL);
}
self->g_sleeping_threads--;
pthread_mutex_unlock(&(self->g_thread_lock));
for (i = self->g_jobs[tid - 1]; i < self->g_jobs[tid]; i++){
boundary_H1_ws* this_ws = self->g_workspace_H1_info + i;
if (this_ws->flag_first){
this_ws->flag_first = 0;
// Check if this is part of trivial pair
if (self->g_coH1_all_lows[this_ws->pivot].low.key1 == this_ws->pivot){
compute_boundary_triangle(self\
, self->g_coH1_all_lows[this_ws->pivot].low\
, this_ws->trivial_boundary);
this_ws->R_col_idx = 0;
this_ws->reduce_with_len = 3;
this_ws->flag_reduce = 1;
}
else{
idx = self->g_pivots_H1[this_ws->pivot];
if (idx){
this_ws->R_col_idx = idx;
this_ws->reduce_with_len = self->g_R_col_idx_H1[idx+1] - self->g_R_col_idx_H1[idx];
this_ws->flag_reduce = 1;
}
}
}
if (this_ws->flag_empty){
// We are sure that we will exit only if there is no reduction
// required with existing complex or with trivial pair
this_ws->flag_red_w_complex = 0;
this_ws->flag_append_to_complex = 0;
continue;
}
this_ws->flag_red_w_complex = 0;
this_ws->flag_append_to_complex = 1;
EDGE_ID* orig = self->g_workspace_H1[i] \
+ this_ws->original*this_ws->max_len;
EDGE_ID* scratch = self->g_workspace_H1[i] \
+ (1-this_ws->original)*this_ws->max_len;
//printf("\nreducing with %d", idx);
while(this_ws->flag_reduce){
//printf("\nreducing with %d", idx);
//red_start = self->g_R_H1 + self->g_R_col_idx_H1[idx];
//len_a2 = self->g_R_col_idx_H1[idx+1] - self->g_R_col_idx_H1[idx];
if (this_ws->len + this_ws->reduce_with_len > this_ws->max_len){
if (this_ws->original){
for (EDGE_ID mm = 0; mm < this_ws->len; mm++){
self->g_workspace_H1[i][mm] = self->g_workspace_H1[i][mm + this_ws->max_len];
}
this_ws->original = 0;
}
this_ws->max_len = this_ws->len + this_ws->reduce_with_len + 1000;
self->g_workspace_H1[i] = (EDGE_ID*)realloc(self->g_workspace_H1[i]\
, 2*this_ws->max_len*sizeof(EDGE_ID));
orig = self->g_workspace_H1[i];
}
if (!this_ws->R_col_idx){
red_start = this_ws->trivial_boundary;
}
else{
red_start = self->g_R_H1 + self->g_R_col_idx_H1[this_ws->R_col_idx];
}
scratch = self->g_workspace_H1[i] \
+ (1-this_ws->original)*this_ws->max_len;
count = 0;
j = 0;
k = 0;
while ((j < this_ws->len) && (k < this_ws->reduce_with_len)){
if (orig[j] < red_start[k]){
scratch[count++] = orig[j++];
}
else if (orig[j] > red_start[k]){
scratch[count++] = red_start[k++];
}
else{
j++;
k++;
}
}
while (j < this_ws->len){
scratch[count++] = orig[j++];
}
while (k < this_ws->reduce_with_len){
scratch[count++] = red_start[k++];
}
this_ws->original = 1 - this_ws->original;
this_ws->len = count;
if (!this_ws->len){
//idx = self->g_n_reduced_simplex[self->g_dim_now];
//idx = -1;
this_ws->flag_empty = 1;
break;
}
orig = self->g_workspace_H1[i] + this_ws->original*this_ws->max_len;
this_ws->pivot = orig[this_ws->len-1];
//printf("\npivot is %d", this_ws->pivot);
this_ws->flag_reduce = 0;
// First check if this is trivial pair
if (self->g_coH1_all_lows[this_ws->pivot].low.key1 == this_ws->pivot){
compute_boundary_triangle(self\
, self->g_coH1_all_lows[this_ws->pivot].low\
, this_ws->trivial_boundary);
this_ws->R_col_idx = 0;
this_ws->reduce_with_len = 3;
this_ws->flag_reduce = 1;
}
else{
idx = self->g_pivots_H1[this_ws->pivot];
if (idx){
this_ws->R_col_idx = idx;
this_ws->reduce_with_len = self->g_R_col_idx_H1[idx+1] - self->g_R_col_idx_H1[idx];
this_ws->flag_reduce = 1;
}
}
//printf("\nidx is %d", idx);
}
}
pthread_mutex_lock(&(self->g_thread_lock));
self->g_processed_threads++;
}
}
/*
 * Append the fully reduced H1 boundary column held in workspace slot
 * ws_counter to the global sparse reduced matrix R (g_R_H1), register its
 * pivot, and record a persistence pair when birth != death.
 *
 * Side effects:
 *   - grows g_R_H1 / g_R_col_idx_H1 via realloc as needed (results are
 *     unchecked, matching the allocation style used throughout this file);
 *   - g_pivots_H1[pivot] is set to the new column index;
 *   - when g_compute_cycles is set, the killing triangle is remembered in
 *     g_H1_pivot_of[pivot] so homology cycles can be extracted later;
 *   - a finite-persistence pair is appended to g_homH1_pers.
 *
 * NOTE(review): assumes the workspace column is sorted with its pivot
 * (largest entry) stored in this_ws->pivot — TODO confirm against the
 * reduction routines that fill the workspace.
 */
void update_R_H1 (filtration* self, int ws_counter){
//printf("\nupdating R H1");
boundary_H1_ws* this_ws = self->g_workspace_H1_info + ws_counter;
// The column lives in one of two halves of the workspace buffer
// (double buffering); 'original' selects the half holding current data.
EDGE_ID* orig = self->g_workspace_H1[ws_counter] \
+ this_ws->original*this_ws->max_len;
// Update R
// Grow the flat R storage so the whole column fits.
if ((self->g_R_len_H1 + this_ws->len) > self->g_R_max_len_H1){
self->g_R_max_len_H1 += 1000 + this_ws->len;
self->g_R_H1 = (EDGE_ID*)realloc(self->g_R_H1, self->g_R_max_len_H1*sizeof(EDGE_ID));
}
// Update R col idx
// Advance the column pointer; the code below writes both slots
// [ptr] and [ptr+1], so growth is triggered one entry early.
self->g_R_col_idx_H1_ptr++;
if (self->g_R_col_idx_H1_ptr == self->g_R_col_idx_max_len_H1 - 1){
self->g_R_col_idx_max_len_H1 += 1000;
self->g_R_col_idx_H1 = (EDGE_ID*)realloc(self->g_R_col_idx_H1\
, self->g_R_col_idx_max_len_H1*sizeof(EDGE_ID));
}
//if (this_ws->pivot == 12631){
//	printf("\nR is stored at R_col_idx %d: ", self->g_R_col_idx_H1_ptr);
//	for (EDGE_ID mm = 0; mm < this_ws->len; mm++){
//	      printf("%d, ", orig[mm]);
//	}
//	getchar();
//}
//printf("\nAdding pivot %d at %d", this_ws->pivot, self->g_R_col_idx_H1_ptr);
// Record where this pivot's column starts, then copy the column in.
self->g_pivots_H1[this_ws->pivot] = self->g_R_col_idx_H1_ptr;
self->g_R_col_idx_H1[self->g_R_col_idx_H1_ptr] = self->g_R_len_H1;
for (EDGE_ID mm = 0; mm < this_ws->len; mm++){
self->g_R_H1[self->g_R_len_H1++] = orig[mm];
//printf("\nadded %d", self->g_R_H1[self->g_R_len_H1 - 1]);
}
// End sentinel: next column's start doubles as this column's end.
self->g_R_col_idx_H1[self->g_R_col_idx_H1_ptr+1] = self->g_R_len_H1;
// Birth = filtration value of the pivot edge; death = value of the
// triangle's first key edge (the triangle that killed the cycle).
PAR birth = self->g_edge_parameter[this_ws->pivot];
PAR death = self->g_edge_parameter[this_ws->triangle.key1];
//#ifdef HOM_CYCLES
if (self->g_compute_cycles){
// Remember which triangle paired with this pivot edge so the
// homology-cycle pass can walk back from pivots to cofaces.
self->g_H1_pivot_of[this_ws->pivot].coface.key1 = this_ws->triangle.key1;
self->g_H1_pivot_of[this_ws->pivot].coface.key2 = this_ws->triangle.key2;
//if (this_ws->pivot == 2879)
//printf("\nAdding pivot %d with simplex (%d, %d)", this_ws->pivot\
//                                          , this_ws->triangle.key2\
//                                          );
}
//#endif
//printf("\n%lf, %lf", birth, death);
// Zero-persistence pairs (death == birth) are not recorded.
if (death != birth){
//printf("\n(%d, %d) has pivot %d at (%lf, %lf)", this_ws->triangle.key1\
//                                    , this_ws->triangle.key2\
//                                    , this_ws->pivot\
//                                    , birth\
//                                    , death\
//                                    );
self->g_homH1_pers[self->g_homH1_pers_len].birth_edge = this_ws->pivot;
self->g_homH1_pers[self->g_homH1_pers_len].death_triangle_key1 = this_ws->triangle.key1;
self->g_homH1_pers[self->g_homH1_pers_len++].R_col_idx = self->g_R_col_idx_H1_ptr;
// Grow after the write: the next write lands inside the new bounds.
if (self->g_homH1_pers_len == self->g_homH1_pers_max_len){
self->g_homH1_pers_max_len += 100;
self->g_homH1_pers = (homH1_pers*)realloc(self->g_homH1_pers\
, self->g_homH1_pers_max_len*sizeof(homH1_pers));
}
//if (self->g_filetype == 1){
//	fprintf(self->g_homH1_pers_file, "%0.12lf, %0.12lf\n", sqrt(birth), sqrt(death));
//}
//else{
//	fprintf(self->g_homH1_pers_file, "%0.12lf, %0.12lf\n", birth, death);
//}
//for (EDGE_ID mm = 0; mm < this_ws->len; mm++){
//
//      fprintf(self->g_homH1_cycles_file, "%d, %d,", self->g_edges_list[orig[mm]][0]\
//                                      , self->g_edges_list[orig[mm]][1]);
//      //printf("\nadded %d", self->g_R_H1[self->g_R_len_H1 - 1]);
//}
//
//fprintf(self->g_homH1_cycles_file, "\n");
}
}
/*
 * Compute the H0 birth cycle associated with edge bo_idx.
 *
 * Seeds the temporary chain with the two endpoint vertices of the edge,
 * then repeatedly cancels the largest remaining vertex (the pivot) against
 * the stored reduced H0 column that owns that pivot, while collecting the
 * edges used into g_temp_V_primary (the V column).  Terminates when the
 * chain reduces to zero; the collected V is then reduced mod 2 by
 * reduce_temp_V_H0.
 *
 * Uses a double-buffered scratch array (g_temp_R_birth_cycles.RR holds two
 * halves of max_len each; 'original' selects the live half).
 *
 * NOTE(review): assumes g_edges_list stores each edge's two vertex ids in
 * ascending order so the seed chain is sorted — TODO confirm at the point
 * where g_edges_list is built.
 */
void get_birth_cycle(filtration* self, EDGE_ID bo_idx){
// V starts with the birth edge itself.
self->g_temp_V_primary.len = 1;
self->g_temp_V_primary.VV[0] = bo_idx;
// Seed the chain with the boundary of the edge: its two vertices.
self->g_temp_R_birth_cycles.original = 0;
self->g_temp_R_birth_cycles.RR[0] = self->g_edges_list[2*bo_idx];
self->g_temp_R_birth_cycles.RR[1] = self->g_edges_list[2*bo_idx+1];
self->g_temp_R_birth_cycles.len = 2;
EDGE_ID* original_result;
EDGE_ID* scratch_result;
EDGE_ID j, k, count, possible_len, ro, red_simp_len, pivot;
EDGE_ID* red_start;
self->g_depth = 0;
#ifdef ADAPTIVE_V_STORAGE
// Reset the per-call list of pivots whose V columns should be cached.
self->g_store_V_for_len = 0;
#endif
while (self->g_temp_R_birth_cycles.len){
original_result = self->g_temp_R_birth_cycles.RR \
+ (self->g_temp_R_birth_cycles.original)*self->g_temp_R_birth_cycles.max_len;
// Pivot = largest vertex still in the (sorted) chain.
pivot = original_result[self->g_temp_R_birth_cycles.len-1];
bo_idx = self->g_H0_pivot_of[pivot].coface;
// FIND THE V RECURSIVELY
find_V_recursively_edges(self, bo_idx, pivot);
// Locate the stored reduced H0 column whose pivot matches.
ro = (EDGE_ID)self->g_pivots_H0[pivot];
red_simp_len = self->g_R_col_indices_H0[ro+1] - \
self->g_R_col_indices_H0[ro];
red_start = self->g_R_sparse_H0 + self->g_R_col_indices_H0[ro];
// Check for overflow
possible_len = self->g_temp_R_birth_cycles.len + red_simp_len;
if (possible_len > self->g_temp_R_birth_cycles.max_len - 1){
// If the live half is the upper one, shift it down before realloc
// so data survives the buffer growth; then both halves are re-laid.
if (self->g_temp_R_birth_cycles.original){
for (EDGE_ID k = 0; k < self->g_temp_R_birth_cycles.len; k++){
self->g_temp_R_birth_cycles.RR[k] =\
self->g_temp_R_birth_cycles.RR[k + self->g_temp_R_birth_cycles.max_len];
}
}
self->g_temp_R_birth_cycles.max_len = possible_len + 1000;
self->g_temp_R_birth_cycles.RR = (EDGE_ID*)realloc(self->g_temp_R_birth_cycles.RR\
, (2*self->g_temp_R_birth_cycles.max_len)*sizeof(EDGE_ID));
original_result = self->g_temp_R_birth_cycles.RR;
self->g_temp_R_birth_cycles.original = 0;
}
scratch_result = self->g_temp_R_birth_cycles.RR \
+ (1-self->g_temp_R_birth_cycles.original)*self->g_temp_R_birth_cycles.max_len;
// Reduce
// Symmetric difference (mod-2 sum) of two sorted lists: equal
// entries cancel, everything else is merged into scratch.
j = 0;
k = 0;
count = 0;
while ((j < self->g_temp_R_birth_cycles.len) && (k < red_simp_len)){
if (original_result[j] < red_start[k]){
scratch_result[count] = original_result[j];
count = count + 1;
j = j + 1;
}
else if (original_result[j] > red_start[k]){
scratch_result[count] = red_start[k];
count = count + 1;
k = k + 1;
}
else{
j = j + 1;
k = k + 1;
}
}
while (j < self->g_temp_R_birth_cycles.len){
scratch_result[count++] = original_result[j++];
}
while (k < red_simp_len){
scratch_result[count++] = red_start[k++];
}
// Swap buffers: scratch becomes the live half.
self->g_temp_R_birth_cycles.len = count;
self->g_temp_R_birth_cycles.original = 1 - self->g_temp_R_birth_cycles.original;
}
//// Reduce V
reduce_temp_V_H0(self);
}
/*
 * Recursively expand the V column contribution of edge bo_idx (whose H0
 * pivot is bo_pivot), appending every edge visited to g_temp_V_primary.
 *
 * Works like get_birth_cycle's main loop: seeds a local chain with the
 * edge's two vertices, then cancels pivots against stored reduced H0
 * columns, recursing into the edge that owns each new pivot.
 *
 * With ADAPTIVE_V_STORAGE, a previously cached V column for bo_pivot is
 * spl 	copied directly and the recursion is skipped; heavily used pivots are
 * queued in g_store_V_for so their V columns get cached later.
 *
 * NOTE(review): the V_usage counter is incremented at function entry when
 * RECORD_V_USAGE is defined, but only at function exit otherwise — so
 * without RECORD_V_USAGE the cached early-return path does not count a
 * use.  Confirm this asymmetry is intended.
 */
void find_V_recursively_edges(filtration* self, EDGE_ID bo_idx, EDGE_ID bo_pivot){
#ifdef RECORD_V_USAGE
self->g_H0_pivot_of[bo_pivot].V_usage++;
#endif
#ifdef ADAPTIVE_V_STORAGE
// Fast path: a cached V column exists for this pivot; copy and return.
if (self->g_H0_pivot_of[bo_pivot].V_len){
//printf("\nUsing stored cycle for %d", bo_idx);
if ((self->g_temp_V_primary.len + self->g_H0_pivot_of[bo_pivot].V_len) > self->g_temp_V_primary.max_len - 1){
self->g_temp_V_primary.max_len = self->g_temp_V_primary.len + self->g_H0_pivot_of[bo_pivot].V_len + 1000;
self->g_temp_V_primary.VV = (EDGE_ID*)realloc(self->g_temp_V_primary.VV\
, self->g_temp_V_primary.max_len*sizeof(EDGE_ID));
}
for (EDGE_ID mm = 0; mm < self->g_H0_pivot_of[bo_pivot].V_len; mm++){
self->g_temp_V_primary.VV[self->g_temp_V_primary.len++] =\
self->g_H0_pivot_of[bo_pivot].VV[mm];
}
return;
}
#endif
// Local double-buffered chain: halves of res_max_len each.
EDGE_ID res_max_len = 100;
EDGE_ID* result = (EDGE_ID*)malloc((2*res_max_len)*sizeof(EDGE_ID));
int res_original = 0;
EDGE_ID* original_result;
EDGE_ID* scratch_result;
EDGE_ID res_len = 2;
// Seed with the boundary (two vertices) of the edge.
result[0] = self->g_edges_list[2*bo_idx];
result[1] = self->g_edges_list[2*bo_idx+1];
EDGE_ID possible_len;
EDGE_ID* red_start;
EDGE_ID red_simp_len;
EDGE_ID ro;
EDGE_ID j, k, count;
EDGE_ID pivot;
// RECORD THIS IN REDUCTION OPERATION for e_o
self->g_temp_V_primary.VV[self->g_temp_V_primary.len++] = bo_idx;
// Check for overflow
if (self->g_temp_V_primary.len == self->g_temp_V_primary.max_len){
//printf("\nReallocating");
//getchar();
self->g_temp_V_primary.max_len += 100;
self->g_temp_V_primary.VV = (EDGE_ID*)realloc(self->g_temp_V_primary.VV\
, self->g_temp_V_primary.max_len*sizeof(EDGE_ID));
}
while(res_len != 0){
#ifdef ADAPTIVE_V_STORAGE
// Depth counter feeds the adaptive caching heuristic.
self->g_depth++;
#endif
original_result = result + (res_original*res_max_len);
scratch_result = result + ((1-res_original)*res_max_len);
// Pivot = largest vertex remaining in the sorted chain.
pivot = original_result[res_len-1];
// The new pivot is pivot of R(bo_idx)
bo_idx = self->g_H0_pivot_of[pivot].coface;
ro = (EDGE_ID)self->g_pivots_H0[pivot];
red_simp_len = self->g_R_col_indices_H0[ro+1] - \
self->g_R_col_indices_H0[ro];
red_start = self->g_R_sparse_H0 + self->g_R_col_indices_H0[ro];
// Check for overflow
possible_len = res_len + red_simp_len;
if (possible_len > res_max_len - 1){
// Compact live data into the lower half before growing.
if (res_original){
for (k = 0; k < res_len; k++){
result[k] = result[k + res_max_len];
}
}
res_max_len = possible_len + 1000;
result = (EDGE_ID*)realloc(result, (2*res_max_len)*sizeof(EDGE_ID));
original_result = result;
scratch_result = result + res_max_len;
res_original = 0;
}
// Reduce
// Mod-2 sum (symmetric difference) of two sorted vertex lists.
j = 0;
k = 0;
count = 0;
while ((j < res_len) && (k < red_simp_len)){
if (original_result[j] < red_start[k]){
scratch_result[count] = original_result[j];
count = count + 1;
j = j + 1;
}
else if (original_result[j] > red_start[k]){
scratch_result[count] = red_start[k];
count = count + 1;
k = k + 1;
}
else{
j = j + 1;
k = k + 1;
}
}
while (j < res_len){
scratch_result[count++] = original_result[j++];
}
while (k < red_simp_len){
scratch_result[count++] = red_start[k++];
}
res_len = count;
res_original = 1 - res_original;
// Recurse into the edge owning the new pivot, if anything remains.
if (res_len != 0){
find_V_recursively_edges(self, bo_idx, pivot);
}
}
#ifndef RECORD_V_USAGE
self->g_H0_pivot_of[bo_pivot].V_usage++;
#endif
#ifdef ADAPTIVE_V_STORAGE
// Queue this pivot for V caching once it crosses the usage threshold.
if ((self->g_H0_pivot_of[bo_pivot].V_usage > self->g_cycle_usage_thresh)\
&&(!self->g_H0_pivot_of[bo_pivot].V_stored)){
self->g_store_V_for[self->g_store_V_for_len++] = bo_pivot;
if (self->g_store_V_for_len == self->g_store_V_for_max_len){
self->g_store_V_for_max_len += 100;
self->g_store_V_for = (EDGE_ID*)realloc(self->g_store_V_for\
, self->g_store_V_for_max_len*sizeof(EDGE_ID));
}
}
#endif
free(result);
}
/*
 * Compute H2 persistence via parallel column reduction over tetrahedron
 * boundaries, then extract and write homology/birth cycles to the H2
 * cycles file.
 *
 * Phases:
 *   1. allocate workspace, pivot tables and worker threads;
 *   2. stream tetrahedra (from the H2 cohomology pivots) through the
 *      batched parallel reduction (reduce_ws_H2);
 *   3. walk g_homH2_pers and g_H2_undead, writing homology cycles from R
 *      and birth cycles via get_birth_void;
 *   4. tear down threads and free all H2 (and leftover H1) structures.
 *
 * Fixes in this revision:
 *   - the computeH2 timer read the LOCAL start_wall_clock, which was never
 *     filled (clock_gettime wrote to self->g_start_wall_clock), so the
 *     reported time was garbage (UB); the local is now initialized too;
 *   - the cycles file fp2 was never fclose'd (stream leak) and the fopen
 *     result was never checked before fprintf (UB on failure);
 *   - removed an unused, leaked temp_tetra allocation and unused locals.
 */
void compute_H2_homology_cycles(filtration* self){
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
//
// STEP H2.1: Find homology now for the tetrahedrons (H2)
//
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
if (!self->g_suppress_output){
printf("\n\n---------------");
printf("\nComputing H2...");
printf("\n---------------\n");
}
struct timespec start_wall_clock, finish_wall_clock;
// Fill both the shared and the local start timestamps: the timer below
// reads the local, while other code may read self->g_start_wall_clock.
clock_gettime(CLOCK_MONOTONIC, &(self->g_start_wall_clock));
start_wall_clock = self->g_start_wall_clock;
self->g_R_max_len_H2 = 100;
self->g_R_H2 = (simplex*)malloc(self->g_R_max_len_H2*sizeof(simplex));
self->g_R_len_H2 = 0;
self->g_R_col_idx_max_len_H2 = 100;
self->g_R_col_idx_H2 = (EDGE_ID*)malloc(self->g_R_col_idx_max_len_H2*sizeof(EDGE_ID));
self->g_R_col_idx_H2_ptr = 0;
self->g_workspace_size = 1000;
self->g_ws_pre_alloc = 1000;
// Initialize ws counter
self->g_ws_counter = 0;
// H2 workspace structures (double-buffered columns: 2*max_len each)
self->g_workspace_H2 = (simplex**)malloc(self->g_workspace_size*sizeof(simplex*));
// H2 workspace info
self->g_workspace_H2_info = (boundary_H2_ws*)malloc(self->g_workspace_size*sizeof(boundary_H2_ws));
for (int i = 0; i < self->g_workspace_size; i++){
self->g_workspace_H2_info[i].max_len = self->g_ws_pre_alloc;
self->g_workspace_H2[i] = (simplex*)malloc(2*self->g_workspace_H2_info[i].max_len*sizeof(simplex));
self->g_workspace_H2_info[i].trivial_boundary = (simplex*)malloc(4*sizeof(simplex));
}
// Pivots
self->g_H2_pivots = (H2_pivots**)malloc(self->g_n_valid_edges*sizeof(H2_pivots*));
self->g_H2_pivots_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
self->g_H2_pivots_max_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
for (EDGE_ID mm = 0; mm < self->g_n_valid_edges; mm++){
self->g_H2_pivots_max_len[mm] = 5;
self->g_H2_pivots[mm] = (H2_pivots*)malloc(self->g_H2_pivots_max_len[mm]*sizeof(H2_pivots));
}
// Convenient info for pers pairs
self->g_homH2_pers_len = 0;
self->g_homH2_pers_max_len = 100;
self->g_homH2_pers = (homH2_pers*)malloc(self->g_homH2_pers_max_len*sizeof(homH2_pers));
// Temporary space for birth cycles
self->g_temp_V_H2_primary.max_len = 10;
self->g_temp_V_H2_primary.VV = (simplex*)malloc(self->g_temp_V_H2_primary.max_len*sizeof(simplex));
self->g_temp_V_H2_primary.len = 0;
// Temporary space for birth cycles
self->g_temp_R_H2_birth_cycles.max_len = 100;
self->g_temp_R_H2_birth_cycles.RR = (EDGE_ID*)malloc(2*self->g_temp_R_H2_birth_cycles.max_len*sizeof(EDGE_ID));
self->g_temp_R_H2_birth_cycles.original = 0;
self->g_temp_R_H2_birth_cycles.len = 0;
#ifdef ADAPTIVE_V_STORAGE
// Create pointers to store V
for (EDGE_ID mm = 0; mm < self->g_n_valid_edges; mm++){
self->g_H1_pivot_of[mm].V_usage = 0;
self->g_H1_pivot_of[mm].V_stored = 0;
self->g_H1_pivot_of[mm].V_len = 0;
self->g_H1_pivot_of[mm].VV = NULL;
}
// Create pointers to store V per extraction call
self->g_store_V_for_len = 0;
self->g_store_V_for_max_len = 10;
self->g_store_V_for = (EDGE_ID*)malloc(self->g_store_V_for_max_len*sizeof(EDGE_ID));
#endif
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
self->g_all_V_stored_num = 0;
self->g_all_V_stored_max_num = 10;
self->g_all_V_H1_stored = (cyc_info_H2*)malloc(self->g_all_V_stored_max_num*sizeof(cyc_info_H2));
}
//#endif
////////////////////////////////////////////////////////////////
//
// Allocate jobs for parallel H2
//
////////////////////////////////////////////////////////////////
self->g_jobs = (int*)malloc((self->g_cpu_count + 1)*sizeof(int));
allocate_jobs(self, self->g_workspace_size);
self->g_threads = (pthread_t *)malloc(self->g_cpu_count*sizeof(pthread_t));
int rtn;
if ((rtn = pthread_mutex_init(&(self->g_thread_lock), NULL)) !=0)
fprintf(stderr, "pthread_mutex_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_boss), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
if ((rtn = pthread_cond_init(&(self->g_start_workers), NULL)) !=0)
fprintf(stderr, "pthread_cond_init %s", strerror(rtn)), exit(-1);
// Initialize thread creation
self->g_thread_id = 0;
self->g_sleeping_threads = 0;
self->g_delete_threads = 0;
for (int i = 0; i < self->g_cpu_count; i++){
if ((rtn = pthread_create( \
&(self->g_threads[i]) \
, NULL \
, reduce_with_complex_H2 \
, (void*)self)!= 0))
fprintf(stderr, "pthread_create %d", rtn), exit(-1);
}
// Wait for threads to be initialized
pthread_mutex_lock(&(self->g_thread_lock));
while(self->g_sleeping_threads != self->g_cpu_count){
pthread_cond_wait(&(self->g_start_boss) \
, &(self->g_thread_lock));
}
////////////////////////////////
self->g_new_debug2 = 0;
EDGE_ID o_cd;
self->g_ws_counter = 0;
// Stream tetrahedra into the workspace; reduce whenever a batch fills.
for (EDGE_ID o_ab = 0; o_ab < self->g_n_valid_edges; o_ab++){
if (!self->g_H2_cohom_pivots_len[o_ab]){
continue;
}
for (VERT_ID mm = 0; mm < self->g_H2_cohom_pivots_len[o_ab]; mm++){
o_cd = self->g_H2_cohom_pivots[o_ab][mm].key2;
// Workspace attributes
boundary_H2_ws* this_ws = self->g_workspace_H2_info + self->g_ws_counter;
// Initially, the original is at 0
this_ws->original = 0;
this_ws->flag_first = 1;
// Parallel control flags
this_ws->flag_empty = 0;
this_ws->flag_red_w_complex = 0;
this_ws->flag_append_to_complex = 1;
this_ws->tetrahedron.key1 = o_ab;
this_ws->tetrahedron.key2 = o_cd;
// Initial length of boundary
this_ws->len = 4;
compute_boundary_tetra(self, this_ws->tetrahedron, self->g_workspace_H2[self->g_ws_counter]);
// Pivot = last (largest) of the four boundary triangles.
this_ws->pivot = self->g_workspace_H2[self->g_ws_counter][3];
self->g_ws_counter++;
if (self->g_ws_counter == self->g_workspace_size){
reduce_ws_H2(self);
}
}
}
// Reduction of final batch
while (self->g_ws_counter){
allocate_jobs(self, self->g_ws_counter);
reduce_ws_H2(self);
}
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_computeH2 = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_computeH2 += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
if (!self->g_suppress_output){
printf("\nComputed H2.");
}
////////////////////////
// HOMOLOGY CYCLES
////////////////////////
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
FILE* fp2 = fopen(self->g_homH2_cycles_file, "w");
if (!fp2)
fprintf(stderr, "fopen failed for H2 cycles file"), exit(-1);
PAR birth, death;
self->g_n_H1_stored_V = 0;
// Go over the pers pairs of features that died and compute the cycles
for (EDGE_ID mm = 0; mm < self->g_homH2_pers_len; mm++){
self->g_n_H2_birth_cycles++;
//printf("\nFinding cycle for (%d, %d)", self->g_homH2_pers[mm].birth_simplex.key1\
//                , self->g_homH2_pers[mm].birth_simplex.key2);
//getchar();
// filetype 1 stores squared distances; report their square roots.
if (self->g_filetype == 1){
birth = sqrt(self->g_edge_parameter[self->g_homH2_pers[mm].birth_simplex.key1]);
death = sqrt(self->g_edge_parameter[self->g_homH2_pers[mm].death_edge]);
}
else{
birth = self->g_edge_parameter[self->g_homH2_pers[mm].birth_simplex.key1];
death = self->g_edge_parameter[self->g_homH2_pers[mm].death_edge];
}
fprintf(fp2, "%lf, %lf", birth, death);
fprintf(fp2, "\nhomology cycle");
// The homology cycle is the stored R column for this pair.
for (EDGE_ID bb = self->g_R_col_idx_H2[self->g_homH2_pers[mm].R_col_idx]\
; bb < self->g_R_col_idx_H2[self->g_homH2_pers[mm].R_col_idx + 1]\
; bb++){
fprintf(fp2, ", %d, %d, %d", self->g_edges_list[2*self->g_R_H2[bb].key1]\
, self->g_edges_list[2*self->g_R_H2[bb].key1+1]\
, self->g_R_H2[bb].key2\
);
}
// Always write the birth cycles to file for now
fprintf(fp2, "\nbirth cycle");
//printf("\nGetting birth cycle %d out of %d", mm, self->g_homH1_pers_len);
//getchar();
#ifdef ADAPTIVE_V_STORAGE
self->g_store_V_for_len = 0;
#endif
//printf("\nGetting void");
get_birth_void(self, self->g_homH2_pers[mm].birth_simplex);
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
// Keep a copy of the birth cycle for later minimization.
self->g_all_V_H1_stored[self->g_all_V_stored_num].boundary =\
(simplex*)malloc(self->g_temp_V_H2_primary.len*sizeof(simplex));
self->g_all_V_H1_stored[self->g_all_V_stored_num].len = self->g_temp_V_H2_primary.len;
self->g_all_V_H1_stored[self->g_all_V_stored_num].perspair[0] = birth;
self->g_all_V_H1_stored[self->g_all_V_stored_num].perspair[1] = death;
self->g_all_V_H1_stored[self->g_all_V_stored_num].updated_birth = birth;
}
//#endif
for (EDGE_ID nn = 0; nn < self->g_temp_V_H2_primary.len; nn++){
fprintf(fp2, ", %d, %d, %d", self->g_edges_list[2*self->g_temp_V_H2_primary.VV[nn].key1]\
, self->g_edges_list[2*self->g_temp_V_H2_primary.VV[nn].key1+1]\
, self->g_temp_V_H2_primary.VV[nn].key2\
);
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
self->g_all_V_H1_stored[self->g_all_V_stored_num].boundary[nn] = self->g_temp_V_H2_primary.VV[nn];
}
//#endif
}
fprintf(fp2, "\n");
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
self->g_all_V_stored_num++;
if (self->g_all_V_stored_num == self->g_all_V_stored_max_num){
self->g_all_V_stored_max_num += 100;
self->g_all_V_H1_stored = (cyc_info_H2*)realloc(self->g_all_V_H1_stored\
, self->g_all_V_stored_max_num*sizeof(cyc_info_H2));
}
}
//#endif
#ifdef ADAPTIVE_V_STORAGE
store_V_H1(self);
#endif
}
// Go over the pers pairs of undead features and compute the birth cycles
for (EDGE_ID mm = 0; mm < self->g_H2_undead_ptr; mm++){
self->g_n_H2_birth_cycles++;
if (self->g_filetype == 1){
birth = sqrt(self->g_edge_parameter[self->g_H2_undead[mm].key1]);
}
else{
birth = self->g_edge_parameter[self->g_H2_undead[mm].key1];
}
// Undead (essential) classes get death = -1.
fprintf(fp2, "%lf, -1", birth);
#ifdef ADAPTIVE_V_STORAGE
self->g_store_V_for_len = 0;
#endif
get_birth_void(self, self->g_H2_undead[mm]);
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
self->g_all_V_H1_stored[self->g_all_V_stored_num].boundary =\
(simplex*)malloc(self->g_temp_V_H2_primary.len*sizeof(simplex));
self->g_all_V_H1_stored[self->g_all_V_stored_num].len = self->g_temp_V_H2_primary.len;
self->g_all_V_H1_stored[self->g_all_V_stored_num].perspair[0] = birth;
self->g_all_V_H1_stored[self->g_all_V_stored_num].perspair[1] = -1;
self->g_all_V_H1_stored[self->g_all_V_stored_num].updated_birth = birth;
}
//#endif
fprintf(fp2, "\nbirth cycle");
for (EDGE_ID nn = 0; nn < self->g_temp_V_H2_primary.len; nn++){
fprintf(fp2, ", %d, %d, %d", self->g_edges_list[2*self->g_temp_V_H2_primary.VV[nn].key1]\
, self->g_edges_list[2*self->g_temp_V_H2_primary.VV[nn].key1+1]\
, self->g_temp_V_H2_primary.VV[nn].key2\
);
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
self->g_all_V_H1_stored[self->g_all_V_stored_num].boundary[nn] = self->g_temp_V_H2_primary.VV[nn];
}
//#endif
}
fprintf(fp2, "\n");
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
self->g_all_V_stored_num++;
if (self->g_all_V_stored_num == self->g_all_V_stored_max_num){
self->g_all_V_stored_max_num += 100;
self->g_all_V_H1_stored = (cyc_info_H2*)realloc(self->g_all_V_H1_stored\
, self->g_all_V_stored_max_num*sizeof(cyc_info_H2));
}
}
//#endif
#ifdef ADAPTIVE_V_STORAGE
store_V_H1(self);
#endif
}
// BUG FIX: fp2 was previously never closed, leaking the stream and
// deferring the flush of the cycle data to process exit.
fclose(fp2);
if (!self->g_suppress_output){
printf("\nComputed birth cycles.");
}
/////////////////////////
// Cancel the threads used in getting next during reduction
/////////////////////////
self->g_delete_threads = 1;
pthread_cond_broadcast(&(self->g_start_workers));
pthread_mutex_unlock(&(self->g_thread_lock));
for (int i = 0; i < self->g_cpu_count; i++){
pthread_join(self->g_threads[i], NULL);
}
free(self->g_jobs);
free(self->g_threads);
for (int i = 0; i < self->g_workspace_size; i++){
free(self->g_workspace_H2_info[i].trivial_boundary);
free(self->g_workspace_H2[i]);
}
free(self->g_workspace_H2);
free(self->g_workspace_H2_info);
free(self->g_R_H2);
free(self->g_R_col_idx_H2);
free(self->g_H2_pivots);
free(self->g_homH2_pers);
free(self->g_temp_V_H2_primary.VV);
free(self->g_temp_R_H2_birth_cycles.RR);
// H1 reduction structures are no longer needed past this point.
free(self->g_R_H1);
free(self->g_R_col_idx_H1);
free(self->g_pivots_H1);
#ifdef ADAPTIVE_V_STORAGE
free(self->g_store_V_for);
#ifdef RECORD_V_USAGE
FILE* fp3 = fopen(self->g_V_H1_usage_file, "w");
#endif
for (EDGE_ID mm = 0; mm < self->g_n_valid_edges; mm++){
if (self->g_H1_pivot_of[mm].V_len){
#ifdef RECORD_V_USAGE
fprintf(fp3, "%d, %d\n"\
, self->g_H1_pivot_of[mm].V_usage\
, self->g_H1_pivot_of[mm].V_depth);
#endif
free(self->g_H1_pivot_of[mm].VV);
}
}
#ifdef RECORD_V_USAGE
fclose(fp3);
#endif
#endif
//#ifdef HOM_CYCLES
free(self->g_H1_pivot_of);
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_H2cycles = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_H2cycles += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
//#endif
if (!self->g_suppress_output){
printf("\nQUITTING H2 cycle computation");
}
//#ifdef MINIMIZE_BIRTH_CYCLES
if (self->g_reduce_cyc_lengths){
if (!self->g_suppress_output){
printf("\nMinimizing birth cycles...");
}
clock_gettime(CLOCK_MONOTONIC, &start_wall_clock);
minimize_birth_cycles_H1_v2(self\
, self->g_all_V_H1_stored\
, self->g_all_V_stored_num\
, self->g_minimal_V_H1_file\
, self->g_V_H1_birthcyc_lens_file\
, self->g_minimal_V_H1_birthcyc_lens_file\
);
clock_gettime(CLOCK_MONOTONIC, &finish_wall_clock);
self->g_timer_minimize_H2cycles = (finish_wall_clock.tv_sec - start_wall_clock.tv_sec);
self->g_timer_minimize_H2cycles += (finish_wall_clock.tv_nsec - start_wall_clock.tv_nsec) / 1000000000.0;
}
//#endif
}
/*
 * Run one reduction round over the current batch of H2 workspace columns.
 *
 * Sequence (caller holds g_thread_lock):
 *   1. wake the worker threads (reduce_with_complex_H2) and wait until all
 *      of them report back via g_start_boss;
 *   2. perform the serial pairwise reduction of workspace columns against
 *      each other (reduce_with_self_H2);
 *   3. for each slot: columns flagged for appending are committed to R
 *      (update_R_H2); emptied columns are dropped; everything else is
 *      compacted to the front of the workspace for the next round.
 *
 * On return g_ws_counter holds the number of still-unfinished columns.
 */
void reduce_ws_H2(filtration* self){
// Parallel phase: broadcast, then sleep until every worker has finished.
self->g_processed_threads = 0;
pthread_cond_broadcast(&(self->g_start_workers));
while (self->g_processed_threads != self->g_cpu_count){
pthread_cond_wait(&(self->g_start_boss) \
,&(self->g_thread_lock));
}
// Serial phase: cancel matching pivots among the workspace columns.
reduce_with_self_H2(self);
// Commit / drop / compact.
int n_live = 0;
for (int slot = 0; slot < self->g_ws_counter; slot++){
boundary_H2_ws* info = self->g_workspace_H2_info + slot;
if (info->flag_append_to_complex){
// Fully reduced with a new pivot: append to the global R.
update_R_H2(self, slot);
continue;
}
if (info->flag_empty){
// Reduced to zero: nothing to keep.
continue;
}
// Still in progress: swap the column and its info to position n_live.
simplex* col_swap = self->g_workspace_H2[n_live];
self->g_workspace_H2[n_live] = self->g_workspace_H2[slot];
self->g_workspace_H2[slot] = col_swap;
boundary_H2_ws info_swap = self->g_workspace_H2_info[n_live];
self->g_workspace_H2_info[n_live] = self->g_workspace_H2_info[slot];
self->g_workspace_H2_info[slot] = info_swap;
// A surviving column is by construction non-empty.
self->g_workspace_H2_info[n_live].flag_empty = 0;
n_live += 1;
}
self->g_ws_counter = n_live;
}
/*
 * Serial phase of the H2 batch reduction: cancel equal pivots among the
 * workspace columns themselves.
 *
 * For each column i, scan the earlier columns m < i.  Pivots (triangles,
 * ordered lexicographically on (key1, key2)) are compared:
 *   - m's pivot larger:  skip m, unless m is about to be reduced against
 *     the global complex, in which case column i must wait (its append
 *     flag is cleared and it stays in the workspace for the next round);
 *   - m's pivot smaller: skip m;
 *   - equal: add column m into column i (mod-2 merge of the two sorted
 *     simplex lists into the scratch half of i's double buffer), then
 *     re-derive i's pivot and restart the scan from m = 0.
 * After each merge the new pivot is tested for a trivial pair (its
 * cohomology low is its own coface) or an existing pivot in the global R;
 * either case schedules a reduce-with-complex step for the parallel phase
 * and removes i from this round.
 *
 * Fixes in this revision: removed an unused local `idx` and an inner
 * `compare` declaration that shadowed the outer one (no behavior change).
 */
void reduce_with_self_H2(filtration* self){
int compare;
int i, m;
EDGE_ID count, j, k;
for (i=0; i < self->g_ws_counter; i++){
boundary_H2_ws* this_ws = self->g_workspace_H2_info + i;
this_ws->flag_reduce = 0;
// If the simplex has already been reduced to 0
// then continue
if (this_ws->flag_empty){
this_ws->flag_append_to_complex = 0;
continue;
}
simplex* orig = self->g_workspace_H2[i] + this_ws->original*this_ws->max_len;
m = 0;
while (m < i){
boundary_H2_ws* m_ws = self->g_workspace_H2_info + m;
if (m_ws->flag_empty){
m++;
continue;
}
simplex* original_m = self->g_workspace_H2[m] + m_ws->original*m_ws->max_len;
// Re-derive i's live half: a previous merge may have swapped buffers.
orig = self->g_workspace_H2[i] + this_ws->original*this_ws->max_len;
// Lexicographic pivot comparison: 1 => m's pivot smaller,
// 0 => m's pivot larger, -1 => equal.
if (m_ws->pivot.key1 < this_ws->pivot.key1) compare = 1;
else if (m_ws->pivot.key1 > this_ws->pivot.key1) compare = 0;
else{
if (m_ws->pivot.key2 < this_ws->pivot.key2) compare = 1;
else if (m_ws->pivot.key2 > this_ws->pivot.key2) compare = 0;
else compare = -1;
}
//if (m_ws->pivot > this_ws->pivot){
if (compare == 0){
// m has the larger pivot; if m will be reduced against the
// complex, i's pivot info may become stale — defer i.
if (m_ws->flag_red_w_complex){
this_ws->flag_append_to_complex = 0;
break;
}
m++;
continue;
}
//if (m_ws->pivot < this_ws->pivot){
if (compare == 1){
m++;
continue;
}
// Equal pivots from here on.
if (m_ws->flag_red_w_complex){
this_ws->flag_append_to_complex = 0;
break;
}
// Grow i's double buffer if the merge could overflow it.
if (this_ws->len + m_ws->len > this_ws->max_len ){
if (this_ws->original){
// Compact the live half down before realloc re-lays the halves.
for (EDGE_ID mm = 0; mm < this_ws->len; mm++){
self->g_workspace_H2[i][mm] = self->g_workspace_H2[i][mm + this_ws->max_len];
}
this_ws->original = 0;
}
this_ws->max_len = this_ws->len + m_ws->len + 1000;
self->g_workspace_H2[i] = (simplex*)realloc(self->g_workspace_H2[i]\
, 2*this_ws->max_len*sizeof(simplex));
orig = self->g_workspace_H2[i];
}
simplex* scratch = self->g_workspace_H2[i] + (1-this_ws->original)*this_ws->max_len;
// Store the result in scratch: mod-2 sum of two sorted simplex lists
// (equal entries cancel, the rest are merged in order).
count = 0;
j = 0;
k = 0;
while ((j < this_ws->len) && (k < m_ws->len)){
if (orig[j].key1 < original_m[k].key1) compare = 1;
else if (orig[j].key1 > original_m[k].key1) compare = 0;
else{
if (orig[j].key2 < original_m[k].key2) compare = 1;
else if (orig[j].key2 > original_m[k].key2) compare = 0;
else compare = -1;
}
//if (orig[j] < original_m[k]){
if (compare == 1){
scratch[count++] = orig[j++];
}
//else if (orig[j] > original_m[k]){
else if (compare == 0){
scratch[count++] = original_m[k++];
}
else{
j++;
k++;
}
}
while (j < this_ws->len){
scratch[count++] = orig[j++];
}
while (k < m_ws->len){
scratch[count++] = original_m[k++];
}
this_ws->len = count;
this_ws->original = 1 - this_ws->original;
if (!count){
// Column vanished: drop it from this round entirely.
this_ws->flag_append_to_complex = 0;
this_ws->flag_empty = 1;
break;
}
this_ws->pivot = scratch[this_ws->len-1];
coboundary_H2 temp;
temp.triangle.key1 = this_ws->pivot.key1;
temp.triangle.key2 = this_ws->pivot.key2;
find_H2_cohom_low(self, &temp);
// Check if pivot is trivial or if it is pivot in H2
// To check trivial, check if maximum triangle in low of cob of triangle is triangle
if ((temp.low.key1 == temp.triangle.key1)\
&&(self->g_edges_list[2*temp.low.key2+1] == temp.triangle.key2)){
// Trivial pair: schedule reduction with the boundary of temp.low.
compute_boundary_tetra(self, temp.low, this_ws->trivial_boundary);
this_ws->R_col_idx = 0;
this_ws->reduce_with_len = 4;
this_ws->flag_reduce = 1;
this_ws->flag_red_w_complex = 1;
this_ws->flag_append_to_complex = 0;
break;
}
else{
if (self->g_H2_pivots_len[this_ws->pivot.key1]){
EDGE_ID idx = search_H2_pivots(self->g_H2_pivots[this_ws->pivot.key1]\
, 0\
, self->g_H2_pivots_len[this_ws->pivot.key1] - 1\
, this_ws->pivot.key2 \
, self->g_n_valid_edges\
);
// g_n_valid_edges doubles as the "not found" sentinel.
if (idx != self->g_n_valid_edges){
this_ws->R_col_idx = self->g_H2_pivots[this_ws->pivot.key1][idx].col_idx;
this_ws->reduce_with_len = self->g_R_col_idx_H2[this_ws->R_col_idx+1] -\
self->g_R_col_idx_H2[this_ws->R_col_idx];
this_ws->flag_reduce = 1;
this_ws->flag_red_w_complex = 1;
this_ws->flag_append_to_complex = 0;
break;
}
}
}
// The pivot changed: restart the scan over earlier columns.
m = 0;
}//End of m loop
}
}//End of red_ws_w_self_single
/*
 * Worker-thread entry point for the parallel H2 reduction pass.
 *
 * Protocol: each worker takes a 1-based thread id under g_thread_lock, then
 * loops forever: it parks on g_start_workers (signalling g_start_boss once
 * every worker is asleep), wakes to process its slice of workspaces
 * [g_jobs[tid-1], g_jobs[tid]), and finally increments g_processed_threads
 * under the lock.  Setting g_delete_threads before the broadcast makes the
 * worker exit via pthread_exit.
 *
 * Per workspace it repeatedly cancels the current column's pivot, either
 * against the boundary of a "trivial" cofacet (apparent pair detected through
 * find_H2_cohom_low) or against the already-reduced R column registered for
 * that pivot, until the column is empty or its pivot is new.  The workspace
 * buffer is double-buffered: 'original' selects which half currently holds
 * the column; each merge writes into the other half.
 */
void* reduce_with_complex_H2(void* arg){
    filtration* self = arg;
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, 0);
    pthread_mutex_lock(&(self->g_thread_lock));
    /* Thread ids are handed out 1..N under the lock; tid-1/tid bound this
       worker's job slice below. */
    int tid = ++self->g_thread_id;
    simplex *red_start;
    EDGE_ID j, k ,count;
    for (;;){
        self->g_sleeping_threads++;
        /* Last worker to go to sleep wakes the boss. */
        if (self->g_sleeping_threads == self->g_cpu_count)
            pthread_cond_signal(&(self->g_start_boss));
        pthread_cond_wait(&(self->g_start_workers), &(self->g_thread_lock));
        if (self->g_delete_threads){
            pthread_mutex_unlock(&(self->g_thread_lock));
            pthread_exit(NULL);
        }
        self->g_sleeping_threads--;
        pthread_mutex_unlock(&(self->g_thread_lock));
        for (int i = self->g_jobs[tid - 1]; i < self->g_jobs[tid]; i++){
            boundary_H2_ws* this_ws = self->g_workspace_H2_info + i;
            if (this_ws->flag_empty){
                /* We are sure that we will exit only if there is no reduction
                   required with existing complex or with trivial pair */
                this_ws->flag_red_w_complex = 0;
                this_ws->flag_append_to_complex = 0;
                continue;
            }
            if (this_ws->flag_first){
                /* First visit: decide how the current pivot must be cancelled. */
                this_ws->flag_first = 0;
                coboundary_H2 temp;
                temp.triangle.key1 = this_ws->pivot.key1;
                temp.triangle.key2 = this_ws->pivot.key2;
                find_H2_cohom_low(self, &temp);
                this_ws->flag_reduce = 0;
                /* Check if pivot is trivial or if it is pivot in H2.
                   To check trivial, check if maximum triangle in low of cob of
                   triangle is triangle. */
                if ((temp.low.key1 == temp.triangle.key1)\
                    &&(self->g_edges_list[2*temp.low.key2+1] == temp.triangle.key2)){
                    /* In which case, reduce with boundary of temp.low
                       (a tetrahedron boundary: always 4 triangles).
                       R_col_idx == 0 is used as the "trivial boundary" marker. */
                    compute_boundary_tetra(self, temp.low, this_ws->trivial_boundary);
                    this_ws->R_col_idx = 0;
                    this_ws->reduce_with_len = 4;
                    this_ws->flag_reduce = 1;
                }
                else{
                    if (self->g_H2_pivots_len[this_ws->pivot.key1]){
                        /* Look the pivot up among registered H2 pivots;
                           g_n_valid_edges doubles as the "not found" sentinel. */
                        EDGE_ID idx = search_H2_pivots(self->g_H2_pivots[this_ws->pivot.key1]\
                                        , 0\
                                        , self->g_H2_pivots_len[this_ws->pivot.key1] - 1\
                                        , this_ws->pivot.key2 \
                                        , self->g_n_valid_edges\
                                        );
                        if (idx != self->g_n_valid_edges){
                            this_ws->R_col_idx = self->g_H2_pivots[this_ws->pivot.key1][idx].col_idx;
                            this_ws->reduce_with_len = self->g_R_col_idx_H2[this_ws->R_col_idx+1] -\
                                                       self->g_R_col_idx_H2[this_ws->R_col_idx];
                            this_ws->flag_reduce = 1;
                        }
                    }
                }
            }
            this_ws->flag_red_w_complex = 0;
            this_ws->flag_append_to_complex = 1;
            while(this_ws->flag_reduce){
                simplex* orig = self->g_workspace_H2[i] + this_ws->original*this_ws->max_len;
                /* Worst case the merge keeps every entry of both inputs:
                   grow the double buffer first if that would not fit. */
                if ( this_ws->len + this_ws->reduce_with_len > this_ws->max_len){
                    if (this_ws->original){
                        /* Column lives in the second half; move it to the
                           front so realloc keeps it addressable. */
                        for (EDGE_ID mm = 0; mm < this_ws->len; mm++){
                            self->g_workspace_H2[i][mm] = self->g_workspace_H2[i][mm + this_ws->max_len];
                        }
                        this_ws->original = 0;
                    }
                    this_ws->max_len = this_ws->len + this_ws->reduce_with_len + 1000;
                    self->g_workspace_H2[i] = (simplex*)realloc(self->g_workspace_H2[i]\
                                    , 2*this_ws->max_len*sizeof(simplex));
                    orig = self->g_workspace_H2[i];
                }
                simplex* scratch = self->g_workspace_H2[i] \
                                   + (1-this_ws->original)*this_ws->max_len;
                /* R_col_idx == 0 selects the locally computed trivial
                   tetrahedron boundary; otherwise reduce with the stored R
                   column at that offset. */
                if (!this_ws->R_col_idx){
                    red_start = this_ws->trivial_boundary;
                }
                else{
                    red_start = self->g_R_H2 + self->g_R_col_idx_H2[this_ws->R_col_idx];
                }
                /* Symmetric-difference merge (Z/2 addition) of the two sorted
                   triangle lists, ordered by (key1, key2). */
                count = 0;
                j = 0;
                k = 0;
                int compare;
                while ((j < this_ws->len) && (k < this_ws->reduce_with_len)){
                    if (orig[j].key1 < red_start[k].key1) compare = 1;
                    else if (orig[j].key1 > red_start[k].key1) compare = 0;
                    else{
                        if (orig[j].key2 < red_start[k].key2) compare = 1;
                        else if (orig[j].key2 > red_start[k].key2) compare = 0;
                        else compare = -1;
                    }
                    if (compare == 1){
                        scratch[count++] = orig[j++];
                    }
                    else if (compare == 0){
                        scratch[count++] = red_start[k++];
                    }
                    else{
                        /* Equal simplices cancel mod 2. */
                        j++;
                        k++;
                    }
                }
                while (j < this_ws->len){
                    scratch[count++] = orig[j++];
                }
                while (k < this_ws->reduce_with_len){
                    scratch[count++] = red_start[k++];
                }
                /* Swap buffer roles: scratch becomes the current column. */
                this_ws->original = 1 - this_ws->original;
                this_ws->len = count;
                if (!this_ws->len){
                    /* Column fully cancelled: nothing to append to R. */
                    this_ws->flag_empty = 1;
                    break;
                }
                /* New pivot is the largest remaining triangle. */
                this_ws->pivot = scratch[this_ws->len-1];
                /* Check trivial pers pair */
                coboundary_H2 temp;
                temp.triangle.key1 = this_ws->pivot.key1;
                temp.triangle.key2 = this_ws->pivot.key2;
                find_H2_cohom_low(self, &temp);
                this_ws->flag_reduce = 0;
                /* Check if pivot is trivial or if it is pivot in H2.
                   To check trivial, check if maximum triangle in low of cob of
                   triangle is triangle. */
                if ((temp.low.key1 == temp.triangle.key1)\
                    &&(self->g_edges_list[2*temp.low.key2+1] == temp.triangle.key2)){
                    /* In which case, reduce with boundary of temp.low */
                    compute_boundary_tetra(self, temp.low, this_ws->trivial_boundary);
                    this_ws->R_col_idx = 0;
                    this_ws->reduce_with_len = 4;
                    this_ws->flag_reduce = 1;
                }
                else{
                    if (self->g_H2_pivots_len[this_ws->pivot.key1]){
                        EDGE_ID idx = search_H2_pivots(self->g_H2_pivots[this_ws->pivot.key1]\
                                        , 0\
                                        , self->g_H2_pivots_len[this_ws->pivot.key1] - 1\
                                        , this_ws->pivot.key2 \
                                        , self->g_n_valid_edges\
                                        );
                        if (idx != self->g_n_valid_edges){
                            this_ws->R_col_idx = self->g_H2_pivots[this_ws->pivot.key1][idx].col_idx;
                            this_ws->reduce_with_len = self->g_R_col_idx_H2[this_ws->R_col_idx+1] -\
                                                       self->g_R_col_idx_H2[this_ws->R_col_idx];
                            this_ws->flag_reduce = 1;
                        }
                    }
                }
            }
        }
        pthread_mutex_lock(&(self->g_thread_lock));
        self->g_processed_threads++;
    }
}
/*
 * Commit the reduced column held in workspace 'ws_counter' into the global
 * H2 reduced matrix R: grow R and its column-offset table as needed, register
 * the column's pivot, then append the column entries.
 */
void update_R_H2 (filtration* self, int ws_counter){
    boundary_H2_ws* ws = self->g_workspace_H2_info + ws_counter;
    /* The workspace is double-buffered; 'original' selects the half that
       currently holds the reduced column. */
    simplex* column = self->g_workspace_H2[ws_counter] + ws->original * ws->max_len;

    /* Ensure R has room for the whole column. */
    if (self->g_R_len_H2 + ws->len > self->g_R_max_len_H2){
        self->g_R_max_len_H2 += 1000 + ws->len;
        self->g_R_H2 = (simplex*)realloc(self->g_R_H2,
                                         self->g_R_max_len_H2 * sizeof(simplex));
    }

    /* Advance the column-offset table, keeping one extra slot free so the
       closing offset written below always fits. */
    self->g_R_col_idx_H2_ptr++;
    if (self->g_R_col_idx_H2_ptr == self->g_R_col_idx_max_len_H2 - 1){
        self->g_R_col_idx_max_len_H2 += 1000;
        self->g_R_col_idx_H2 = (EDGE_ID*)realloc(self->g_R_col_idx_H2,
                                self->g_R_col_idx_max_len_H2 * sizeof(EDGE_ID));
    }
    self->g_R_col_idx_H2[self->g_R_col_idx_H2_ptr] = self->g_R_len_H2;

    /* Register (pivot -> this R column) so later reductions can find it. */
    add_H2_pivot(self, ws->tetrahedron, ws->pivot, self->g_R_col_idx_H2_ptr);

    /* Append the column entries, then close the offset interval. */
    for (EDGE_ID t = 0; t < ws->len; t++){
        self->g_R_H2[self->g_R_len_H2++] = column[t];
    }
    self->g_R_col_idx_H2[self->g_R_col_idx_H2_ptr + 1] = self->g_R_len_H2;
}
/*
 * Register an H2 pivot: insert it into the key2-sorted pivot list of row
 * pivot.key1 (growing the list if needed), remember which R column owns it,
 * and record a persistence pair whenever birth != death.
 */
void add_H2_pivot (filtration* self, simplex tetrahedron, simplex pivot, EDGE_ID red_col){
    EDGE_ID row = pivot.key1;

    /* Grow the per-row pivot list in small increments when full. */
    if (self->g_H2_pivots_len[row] == self->g_H2_pivots_max_len[row]){
        self->g_H2_pivots_max_len[row] += 5;
        self->g_H2_pivots[row] = (H2_pivots*)realloc(self->g_H2_pivots[row],
                        self->g_H2_pivots_max_len[row] * sizeof(H2_pivots));
    }

    /* Insertion sort by key2: shift strictly larger entries one slot right,
       then drop the new pivot into the gap. */
    EDGE_ID pos = self->g_H2_pivots_len[row];
    while (pos > 0 && self->g_H2_pivots[row][pos - 1].key2 > pivot.key2){
        self->g_H2_pivots[row][pos] = self->g_H2_pivots[row][pos - 1];
        pos--;
    }
    self->g_H2_pivots[row][pos].key2 = pivot.key2;
    self->g_H2_pivots[row][pos].col_idx = red_col;
    self->g_H2_pivots[row][pos].tetrahedron = tetrahedron;
    self->g_H2_pivots_len[row]++;

    /* PERS PAIRS: record only non-degenerate intervals (birth != death). */
    PAR birth = self->g_edge_parameter[pivot.key1];
    PAR death = self->g_edge_parameter[tetrahedron.key1];
    if (birth != death){
        self->g_homH2_pers[self->g_homH2_pers_len].birth_simplex = pivot;
        self->g_homH2_pers[self->g_homH2_pers_len].death_edge = tetrahedron.key1;
        self->g_homH2_pers[self->g_homH2_pers_len++].R_col_idx = red_col;
        /* Keep at least one free slot for the next append. */
        if (self->g_homH2_pers_len == self->g_homH2_pers_max_len){
            self->g_homH2_pers_max_len += 100;
            self->g_homH2_pers = (homH2_pers*)realloc(self->g_homH2_pers,
                            self->g_homH2_pers_max_len * sizeof(homH2_pers));
        }
    }
}
/*
 * Binary search for 'key2' in arr[l..r] (sorted ascending by .key2).
 * Returns the matching index, or 'max' (the caller's not-found sentinel)
 * when the key is absent.
 */
EDGE_ID search_H2_pivots(H2_pivots* arr, EDGE_ID l, EDGE_ID r, EDGE_ID key2, EDGE_ID max)
{
    while (r >= l) {
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid].key2 == key2){
            return mid;
        }
        if (arr[mid].key2 > key2){
            /* Target can only be left of mid.  EDGE_ID is unsigned, so a
               decrement from 0 would wrap: bail out instead. */
            if (!mid){
                return max;
            }
            r = mid - 1;
        }
        else{
            /* Target can only be right of mid. */
            l = mid + 1;
        }
    }
    /* Range exhausted: key not present. */
    return max;
}
/*
 * Build a representative birth cycle for a void (H2 class) seeded at triangle
 * 'bo_idx'.  Maintains two parallel accumulators:
 *   - g_temp_V_H2_primary: the triangles summed so far (the V column),
 *   - g_temp_R_H2_birth_cycles: the current edge boundary (double-buffered).
 * The boundary is repeatedly cancelled at its pivot edge — against a trivial
 * (apparent-pair) triangle boundary or against the stored H1 R column — until
 * it vanishes; finally the V column is reduced mod 2.
 */
void get_birth_void(filtration* self, simplex bo_idx){
    /* V starts as the seed triangle alone. */
    self->g_temp_V_H2_primary.len = 1;
    self->g_temp_V_H2_primary.VV[0] = bo_idx;
    /* Initiate R with the boundary of bo_idx (3 edges). */
    compute_boundary_triangle(self, bo_idx, self->g_temp_R_H2_birth_cycles.RR);
    self->g_temp_R_H2_birth_cycles.original = 0;
    self->g_temp_R_H2_birth_cycles.len = 3;
    EDGE_ID* original_result;
    EDGE_ID* scratch_result;
    EDGE_ID j, k, count, possible_len, ro, red_simp_len;
    EDGE_ID* red_start;
    /* Scratch space for a single triangle boundary (always 3 edges). */
    EDGE_ID* trivial_boundary = (EDGE_ID*)malloc(3*sizeof(EDGE_ID));
    EDGE_ID bo_pivot;
#ifdef ADAPTIVE_V_STORAGE
    self->g_store_V_for_len = 0;
#endif
    self->g_depth = 0;
    while (self->g_temp_R_H2_birth_cycles.len){
        /* 'original' selects which half of the double buffer holds R now. */
        original_result = self->g_temp_R_H2_birth_cycles.RR \
            + (self->g_temp_R_H2_birth_cycles.original)*self->g_temp_R_H2_birth_cycles.max_len;
        /* Pivot edge = largest (last) entry of the sorted boundary. */
        bo_pivot = original_result[self->g_temp_R_H2_birth_cycles.len-1];
        /* Check if the pivot-edge is in a trivial pers pair. */
        if (self->g_coH1_all_lows[bo_pivot].low.key1 == bo_pivot){
            bo_idx = self->g_coH1_all_lows[bo_pivot].low;
            /* Get the boundary of R. */
            compute_boundary_triangle(self, bo_idx, trivial_boundary);
            red_simp_len = 3;
            red_start = trivial_boundary;
            /* The V-operation for this is exactly bo_idx. */
            self->g_temp_V_H2_primary.VV[self->g_temp_V_H2_primary.len++] = bo_idx;
            /* Check for overflow. */
            if (self->g_temp_V_H2_primary.len == self->g_temp_V_H2_primary.max_len){
                self->g_temp_V_H2_primary.max_len += 100;
                self->g_temp_V_H2_primary.VV = (simplex*)realloc(self->g_temp_V_H2_primary.VV\
                    , self->g_temp_V_H2_primary.max_len*sizeof(simplex));
            }
        }
        else{
            /* Pivot belongs to the reduced H1 complex: cancel against its
               stored R column and expand its V contribution recursively. */
            bo_idx = self->g_H1_pivot_of[bo_pivot].coface;
            ro = self->g_pivots_H1[bo_pivot];
            red_simp_len = self->g_R_col_idx_H1[ro+1] - \
                           self->g_R_col_idx_H1[ro];
            red_start = self->g_R_H1 + self->g_R_col_idx_H1[ro];
            /* FIND THE V RECURSIVELY */
            find_V_recursively_triangles(self, bo_idx, bo_pivot);
        }
        /* Check for overflow of R_H2; keep the live half at the front before
           realloc so it stays addressable. */
        possible_len = self->g_temp_R_H2_birth_cycles.len + red_simp_len;
        if (possible_len > self->g_temp_R_H2_birth_cycles.max_len - 1){
            if (self->g_temp_R_H2_birth_cycles.original){
                for (EDGE_ID k = 0; k < self->g_temp_R_H2_birth_cycles.len; k++){
                    self->g_temp_R_H2_birth_cycles.RR[k] =\
                        self->g_temp_R_H2_birth_cycles.RR[k + self->g_temp_R_H2_birth_cycles.max_len];
                }
            }
            self->g_temp_R_H2_birth_cycles.max_len = possible_len + 1000;
            self->g_temp_R_H2_birth_cycles.RR = (EDGE_ID*)realloc(self->g_temp_R_H2_birth_cycles.RR\
                , (2*self->g_temp_R_H2_birth_cycles.max_len)*sizeof(EDGE_ID));
            original_result = self->g_temp_R_H2_birth_cycles.RR;
            self->g_temp_R_H2_birth_cycles.original = 0;
        }
        scratch_result = self->g_temp_R_H2_birth_cycles.RR \
            + (1-self->g_temp_R_H2_birth_cycles.original)*self->g_temp_R_H2_birth_cycles.max_len;
        /* Reduce: symmetric-difference merge (Z/2) of the two sorted lists. */
        j = 0;
        k = 0;
        count = 0;
        while ((j < self->g_temp_R_H2_birth_cycles.len) && (k < red_simp_len)){
            if (original_result[j] < red_start[k]){
                scratch_result[count] = original_result[j];
                count = count + 1;
                j = j + 1;
            }
            else if (original_result[j] > red_start[k]){
                scratch_result[count] = red_start[k];
                count = count + 1;
                k = k + 1;
            }
            else{
                /* Equal edges cancel mod 2. */
                j = j + 1;
                k = k + 1;
            }
        }
        while (j < self->g_temp_R_H2_birth_cycles.len){
            scratch_result[count++] = original_result[j++];
        }
        while (k < red_simp_len){
            scratch_result[count++] = red_start[k++];
        }
        self->g_temp_R_H2_birth_cycles.len = count;
        self->g_temp_R_H2_birth_cycles.original = 1 - self->g_temp_R_H2_birth_cycles.original;
    }
    /* Collapse the accumulated V column to its mod-2 support. */
    reduce_temp_V_H1(self);
    free(trivial_boundary);
}
/*
 * Recursively expand the V-column contribution of triangle 'bo_idx' (which
 * owns pivot edge 'bo_pivot') into g_temp_V_H2_primary.
 *
 * With ADAPTIVE_V_STORAGE, a previously cached expansion is spliced in
 * directly; otherwise heavily used pivots (usage above g_cycle_usage_thresh)
 * are queued in g_store_V_for so their expansion gets cached later.
 * The core loop mirrors get_birth_void: reduce the triangle's edge boundary
 * at its pivot (trivial pair or stored H1 R column) and recurse for each
 * cancelling coface.
 *
 * NOTE(review): V_usage is incremented unconditionally at entry when
 * RECORD_V_USAGE is defined, but only on the not-yet-stored branch otherwise
 * — presumably intentional, but worth confirming.
 */
void find_V_recursively_triangles(filtration* self, simplex bo_idx, EDGE_ID bo_pivot){
#ifdef RECORD_V_USAGE
    self->g_H1_pivot_of[bo_pivot].V_usage++;
#endif
#ifdef ADAPTIVE_V_STORAGE
    if (self->g_H1_pivot_of[bo_pivot].V_stored == 1){
        /* Cached expansion available: append it wholesale and return. */
        if ((self->g_temp_V_H2_primary.len + self->g_H1_pivot_of[bo_pivot].V_len) > self->g_temp_V_H2_primary.max_len - 1){
            self->g_temp_V_H2_primary.max_len =\
                self->g_temp_V_H2_primary.len + self->g_H1_pivot_of[bo_pivot].V_len + 1000;
            self->g_temp_V_H2_primary.VV = (simplex*)realloc(self->g_temp_V_H2_primary.VV\
                , self->g_temp_V_H2_primary.max_len*sizeof(simplex));
        }
        for (EDGE_ID mm = 0; mm < self->g_H1_pivot_of[bo_pivot].V_len; mm++){
            self->g_temp_V_H2_primary.VV[self->g_temp_V_H2_primary.len++] =\
                self->g_H1_pivot_of[bo_pivot].VV[mm];
        }
        return;
    }
    else if (self->g_H1_pivot_of[bo_pivot].V_stored != -1){
#ifndef RECORD_V_USAGE
        self->g_H1_pivot_of[bo_pivot].V_usage++;
#endif
        /* Frequently used and not yet cached: queue for storage. */
        if ((self->g_H1_pivot_of[bo_pivot].V_usage > self->g_cycle_usage_thresh)\
            &&(!self->g_H1_pivot_of[bo_pivot].V_stored)){
            self->g_store_V_for[self->g_store_V_for_len++] = bo_pivot;
            if (self->g_store_V_for_len == self->g_store_V_for_max_len){
                self->g_store_V_for_max_len += 100;
                self->g_store_V_for = (EDGE_ID*)realloc(self->g_store_V_for\
                    , self->g_store_V_for_max_len*sizeof(EDGE_ID));
            }
        }
    }
#endif
    /* Local double-buffered scratch boundary (edge ids). */
    EDGE_ID res_max_len = 100;
    EDGE_ID* result = (EDGE_ID*)malloc((2*res_max_len)*sizeof(EDGE_ID));
    int res_original = 0;
    EDGE_ID* original_result;
    EDGE_ID* scratch_result;
    /* Initiate R with the boundary of bo_idx. */
    compute_boundary_triangle(self, bo_idx, result);
    EDGE_ID res_len = 3;
    EDGE_ID possible_len;
    EDGE_ID* red_start;
    EDGE_ID red_simp_len;
    EDGE_ID ro;
    EDGE_ID j, k, count;
    EDGE_ID* trivial_boundary = (EDGE_ID*)malloc(3*sizeof(EDGE_ID));
    /* The triangle itself always contributes to V. */
    self->g_temp_V_H2_primary.VV[self->g_temp_V_H2_primary.len++] = bo_idx;
    /* Check for overflow. */
    if (self->g_temp_V_H2_primary.len == self->g_temp_V_H2_primary.max_len){
        self->g_temp_V_H2_primary.max_len += 100;
        self->g_temp_V_H2_primary.VV = (simplex*)realloc(self->g_temp_V_H2_primary.VV\
            , self->g_temp_V_H2_primary.max_len*sizeof(simplex));
    }
    while(res_len != 0){
#ifdef ADAPTIVE_V_STORAGE
        /* Depth counts reduction steps; used by callers as a caching signal. */
        self->g_depth++;
#endif
        original_result = result + (res_original*res_max_len);
        bo_pivot = original_result[res_len-1];
        /* Check for trivial pers pair. */
        if (self->g_coH1_all_lows[bo_pivot].low.key1 == bo_pivot){
            bo_idx = self->g_coH1_all_lows[bo_pivot].low;
            /* Get the boundary of R. */
            compute_boundary_triangle(self, bo_idx, trivial_boundary);
            red_simp_len = 3;
            red_start = trivial_boundary;
        }
        else{
            /* Reduce with the stored H1 R column of this pivot. */
            bo_idx = self->g_H1_pivot_of[bo_pivot].coface;
            ro = self->g_pivots_H1[bo_pivot];
            red_simp_len = self->g_R_col_idx_H1[ro+1] - \
                           self->g_R_col_idx_H1[ro];
            red_start = self->g_R_H1 + self->g_R_col_idx_H1[ro];
        }
        /* Check for overflow; move the live half to the front first. */
        possible_len = res_len + red_simp_len;
        if (possible_len > res_max_len - 1){
            if (res_original){
                for (k = 0; k < res_len; k++){
                    result[k] = result[k + res_max_len];
                }
            }
            res_max_len = possible_len + 1000;
            result = (EDGE_ID*)realloc(result, (2*res_max_len)*sizeof(EDGE_ID));
            original_result = result;
            res_original = 0;
        }
        scratch_result = result + ((1-res_original)*res_max_len);
        /* Reduce: symmetric-difference merge (Z/2) of sorted edge lists. */
        j = 0;
        k = 0;
        count = 0;
        while ((j < res_len) && (k < red_simp_len)){
            if (original_result[j] < red_start[k]){
                scratch_result[count] = original_result[j];
                count = count + 1;
                j = j + 1;
            }
            else if (original_result[j] > red_start[k]){
                scratch_result[count] = red_start[k];
                count = count + 1;
                k = k + 1;
            }
            else{
                /* Equal edges cancel mod 2. */
                j = j + 1;
                k = k + 1;
            }
        }
        while (j < res_len){
            scratch_result[count++] = original_result[j++];
        }
        while (k < red_simp_len){
            scratch_result[count++] = red_start[k++];
        }
        res_len = count;
        res_original = 1 - res_original;
        if (res_len != 0){
            /* The cancelling coface's own V contribution is found recursively. */
            find_V_recursively_triangles(self, bo_idx, bo_pivot);
        }
    }
    free(result);
    free(trivial_boundary);
}
/*
 * Write the 4 boundary triangles of tetrahedron 'tetra' into simp[0..3],
 * sorted ascending by (max-edge order, apex vertex).  A tetrahedron is keyed
 * by two edges ab = key1 and cd = key2; triangles abd and abc take the top
 * two slots (d > c makes abd the maximum), and bcd/acd are ordered by an
 * explicit lexicographic comparison of their (edge, apex) representations.
 */
void compute_boundary_tetra(filtration* self, simplex tetra, simplex* simp){
    EDGE_ID e_ab = tetra.key1;
    EDGE_ID e_cd = tetra.key2;
    VERT_ID a = self->g_edges_list[2*e_ab];
    VERT_ID b = self->g_edges_list[2*e_ab+1];
    VERT_ID c = self->g_edges_list[2*e_cd];
    VERT_ID d = self->g_edges_list[2*e_cd+1];

    /* Since d > c, abd is the maximum triangle; abc comes next. */
    simp[3].key1 = e_ab;
    simp[3].key2 = d;
    simp[2].key1 = e_ab;
    simp[2].key2 = c;

    /* Look up the orders of the four remaining edges. */
    VERT_ID slot = search_Neighbors(self, b, d, 0, self->g_Neigh_len[b]-1);
    EDGE_ID e_bd = self->g_Neighbors[b][slot].order;
    slot = search_Neighbors(self, a, d, 0, self->g_Neigh_len[a]-1);
    EDGE_ID e_ad = self->g_Neighbors[a][slot].order;
    slot = search_Neighbors(self, a, c, 0, self->g_Neigh_len[a]-1);
    EDGE_ID e_ac = self->g_Neighbors[a][slot].order;
    slot = search_Neighbors(self, b, c, 0, self->g_Neigh_len[b]-1);
    EDGE_ID e_bc = self->g_Neighbors[b][slot].order;

    /* Triangle bcd: represented by its max edge + the opposite vertex. */
    EDGE_ID bcd_edge = e_bc;
    VERT_ID bcd_apex = d;
    if (e_cd > bcd_edge){
        bcd_edge = e_cd;
        bcd_apex = b;
    }
    if (e_bd > bcd_edge){
        bcd_edge = e_bd;
        bcd_apex = c;
    }

    /* Triangle acd, same representation. */
    EDGE_ID acd_edge = e_ac;
    VERT_ID acd_apex = d;
    if (e_cd > acd_edge){
        acd_edge = e_cd;
        acd_apex = a;
    }
    if (e_ad > acd_edge){
        acd_edge = e_ad;
        acd_apex = c;
    }

    /* Place the lexicographically larger of {bcd, acd} in simp[1]. */
    if ((bcd_edge > acd_edge) || ((bcd_edge == acd_edge) && (bcd_apex > acd_apex))){
        simp[1].key1 = bcd_edge;
        simp[1].key2 = bcd_apex;
        simp[0].key1 = acd_edge;
        simp[0].key2 = acd_apex;
    }
    else if ((acd_edge > bcd_edge) || ((acd_edge == bcd_edge) && (acd_apex > bcd_apex))){
        simp[1].key1 = acd_edge;
        simp[1].key2 = acd_apex;
        simp[0].key1 = bcd_edge;
        simp[0].key2 = bcd_apex;
    }
    else{
        /* Identical keys would mean a degenerate tetrahedron. */
        printf("\nERROR? 10119");
        getchar();
    }
}
/*
 * Write the 3 boundary edges of 'triangle' into simp[0..2], sorted ascending
 * by edge order.  A triangle is keyed by its max edge uv = key1 plus the apex
 * vertex w = key2, so the base edge key1 is by construction the largest and
 * only the two cross edges (u,w) and (v,w) need ordering.
 */
void compute_boundary_triangle(filtration* self, simplex triangle, EDGE_ID* simp){
    VERT_ID u = self->g_edges_list[2*triangle.key1];
    VERT_ID v = self->g_edges_list[2*triangle.key1+1];
    VERT_ID w = triangle.key2;

    VERT_ID at = search_Neighbors(self, u, w, 0, self->g_Neigh_len[u]-1);
    EDGE_ID e_uw = self->g_Neighbors[u][at].order;
    at = search_Neighbors(self, v, w, 0, self->g_Neigh_len[v]-1);
    EDGE_ID e_vw = self->g_Neighbors[v][at].order;

    if (e_uw > e_vw){
        simp[0] = e_vw;
        simp[1] = e_uw;
    }
    else{
        simp[0] = e_uw;
        simp[1] = e_vw;
    }
    simp[2] = triangle.key1;
}
/*
 * Cache V-columns for the H0 pivots queued in g_store_V_for: expand each
 * recursively, and persist the reduced column only when the recursion depth
 * reached g_cycle_depth_thresh (shallow expansions are cheap to recompute,
 * so they are marked V_stored = -1 and skipped).
 */
void store_V_H0(filtration* self){
    for (EDGE_ID nn = 0; nn < self->g_store_V_for_len; nn++){
        EDGE_ID piv = self->g_store_V_for[nn];
        if (self->g_H0_pivot_of[piv].V_len){
            continue;  /* already cached */
        }
        self->g_temp_V_primary.len = 0;
        EDGE_ID seed = self->g_H0_pivot_of[piv].coface;
        self->g_depth = 0;
        find_V_recursively_edges(self, seed, piv);
#ifdef RECORD_V_USAGE
        self->g_H0_pivot_of[piv].V_depth = self->g_depth;
#endif
        if (self->g_depth < self->g_cycle_depth_thresh){
            /* Too shallow to be worth caching. */
            self->g_H0_pivot_of[piv].V_stored = -1;
            continue;
        }
        self->g_n_H0_stored_V++;
        self->g_H0_pivot_of[piv].V_stored = 1;
        /* Reduce V to its mod-2 support, then copy it out. */
        reduce_temp_V_H0(self);
        EDGE_ID n_keep = self->g_temp_V_primary.len;
        self->g_H0_pivot_of[piv].V_len = n_keep;
        self->g_H0_pivot_of[piv].VV = \
            (EDGE_ID*)malloc(n_keep*sizeof(EDGE_ID));
        for (EDGE_ID oo = 0; oo < n_keep; oo++){
            self->g_H0_pivot_of[piv].VV[oo] = self->g_temp_V_primary.VV[oo];
        }
    }
}
/*
 * Collapse g_temp_V_primary to its mod-2 support: sort the accumulated edge
 * ids, then drop every value occurring an even number of times (Z/2
 * coefficients cancel in pairs), compacting survivors in place.
 *
 * Fix: guard the empty case.  EDGE_ID is unsigned (see the underflow note in
 * search_H2_pivots), so with len == 0 the old loop bound 'len - 1' wrapped to
 * a huge value and VV[len-1] read out of bounds.
 */
void reduce_temp_V_H0(filtration* self){
    EDGE_ID len = self->g_temp_V_primary.len;
    if (!len){
        return;  /* nothing accumulated; avoids unsigned len-1 wraparound */
    }
    sorter8_tim_sort(self->g_temp_V_primary.VV, len);
    int coeff = 1;    /* parity of the duplicate run ending at vv */
    EDGE_ID idx = 0;  /* write cursor for surviving entries */
    for (EDGE_ID vv = 0; vv < len - 1; vv++){
        if (self->g_temp_V_primary.VV[vv] == self->g_temp_V_primary.VV[vv+1])
        {
            coeff = 1 - coeff;  /* duplicate: flip parity */
        }
        else{
            if (coeff){
                self->g_temp_V_primary.VV[idx++] = self->g_temp_V_primary.VV[vv];
            }
            coeff = 1;
        }
    }
    /* Last element survives iff its run had odd parity. */
    if (coeff){
        self->g_temp_V_primary.VV[idx++] = self->g_temp_V_primary.VV[len-1];
    }
    self->g_temp_V_primary.len = idx;
}
/*
 * Cache V-columns for the H1 pivots queued in g_store_V_for, mirroring
 * store_V_H0: pivots in trivial persistence pairs are never cached, and
 * expansions shallower than g_cycle_depth_thresh are marked V_stored = -1.
 *
 * Fix: g_n_H1_stored_V was incremented BEFORE the depth-threshold rejection,
 * so pivots that were subsequently skipped still inflated the stored-count —
 * inconsistent with store_V_H0, which counts only actually-stored columns.
 * The increment now happens after the depth check.
 */
void store_V_H1(filtration* self){
    for (EDGE_ID nn = 0; nn < self->g_store_V_for_len; nn++){
        EDGE_ID bo_pivot = self->g_store_V_for[nn];
        if (self->g_H1_pivot_of[bo_pivot].V_len){
            continue;  /* already cached */
        }
        /* Check for trivial pers pair: never worth caching. */
        if (self->g_coH1_all_lows[bo_pivot].low.key1 == bo_pivot){
            self->g_H1_pivot_of[bo_pivot].V_stored = -1;
            continue;
        }
        /* Expand the candidate to measure its recursion depth. */
        self->g_temp_V_H2_primary.len = 0;
        simplex bo_idx = self->g_H1_pivot_of[bo_pivot].coface;
        self->g_depth = 0;
        find_V_recursively_triangles(self, bo_idx, bo_pivot);
#ifdef RECORD_V_USAGE
        self->g_H1_pivot_of[bo_pivot].V_depth = self->g_depth;
#endif
        if (self->g_depth < self->g_cycle_depth_thresh){
            /* Too shallow to be worth caching. */
            self->g_H1_pivot_of[bo_pivot].V_stored = -1;
            continue;
        }
        /* Count only columns that are actually stored (matches store_V_H0). */
        self->g_n_H1_stored_V++;
        self->g_H1_pivot_of[bo_pivot].V_stored = 1;
        /* Reduce V to its mod-2 support, then copy it out. */
        reduce_temp_V_H1(self);
        self->g_H1_pivot_of[bo_pivot].V_len = self->g_temp_V_H2_primary.len;
        self->g_H1_pivot_of[bo_pivot].VV = \
            (simplex*)malloc(self->g_temp_V_H2_primary.len*sizeof(simplex));
        for (EDGE_ID oo = 0; oo < self->g_temp_V_H2_primary.len; oo++){
            self->g_H1_pivot_of[bo_pivot].VV[oo] = self->g_temp_V_H2_primary.VV[oo];
        }
    }
}
/*
 * Collapse g_temp_V_H2_primary to its mod-2 support: sort the accumulated
 * triangles by (key1, key2), then drop every simplex occurring an even number
 * of times (Z/2 coefficients cancel in pairs), compacting survivors in place.
 *
 * Fix: guard the empty case.  EDGE_ID is unsigned (see the underflow note in
 * search_H2_pivots), so with len == 0 the old loop bound 'len - 1' wrapped to
 * a huge value and VV[len-1] read out of bounds.
 */
void reduce_temp_V_H1(filtration* self){
    EDGE_ID len = self->g_temp_V_H2_primary.len;
    if (!len){
        return;  /* nothing accumulated; avoids unsigned len-1 wraparound */
    }
    sorter4_tim_sort(self->g_temp_V_H2_primary.VV, len);
    int coeff = 1;    /* parity of the duplicate run ending at vv */
    EDGE_ID idx = 0;  /* write cursor for surviving entries */
    for (EDGE_ID vv = 0; vv < len - 1; vv++){
        if ((self->g_temp_V_H2_primary.VV[vv].key1 == self->g_temp_V_H2_primary.VV[vv+1].key1)\
            &&(self->g_temp_V_H2_primary.VV[vv].key2 == self->g_temp_V_H2_primary.VV[vv+1].key2))
        {
            coeff = 1 - coeff;  /* duplicate: flip parity */
        }
        else{
            if (coeff){
                self->g_temp_V_H2_primary.VV[idx++] = self->g_temp_V_H2_primary.VV[vv];
            }
            coeff = 1;
        }
    }
    /* Last element survives iff its run had odd parity. */
    if (coeff){
        self->g_temp_V_H2_primary.VV[idx++] = self->g_temp_V_H2_primary.VV[len-1];
    }
    self->g_temp_V_H2_primary.len = idx;
}
/*
 * Binary search over arr[l..r]: returns the smallest index whose value is
 * strictly less than x, or MAX when no element is below x.
 * NOTE(review): the comparison directions imply arr is sorted DESCENDING —
 * confirm against the callers.
 *
 * Fix: the original recursed without returning the recursive result, so
 * control fell off the end of a non-void function — undefined behavior (the
 * caller consumed whatever happened to sit in the return register).
 * Rewritten as the equivalent iteration with a return on every path.
 */
EDGE_ID bin_search_max_less_V(EDGE_ID* arr, EDGE_ID l, EDGE_ID r, EDGE_ID x, EDGE_ID MAX){
    for (;;){
        if (arr[r] >= x){
            return MAX;  /* even the smallest element is >= x */
        }
        if (arr[l] < x){
            return l;    /* left edge already qualifies: smallest such index */
        }
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid] >= x){
            l = mid + 1;
        }
        else{
            /* mid > l here (arr[l] < x already returned), so the unsigned
               decrement cannot wrap. */
            r = mid - 1;
        }
    }
}
/*
 * Binary search over arr[l..r] by .cycid: returns the smallest index whose
 * cycid is strictly less than x, or MAX when no element qualifies.
 * NOTE(review): the comparison directions imply arr is sorted DESCENDING by
 * cycid (consistent with merge_update_V) — confirm against the callers.
 *
 * Fix: the original recursed without returning the recursive result, so
 * control fell off the end of a non-void function (undefined behavior).
 * Rewritten as the equivalent iteration with a return on every path.
 */
EDGE_ID bin_search_max_less_updated_V(min_update_V* arr, EDGE_ID l, EDGE_ID r, EDGE_ID x, EDGE_ID MAX){
    for (;;){
        if (arr[r].cycid >= x){
            return MAX;  /* even the smallest cycid is >= x */
        }
        if (arr[l].cycid < x){
            return l;    /* left edge already qualifies: smallest such index */
        }
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid].cycid >= x){
            l = mid + 1;
        }
        else{
            /* mid > l here (arr[l].cycid < x already returned), so the
               unsigned decrement cannot wrap. */
            r = mid - 1;
        }
    }
}
/*
 * Binary search over arr[l..r] (sorted ascending, per the comparison
 * directions): returns the smallest index whose value is strictly greater
 * than x, or MAX when no element exceeds x.
 *
 * Fix: the original recursed without returning the recursive result, so
 * control fell off the end of a non-void function (undefined behavior).
 * Rewritten as the equivalent iteration with a return on every path.
 */
EDGE_ID bin_search_min_greater_updated_V_byLidx(EDGE_ID* arr, EDGE_ID l, EDGE_ID r, EDGE_ID x, EDGE_ID MAX){
    for (;;){
        if (arr[r] <= x){
            return MAX;  /* even the largest element is <= x */
        }
        if (arr[l] > x){
            return l;    /* left edge already qualifies: smallest such index */
        }
        EDGE_ID mid = l + (r - l) / 2;
        if (arr[mid] <= x){
            l = mid + 1;
        }
        else{
            /* mid > l here (arr[l] > x already returned), so mid-1 is safe;
               the boundary sits at mid when its left neighbor is <= x. */
            if (arr[mid - 1] <= x){
                return mid;
            }
            r = mid;
        }
    }
}
/*
 * For every stored H0 boundary mm, scan the later (shorter-or-equal)
 * boundaries nn for the partner whose symmetric difference with mm shrinks mm
 * the most, recording per row: all_diff[mm][0] = best partner index,
 * all_diff[mm][1] = size reduction achieved.  The inner scan breaks early
 * using the length ordering established by mergeSort_V_H0 (which appears to
 * sort boundaries by length — confirm its comparison direction there).
 *
 * NOTE(review): all_diff is purely local and never exported; 'filename' is
 * unused in the visible code path — presumably results were meant to be
 * written out here.
 *
 * Fix: all_diff (and its rows), 'updated' and 'original_id' were allocated
 * and never freed — every call leaked.  All scratch memory is now released
 * before returning; observable behavior is otherwise unchanged.
 */
void minimize_birth_cycles_H0_v2(filtration* self
                , EDGE_ID** stored_boundaries
                , EDGE_ID* len_boundaries
                , EDGE_ID stored_num
                , char* filename
                ){
    (void)filename;  /* unused in the current implementation */
    EDGE_ID** all_diff = (EDGE_ID**)malloc(self->g_all_V_stored_num*sizeof(EDGE_ID*));
    for (EDGE_ID mm = 0; mm < self->g_all_V_stored_num; mm++){
        all_diff[mm] = (EDGE_ID*)malloc(2*sizeof(EDGE_ID));
    }
    EDGE_ID* updated = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));
    EDGE_ID* original_id = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    for (EDGE_ID mm = 0; mm < stored_num; mm++){
        original_id[mm] = mm;
    }
    /* Sort the boundaries (side effect on stored_boundaries ordering). */
    mergeSort_V_H0(len_boundaries, stored_boundaries, updated, original_id \
            , 0, stored_num-1);
    #pragma omp parallel for schedule(guided) shared(self, stored_boundaries, len_boundaries, stored_num, all_diff)
    for (EDGE_ID mm = 0; mm < stored_num; mm++){
        if (!self->g_suppress_output){
            if (mm%1000 == 0)
                printf("\nDoing %d", mm);
        }
        all_diff[mm][0] = 0;
        all_diff[mm][1] = 0;
        for (EDGE_ID nn = mm+1; nn < stored_num; nn++){
            /* Later boundaries only get shorter: once nn cannot beat the best
               reduction found so far, stop scanning. */
            if (len_boundaries[nn] < all_diff[mm][1]){
                break;
            }
            /* Count |mm XOR nn| by merging the two sorted edge lists,
               bailing out as soon as the count can no longer improve. */
            EDGE_ID j, k, count;
            j = 0;
            k = 0;
            count = 0;
            int quit_flag = 0;
            while ((j < len_boundaries[mm]) && (k < len_boundaries[nn])){
                if (stored_boundaries[mm][j] < stored_boundaries[nn][k]){
                    count++;
                    j++;
                }
                else if (stored_boundaries[mm][j] > stored_boundaries[nn][k]){
                    count++;
                    k++;
                }
                else{
                    j++;
                    k++;
                }
                if (count > len_boundaries[mm] - all_diff[mm][1]){
                    quit_flag = 1;
                    break;
                }
            }
            if (quit_flag) continue;
            if (j < len_boundaries[mm]){
                count += len_boundaries[mm] - j;
            }
            if (count > len_boundaries[mm] - all_diff[mm][1]){
                continue;
            }
            if (k < len_boundaries[nn]){
                count += len_boundaries[nn] - k;
            }
            if (count > len_boundaries[mm] - all_diff[mm][1]){
                continue;
            }
            if (count < len_boundaries[mm]){
                if ((len_boundaries[mm] - count) > all_diff[mm][1]){
                    all_diff[mm][0] = nn;
                    all_diff[mm][1] = len_boundaries[mm] - count;
                }
            }
        }
    }
    /* Release scratch memory (previously leaked). */
    for (EDGE_ID mm = 0; mm < self->g_all_V_stored_num; mm++){
        free(all_diff[mm]);
    }
    free(all_diff);
    free(updated);
    free(original_id);
}
//////////////////////////////////////////////////////////
// MERGE SORT ALGORITHMS FOR update_V
//////////////////////////////////////////////////////////
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_update_V: combines the sorted runs arr[l..m] and
 * arr[m+1..r] in place, ordering DESCENDING by .cycid (ties taken from the
 * right run, matching the original comparison).
 */
void merge_update_V(min_update_V* arr, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID left_n = m - l + 1;
    EDGE_ID right_n = r - m;

    /* Copy both runs into temporary buffers. */
    min_update_V* left = malloc(left_n * sizeof(min_update_V));
    min_update_V* right = malloc(right_n * sizeof(min_update_V));
    for (EDGE_ID t = 0; t < left_n; t++){
        left[t] = arr[l + t];
    }
    for (EDGE_ID t = 0; t < right_n; t++){
        right[t] = arr[m + 1 + t];
    }

    /* Merge back into arr[l..r]. */
    EDGE_ID a = 0;      /* cursor into left run */
    EDGE_ID b = 0;      /* cursor into right run */
    EDGE_ID out = l;    /* write cursor into arr */
    while (a < left_n && b < right_n){
        if (left[a].cycid > right[b].cycid){
            arr[out++] = left[a++];
        }
        else{
            arr[out++] = right[b++];
        }
    }
    /* Drain whichever run still has elements. */
    while (a < left_n){
        arr[out++] = left[a++];
    }
    while (b < right_n){
        arr[out++] = right[b++];
    }

    free(left);
    free(right);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down merge sort of arr[l..r], descending by .cycid (ordering is
 * defined by merge_update_V).
 */
void mergeSort_update_V(min_update_V* arr, EDGE_ID l, EDGE_ID r)
{
    /* Ranges of zero or one element are already sorted. */
    if (l >= r){
        return;
    }
    /* Overflow-safe midpoint: same as (l+r)/2 for small values. */
    EDGE_ID mid = l + (r - l) / 2;
    mergeSort_update_V(arr, l, mid);
    mergeSort_update_V(arr, mid + 1, r);
    merge_update_V(arr, l, mid, r);
}
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
/*
 * Merge step for mergeSort_update_V_byLidx: combines the sorted runs
 * arr[l..m] and arr[m+1..r] in place, ordering ASCENDING by .Lidx (ties
 * taken from the right run, matching the original comparison).
 */
void merge_update_V_byLidx(min_update_V* arr, EDGE_ID l, EDGE_ID m, EDGE_ID r)
{
    EDGE_ID left_n = m - l + 1;
    EDGE_ID right_n = r - m;

    /* Copy both runs into temporary buffers. */
    min_update_V* left = malloc(left_n * sizeof(min_update_V));
    min_update_V* right = malloc(right_n * sizeof(min_update_V));
    for (EDGE_ID t = 0; t < left_n; t++){
        left[t] = arr[l + t];
    }
    for (EDGE_ID t = 0; t < right_n; t++){
        right[t] = arr[m + 1 + t];
    }

    /* Merge back into arr[l..r]. */
    EDGE_ID a = 0;      /* cursor into left run */
    EDGE_ID b = 0;      /* cursor into right run */
    EDGE_ID out = l;    /* write cursor into arr */
    while (a < left_n && b < right_n){
        if (left[a].Lidx < right[b].Lidx){
            arr[out++] = left[a++];
        }
        else{
            arr[out++] = right[b++];
        }
    }
    /* Drain whichever run still has elements. */
    while (a < left_n){
        arr[out++] = left[a++];
    }
    while (b < right_n){
        arr[out++] = right[b++];
    }

    free(left);
    free(right);
}
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
/*
 * Top-down merge sort of arr[l..r] (inclusive) by increasing Lidx.
 * l and r are indices into arr; recursion bottoms out on runs of length 1.
 */
void mergeSort_update_V_byLidx(min_update_V* arr, EDGE_ID l, EDGE_ID r)
{
    if (l >= r){
        return;                       /* zero or one element: already sorted */
    }
    EDGE_ID mid = l + (r - l) / 2;    /* overflow-safe midpoint */
    mergeSort_update_V_byLidx(arr, l, mid);
    mergeSort_update_V_byLidx(arr, mid + 1, r);
    merge_update_V_byLidx(arr, l, mid, r);
}
/*
 * Over all pairs (mm, nn) of stored cycles, computes the largest possible
 * length reduction ("diff") achievable by replacing cycle mm with the
 * symmetric difference of cycle mm and cycle nn.
 *
 * len_boundaries[i]    - number of edges in stored cycle i
 * stored_boundaries[i] - sorted edge-id list of stored cycle i (the two-pointer
 *                        sweep below relies on each list being sorted ascending)
 * stored_num           - number of stored cycles
 *
 * Returns the maximum of (len_boundaries[mm] - |symmetric difference|) over
 * all pairs where the symmetric difference is strictly shorter than cycle mm.
 *
 * NOTE(review): the early `break`s on len_boundaries[..] < difff assume the
 * cycles are ordered by non-increasing length - TODO confirm at the call site.
 */
EDGE_ID find_first_diff_H0(EDGE_ID* len_boundaries\
        , EDGE_ID stored_num\
        , EDGE_ID** stored_boundaries){
    EDGE_ID difff = 0;
    for (EDGE_ID mm = 0; mm < stored_num; mm++){
        /* Any cycle shorter than the best diff so far cannot improve it. */
        if (len_boundaries[mm] < difff){
            break;
        }
        for (EDGE_ID nn = mm+1; nn < stored_num; nn++){
            if (len_boundaries[nn] < difff){
                break;
            }
            /* Two-pointer sweep: count |mm XOR nn| (edges in exactly one cycle). */
            EDGE_ID j, k, count;
            j = 0;
            k = 0;
            count = 0;
            int quit_flag = 0;
            while ((j < len_boundaries[mm]) && (k < len_boundaries[nn])){
                if (stored_boundaries[mm][j] < stored_boundaries[nn][k]){
                    count++;
                    j++;
                }
                else if (stored_boundaries[mm][j] > stored_boundaries[nn][k]){
                    count++;
                    k++;
                }
                else{
                    /* Shared edge: cancels in the symmetric difference. */
                    j++;
                    k++;
                }
                /* The reduction can no longer beat the current best: abort early. */
                if (count > len_boundaries[mm] - difff){
                    quit_flag = 1;
                    break;
                }
            }
            if (quit_flag) continue;
            /* Remaining unmatched edges of mm all enter the symmetric difference. */
            if (j < len_boundaries[mm]){
                count += len_boundaries[mm] - j;
            }
            if (count > len_boundaries[mm] - difff){
                continue;
            }
            /* Remaining unmatched edges of nn likewise. */
            if (k < len_boundaries[nn]){
                count += len_boundaries[nn] - k;
            }
            if (count > len_boundaries[mm] - difff){
                continue;
            }
            //printf("\n%d, %d, %d",len_boundaries[mm], count, difff);
            /* A strictly shorter replacement exists: record the improvement. */
            if (count < len_boundaries[mm]){
                if ((len_boundaries[mm] - count) > difff){
                    difff = len_boundaries[mm] - count;
                }
            }
        }
    }
    return difff;
}
/*
 * Iteratively shortens the stored H0 birth cycles: while any cycle ci can be
 * strictly shortened by XOR-ing it with another stored cycle cj (its "redw"
 * partner, giving length reduction CC[ci].diff), replace the set of cycles
 * with the current maximum reduction, then incrementally recompute the diffs
 * of only the cycles that could have been affected (cases 1 / 2a2bb / 2ba).
 * Finally writes the minimized cycles (length >= 5) to `filename` and,
 * under STORE_LENGTHS_CYCLES, the length lists to `lens_filename` /
 * `minimal_lens_filename`.  `subset_points_file` is currently unused here.
 *
 * Data structures:
 *   Lcycid/Llen/Lupdated  - cycle ids / lengths / "changed this iteration"
 *                           flags, kept sorted by decreasing length.
 *   CC[ci].Lidx           - position of cycle ci in those L-arrays.
 *   g_edges_in_cycles     - for each edge, the sorted list of cycles using it.
 *   update_in_cycle       - per-edge add/remove deltas accumulated while
 *                           cycles are rewritten, merged back in parallel.
 */
void minimize_birth_cycles_H0_v3(filtration* self\
        , cyc_info* CC\
        , EDGE_ID stored_num\
        , char* filename\
        , char* lens_filename\
        , char* minimal_lens_filename\
        , char* subset_points_file\
        ){
    //, char* filename2
    if (!self->g_suppress_output){
        printf("\nNumber of cycles %d", stored_num);
    }
    //getchar();
#ifdef STORE_LENGTHS_CYCLES
    /* Dump the pre-minimization lengths; if reduction is disabled this is
       also where the cycle storage is released. */
    FILE* fp0 = fopen(lens_filename, "w");
    for (EDGE_ID ci = 0; ci < stored_num; ci++){
        fprintf(fp0, "%d, ", CC[ci].len);
        if (!self->g_reduce_cyc_lengths){
            free(CC[ci].boundary);
        }
    }
    if (!self->g_reduce_cyc_lengths){
        free(CC);
    }
    fclose(fp0);
#endif
    if (!self->g_reduce_cyc_lengths){
        return;
    }
    omp_set_num_threads(2*self->g_cpu_count - 1);
    EDGE_ID* Lcycid = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID* Llen = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID* Lupdated = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));
    /* Per-edge delta lists (cycle added to / removed from this edge). */
    update_in_cyc** update_in_cycle = (update_in_cyc**)malloc(self->g_n_valid_edges*sizeof(update_in_cyc*));
    EDGE_ID* update_in_cycle_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
    EDGE_ID* update_in_cycle_max_len = (EDGE_ID*)calloc(self->g_n_valid_edges, sizeof(EDGE_ID));
    for (EDGE_ID bb = 0; bb < self->g_n_valid_edges; bb++){
        update_in_cycle_max_len[bb] = 1;
        update_in_cycle[bb] = (update_in_cyc*)malloc(sizeof(update_in_cyc));
    }
    EDGE_ID* update_edges = (EDGE_ID*)malloc(self->g_n_valid_edges*sizeof(EDGE_ID));
    EDGE_ID update_edges_num = 0;
    EDGE_ID* case2a2bb = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID case2a2bb_num = 0;
    EDGE_ID* case2ba = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID case2ba_num = 0;
    //printf("\nIntializing L, C.Lidx...");
    // Step 1. Initialize L and C.Lidx
    for (EDGE_ID i = 0; i < stored_num; i++){
        Lcycid[i] = i;
        Llen[i] = CC[i].len;
        Lupdated[i] = 0;
    }
    //printf("\nSorting Llen...");
    // Step 2(a): Sort Llen, Lcycid, Lupdated by Llen
    mergeSort_Llen(Llen, Lcycid, Lupdated, 0, stored_num - 1);
    //printf("\nInitializing C.Lidx...");
    // Step 2(b): Initialize C.Lidx
    for (EDGE_ID li = 0; li < stored_num; li++){
        CC[Lcycid[li]].Lidx = li;
    }
    //// Cycle-intersects-cycle information
    //cyc_in_cyc** cyc_inter_cyc = (cyc_in_cyc**)malloc(stored_num*sizeof(cyc_in_cyc*));
    //EDGE_ID* cyc_inter_cyc_len = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));
    //EDGE_ID* cyc_inter_cyc_max_len = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));
    // BUILD VERT to cycle association
    /* For every edge, record which cycles contain it (appended in increasing
       ci, so each per-edge list ends up sorted by cycle id). */
    for (EDGE_ID ci = 0; ci < stored_num; ci++){
        EDGE_ID li = CC[ci].Lidx;
        for (EDGE_ID idx = 0; idx < CC[ci].len; idx++){
            EDGE_ID edge = CC[ci].boundary[idx];
            //printf("\nedge is %d with %d cycles ", edge, self->g_edges_in_cycles_len[edge]);
            //for (EDGE_ID nn = 0; nn < self->g_edges_in_cycles_len[edge]; nn++){
            //    printf("%d, ", self->g_edges_in_cycles[edge][nn]);
            //}
            if (!self->g_edges_in_cycles_len[edge]){
                self->g_edges_in_cycles[edge] = (EDGE_ID*)malloc(sizeof(EDGE_ID));
            }
            else{
                self->g_edges_in_cycles[edge] = (EDGE_ID*)realloc(\
                        self->g_edges_in_cycles[edge]\
                        ,(self->g_edges_in_cycles_len[edge]+1)*sizeof(EDGE_ID));
            }
            self->g_edges_in_cycles[edge][self->g_edges_in_cycles_len[edge]++] = ci;
        }
    }
    // NOTE: At this point g_edges_in_cycles is sorted by cyc_id
    if (!self->g_suppress_output){
        printf("\nInitializing diff...");
    }
    /* Initial diff pass: for every cycle ci, find the best partner cj (longer
       cycles come earlier in L, so only positions after li are candidates). */
    #pragma omp parallel for schedule(static, 1000) shared(Lcycid, CC, Llen)
    for (EDGE_ID li = 0; li < stored_num; li++){
        EDGE_ID ci = Lcycid[li];
        CC[ci].diff = 0;
        if (!self->g_suppress_output){
            if (li %1000 == 0){
                printf("\n%d", li);
            }
        }
        for (EDGE_ID lj = li + 1; lj < stored_num; lj++){
            //for (EDGE_ID lj = 0; lj < stored_num; lj++){
            /* L is sorted by decreasing length: shorter candidates cannot
               produce a reduction larger than the best found so far. */
            if (Llen[lj] < CC[ci].diff){
                break;
            }
            //if (Llen[lj] > Llen[li]){
            //    continue;
            //}
            EDGE_ID cj = Lcycid[lj];
            //if (CC[cj].perspair[0] > CC[ci].perspair[0]){
            //    continue;
            //}
            //if (CC[cj].updated_birth > self->g_cycle_min_birth_thresh){
            //    continue;
            //}
            /* Two-pointer sweep counting |ci XOR cj| with early abort. */
            EDGE_ID j, k, count;
            j = 0;
            k = 0;
            count = 0;
            int quit_flag = 0;
            while ((j < Llen[li]) && (k < Llen[lj])){
                if (CC[ci].boundary[j] < CC[cj].boundary[k]){
                    j++;
                    count++;
                }
                else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
                    k++;
                    count++;
                }
                else{
                    j++;
                    k++;
                }
                if (count >= Llen[li]){
                    quit_flag = 1;
                    break;
                }
                if ((Llen[li] - CC[ci].diff) <= count){
                    quit_flag = 1;
                    break;
                }
            }
            if (quit_flag){
                continue;
            }
            if (j < Llen[li]){
                count += Llen[li] - j;
            }
            if (k < Llen[lj]){
                count += Llen[lj] - k;
            }
            if (count >= Llen[li]){
                continue;
            }
            if ((Llen[li] - CC[ci].diff) <= count){
                continue;
            }
            // At this point len - count < diff?
            CC[ci].diff = Llen[li] - count;
            CC[ci].redw = cj;
        }
    }
    //for (EDGE_ID ci = 0; ci < stored_num; ci++){
    //    if (CC[ci].diff)
    //    printf("\nmax diff for %d is %d", ci, CC[ci].diff);
    //}
    //getchar();
    // Define V that will store summations to be done
    EDGE_ID V_len = 0;
    EDGE_ID V_max_len = 10;
    min_update_V* update_V = (min_update_V*)malloc(V_max_len*sizeof(min_update_V));
    EDGE_ID* update_v_indices = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    int* update_edges_flag = (int*)calloc(self->g_n_valid_edges, sizeof(int));
    update_edges_num = 0;
    EDGE_ID it_counter = 0;
    // Step 4: Loop for minimization
    while (1){
        /* ss*/ff*/cc* are per-phase wall-clock timers (diagnostics only). */
        struct timespec ss0, ss1, ss2, ss3, ss4, ss5, ss6;
        struct timespec ff0, ff1, ff2, ff3, ff4, ff5, ff6;
        double cc0 = 0;
        double cc1 = 0;
        double cc2 = 0;
        double cc3 = 0;
        double cc4 = 0;
        double cc5 = 0;
        double cc6 = 0;
        if (!self->g_suppress_output){
            printf("\n\nIteration %d", it_counter++);
            clock_gettime(CLOCK_MONOTONIC, &ss0);
            clock_gettime(CLOCK_MONOTONIC, &ss1);
        }
        //printf("\nFINDING MAX DIFF");
        // Step 4(a): Find max diff
        /* Collect in update_V every cycle achieving the current maximum
           reduction dm (dm starts at 1, so diff==0 cycles are never chosen). */
        EDGE_ID dm = 1;
        V_len = 0;
        for (EDGE_ID ci = 0; ci < stored_num; ci++){
            if (CC[ci].diff > dm){
                dm = CC[ci].diff;
                update_V[0].cycid = ci;
                V_len = 1;
            }
            else if (CC[ci].diff == dm){
                update_V[V_len++].cycid = ci;
                if (V_len == V_max_len){
                    V_max_len += 100;
                    update_V = (min_update_V*)realloc(update_V\
                            , V_max_len*sizeof(min_update_V));
                }
            }
        }
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &ff1);
            cc1 += (ff1.tv_sec - ss1.tv_sec);
            cc1 += (ff1.tv_nsec - ss1.tv_nsec) / 1000000000.0;
        }
        if (!V_len){
            //printf("\nDiff 0. EXITING.");
            break;
        }
        //printf("\nMax diff %d in pairs %d", dm, V_len);
        //printf("\ndo 15453");
        //getchar();
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &ss2);
        }
        //#pragma omp parallel for schedule(static, 50) shared(update_V, CC)
        /* Step 4(b): build the replacement boundary (ci XOR redw) for every
           selected cycle, recording per-edge add/remove deltas as we go. */
        for (EDGE_ID vi = 0; vi < V_len; vi++){
            //if (vi % 1000 == 0){
            //    printf("\nreducing %d", vi);
            //}
            EDGE_ID ci = update_V[vi].cycid;
            EDGE_ID cj = CC[ci].redw;
            //printf("\nReducing %d with %d", ci, cj);
            EDGE_ID* scratch = (EDGE_ID*)malloc((CC[ci].len + CC[cj].len)*sizeof(EDGE_ID));
            EDGE_ID edge;
            EDGE_ID j = 0;
            EDGE_ID k = 0;
            EDGE_ID count = 0;
            while ((j < CC[ci].len) && (k < CC[cj].len)){
                if (CC[ci].boundary[j] < CC[cj].boundary[k]){
                    scratch[count++] = CC[ci].boundary[j++];
                }
                else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
                    scratch[count] = CC[cj].boundary[k];
                    // This edge is new in ci
                    // So, add this edge to ci-info
                    edge = CC[cj].boundary[k];
                    if (!update_edges_flag[edge]){
                        update_edges_flag[edge] = 1;
                        update_edges[update_edges_num++] = edge;
                    }
                    update_in_cycle[edge][update_in_cycle_len[edge]].cyc = ci;
                    // Add ci in edge
                    update_in_cycle[edge][update_in_cycle_len[edge]].flag = 1;
                    update_in_cycle_len[edge]++;
                    if (update_in_cycle_len[edge] == update_in_cycle_max_len[edge]){
                        update_in_cycle_max_len[edge] += 100;
                        update_in_cycle[edge] = (update_in_cyc*)realloc(update_in_cycle[edge]\
                                , update_in_cycle_max_len[edge]*sizeof(update_in_cyc));
                    }
                    count++;
                    k++;
                }
                else{
                    // This edge is not there anymore in ci
                    // so, remove ci from this edge's info
                    edge = CC[ci].boundary[j];
                    if (!update_edges_flag[edge]){
                        update_edges_flag[edge] = 1;
                        update_edges[update_edges_num++] = edge;
                    }
                    update_in_cycle[edge][update_in_cycle_len[edge]].cyc = ci;
                    // Remove ci from edge
                    update_in_cycle[edge][update_in_cycle_len[edge]].flag = 0;
                    update_in_cycle_len[edge]++;
                    if (update_in_cycle_len[edge] == update_in_cycle_max_len[edge]){
                        update_in_cycle_max_len[edge] += 100;
                        update_in_cycle[edge] = (update_in_cyc*)realloc(update_in_cycle[edge]\
                                , update_in_cycle_max_len[edge]*sizeof(update_in_cyc));
                    }
                    j++;
                    k++;
                }
            }
            // update_in_cyc will have unique
            while (j < CC[ci].len){
                // No change
                scratch[count++] = CC[ci].boundary[j++];
            }
            while (k < CC[cj].len){
                // These are all new edges
                // So, add ci to every edge-info
                scratch[count] = CC[cj].boundary[k];
                edge = CC[cj].boundary[k];
                if (!update_edges_flag[edge]){
                    update_edges_flag[edge] = 1;
                    update_edges[update_edges_num++] = edge;
                }
                update_in_cycle[edge][update_in_cycle_len[edge]].cyc = ci;
                // Add ci in edge
                update_in_cycle[edge][update_in_cycle_len[edge]].flag = 1;
                update_in_cycle_len[edge]++;
                if (update_in_cycle_len[edge] == update_in_cycle_max_len[edge]){
                    update_in_cycle_max_len[edge] += 100;
                    update_in_cycle[edge] = (update_in_cyc*)realloc(update_in_cycle[edge]\
                            , update_in_cycle_max_len[edge]*sizeof(update_in_cyc));
                }
                count++;
                k++;
            }
            scratch = (EDGE_ID*)realloc(scratch, count*sizeof(EDGE_ID));
            update_V[vi].VV = scratch;
            update_V[vi].V_len = count;
        }
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &ff2);
            cc2 += (ff2.tv_sec - ss2.tv_sec);
            cc2 += (ff2.tv_nsec - ss2.tv_nsec) / 1000000000.0;
            clock_gettime(CLOCK_MONOTONIC, &ss6);
            clock_gettime(CLOCK_MONOTONIC, &ff6);
            cc6 += (ff6.tv_sec - ss6.tv_sec);
            cc6 += (ff6.tv_nsec - ss6.tv_nsec) / 1000000000.0;
            clock_gettime(CLOCK_MONOTONIC, &ss3);
            printf("\nNumber of edges to update %d", update_edges_num);
        }
        /* Step 4(c): merge the per-edge deltas back into g_edges_in_cycles.
           Each thread owns a disjoint set of edges, so no locking is needed. */
        #pragma omp parallel for schedule(static, 50) shared(self, stored_num\
                , CC\
                , Lcycid, Lupdated\
                , update_V, V_len\
                , update_edges\
                , update_in_cycle\
                , update_in_cycle_len\
                )
        for (EDGE_ID iddx = 0; iddx < update_edges_num; iddx++){
            EDGE_ID ei = update_edges[iddx];
            update_edges_flag[ei] = 0;
            EDGE_ID* scratch = (EDGE_ID*)malloc(\
                    (self->g_edges_in_cycles_len[ei]+update_in_cycle_len[ei])*sizeof(EDGE_ID));
            EDGE_ID o_ptr = 0;
            EDGE_ID u_ptr = 0;
            EDGE_ID s_ptr = 0;
            while ((o_ptr < self->g_edges_in_cycles_len[ei]) && (u_ptr < update_in_cycle_len[ei])){
                if (self->g_edges_in_cycles[ei][o_ptr] < update_in_cycle[ei][u_ptr].cyc){
                    scratch[s_ptr++] = self->g_edges_in_cycles[ei][o_ptr++];
                }
                else if (self->g_edges_in_cycles[ei][o_ptr] > update_in_cycle[ei][u_ptr].cyc){
                    // Check if update_in_cycle is flagged for addition
                    if (update_in_cycle[ei][u_ptr].flag){
                        scratch[s_ptr++] = update_in_cycle[ei][u_ptr++].cyc;
                    }
                    else{
                        // Sanity check
                        printf("\nERROR 15604");
                        getchar();
                    }
                }
                else{
                    // Check if update_in_cycle is flagged for addition
                    if (update_in_cycle[ei][u_ptr].flag){
                        // Flagged for addition: means, it is
                        // already there in original. Just copy and increment all pointers
                        scratch[s_ptr++] = self->g_edges_in_cycles[ei][o_ptr++];
                        u_ptr++;
                    }
                    else{
                        // Flagged for removal: means, it is
                        // to be skipped
                        o_ptr++;
                        u_ptr++;
                    }
                }
            }
            // If o_ptr did not reach end: means copy all that are remaining as nothing to
            // update
            while (o_ptr < self->g_edges_in_cycles_len[ei]){
                scratch[s_ptr++] = self->g_edges_in_cycles[ei][o_ptr++];
            }
            // If u_ptr did not reach end: means all of these should have been flagged for addition
            while (u_ptr < update_in_cycle_len[ei]){
                scratch[s_ptr++] = update_in_cycle[ei][u_ptr++].cyc;
            }
            free(self->g_edges_in_cycles[ei]);
            self->g_edges_in_cycles[ei] = scratch;
            self->g_edges_in_cycles_len[ei] = s_ptr;
            update_in_cycle_len[ei] = 0;
            update_in_cycle_max_len[ei] = 1;
            update_in_cycle[ei] = (update_in_cyc*)realloc(\
                    update_in_cycle[ei]\
                    , sizeof(update_in_cyc));
        }
        update_edges_num = 0;
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &ff3);
            cc3 += (ff3.tv_sec - ss3.tv_sec);
            cc3 += (ff3.tv_nsec - ss3.tv_nsec) / 1000000000.0;
            clock_gettime(CLOCK_MONOTONIC, &ss4);
        }
        // Update CC with new boundaries, update Llen and Lupdated
        for (EDGE_ID vi = 0; vi < V_len; vi++){
            EDGE_ID ci = update_V[vi].cycid;
            EDGE_ID li = CC[ci].Lidx;
            free(CC[ci].boundary);
            CC[ci].boundary = update_V[vi].VV;
            CC[ci].len = update_V[vi].V_len;
            Llen[li] = update_V[vi].V_len;
            Lupdated[li] = 1;
        }
        // Sort Llen, Lupdated, Lcycid
        mergeSort_Llen(Llen, Lcycid, Lupdated, 0, stored_num - 1);
        // Update C.Lidx
        for (EDGE_ID li = 0; li < stored_num; li++){
            CC[Lcycid[li]].Lidx = li;
        }
        // Update V.Lidx
        for (EDGE_ID vi = 0; vi < V_len; vi++){
            update_V[vi].Lidx = CC[update_V[vi].cycid].Lidx;
        }
        // Sort update_V by Lidx
        if (V_len > 1){
            mergeSort_update_V_byLidx(update_V, 0, V_len-1);
        }
        for (EDGE_ID vi = 0; vi < V_len; vi++){
            update_v_indices[vi] = update_V[vi].Lidx;
        }
        //// Sort each g_edges_in_cycles by len of cycles
        //for (EDGE_ID ei = 0; ei < self->g_n_valid_edges; ei++){
        //
        //    if (self->g_edges_in_cycles_len[ei] < 2) continue;
        //    mergeSort_edges_in_cycles(self->g_edges_in_cycles[ei], CC, 0, self->g_edges_in_cycles_len[ei]-1);
        //
        //}
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &ff4);
            cc4 += (ff4.tv_sec - ss4.tv_sec);
            cc4 += (ff4.tv_nsec - ss4.tv_nsec) / 1000000000.0;
            clock_gettime(CLOCK_MONOTONIC, &ss5);
        }
        // Get the cases
        // Case 1: x is updated -> check with all with diff = 0 (this is n update_V)
        // CASE 2a: x is not updated + diff is 0 -> only check with updated
        // CASE 2ba: x is not updated + diff is not 0 + y is updated -> check with all after new diff
        // Case 2bb: x is not updated + diff is not 0 + y is not updated -> only check with updated
        case2a2bb_num = 0;
        case2ba_num = 0;
        for (EDGE_ID li = 0; li < stored_num; li++){
            EDGE_ID ci = Lcycid[li];
            if (Lupdated[li]) continue;
            // x is not updated
            if (!CC[ci].diff){
                // diff is 0 -> only check with updated -> case 2
                case2a2bb[case2a2bb_num++] = li;
            }
            else{
                EDGE_ID cj = CC[ci].redw;
                EDGE_ID lj = CC[cj].Lidx;
                if (Lupdated[lj]){
                    // diff is not 0 and y is updated -> check with all, but diff is not set to 0 -> case 1
                    case2ba[case2ba_num++] = li;
                }
                else{
                    // diff is not 0 and y is not updated -> only check with updated -> case 2
                    case2a2bb[case2a2bb_num++] = li;
                }
            }
        }
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &ff5);
            cc5 += (ff5.tv_sec - ss5.tv_sec);
            cc5 += (ff5.tv_nsec - ss5.tv_nsec) / 1000000000.0;
        }
        //printf("\nStarting 15790");
        //getchar();
        //////////////////////////////////////////////////////////
        //printf("\nUpdating diffs...");
        // Update diffs
        //////////////////////////////////////////////////////////
        // NOTE: updated_V is sorted by increasing Lidx at this point
        struct timespec start1, start2, start3;
        struct timespec finish1, finish2, finish3;
        double c1 = 0;
        double c2 = 0;
        double c3 = 0;
        // NEW NEW
        // CASE 1
        //printf("\nStarting 15810");
        //getchar();
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &start1);
        }
        /* CASE 1: cycles that were just rewritten must rescan everything. */
        #pragma omp parallel for schedule(static, 50) shared(stored_num\
                , CC\
                , Lcycid, Lupdated\
                , update_V, V_len\
                )
        for (EDGE_ID vi = 0; vi < V_len; vi++){
            EDGE_ID li = update_V[vi].Lidx;
            EDGE_ID ci = Lcycid[li];
            //for (EDGE_ID ci = 0; ci < stored_num; ci++){
            CC[ci].diff = 0;
            //EDGE_ID li = CC[ci].Lidx;
            //printf("\ncase 1 diff before for cycle %d is %d", ci, CC[ci].diff);
            //// flag_case = 0 to check with all
            update_diff(self, li, Lupdated, 0, Lcycid, CC, stored_num);
            //printf("\ncase 1 diff after for cycle %d is %d", ci, CC[ci].diff);
        }
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &finish1);
            c1 += (finish1.tv_sec - start1.tv_sec);
            c1 += (finish1.tv_nsec - start1.tv_nsec) / 1000000000.0;
            clock_gettime(CLOCK_MONOTONIC, &start2);
        }
        // CASE 2a2bb: Only check with updated cycles
        #pragma omp parallel for schedule(static, 50) shared(stored_num\
                , CC\
                , Lcycid, Lupdated\
                , case2a2bb_num, case2a2bb)
        for (EDGE_ID idx = 0; idx < case2a2bb_num; idx++){
            EDGE_ID li = case2a2bb[idx];
            //EDGE_ID ci = Lcycid[li];
            //printf("\ncase 2 diff before for cycle %d is %d", ci, CC[ci].diff);
            // flag_case = 1 to check with only updated
            //update_diff(self, li, Lupdated, 1, Lcycid, CC, stored_num);
            minimal_CASE2(self, li, CC, Lcycid, Llen, update_v_indices, V_len);
            //printf("\ncase 2 diff after for cycle %d is %d", ci, CC[ci].diff);
        }
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &finish2);
            c2 += (finish2.tv_sec - start2.tv_sec);
            c2 += (finish2.tv_nsec - start2.tv_nsec) / 1000000000.0;
            clock_gettime(CLOCK_MONOTONIC, &start3);
        }
        // CASE 2ba
        #pragma omp parallel for schedule(static, 50) shared(stored_num\
                , CC\
                , Lcycid, Lupdated\
                , case2ba, case2ba_num)
        for(EDGE_ID idx = 0; idx < case2ba_num; idx++){
            EDGE_ID li = case2ba[idx];
            EDGE_ID ci = Lcycid[li];
            EDGE_ID cj = CC[ci].redw;
            //printf("\ncase 3 diff before for cycle %d is %d", ci, CC[ci].diff);
            //////////////
            // CASE 2ba: x is NOT updated and diff is NOT 0 and y is updated
            //////////////
            //printf(" Case 2ba");
            /* The recorded partner cj changed, so first recompute the diff
               against cj's new boundary, then rescan everything. */
            EDGE_ID j = 0;
            EDGE_ID k = 0;
            EDGE_ID count = 0;
            while ((j < CC[ci].len) && (k < CC[cj].len)){
                if (CC[ci].boundary[j] < CC[cj].boundary[k]){
                    j++;
                    count++;
                }
                else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
                    k++;
                    count++;
                }
                else{
                    j++;
                    k++;
                }
                if (count >= CC[ci].len){
                    break;
                }
            }
            if (j < CC[ci].len){
                count += CC[ci].len - j;
            }
            if (k < CC[cj].len){
                count += CC[cj].len - k;
            }
            if (count < CC[ci].len){
                CC[ci].diff = CC[ci].len - count;
            }
            else{
                CC[ci].diff = 0;
            }
            // flag_case = 0 to check with all
            update_diff(self, li, Lupdated, 0, Lcycid, CC, stored_num);
            //printf("\ncase 3 diff after for cycle %d is %d", ci, CC[ci].diff);
        }
        if (!self->g_suppress_output){
            clock_gettime(CLOCK_MONOTONIC, &finish3);
            c3 += (finish3.tv_sec - start3.tv_sec);
            c3 += (finish3.tv_nsec - start3.tv_nsec) / 1000000000.0;
            clock_gettime(CLOCK_MONOTONIC, &ff0);
            cc0 += (ff0.tv_sec - ss0.tv_sec);
            cc0 += (ff0.tv_nsec - ss0.tv_nsec) / 1000000000.0;
            printf("\nmax diff %d, num pairs %d, case 1 %lf, case 2a2bb %lf, case 2ba %lf, (%lf,%lf, %lf, %lf, %lf, %lf), %lf"\
                    , dm\
                    , V_len\
                    , c1\
                    , c2\
                    , c3\
                    , cc1\
                    , cc2\
                    , cc3\
                    , cc4\
                    , cc5\
                    , cc6\
                    , cc0\
                    );
        }
        // Reset Lupdated
        for (EDGE_ID li = 0; li < stored_num; li++){
            Lupdated[li] = 0;
        }
        //getchar();
    }
    //////////////////////////////////////////////////////////////
    // TESTING
    //////////////////////////////////////////////////////////////
    //
    //printf("\nPress key to test");
    //getchar();
    //printf("\nTESTING...");
    //#pragma omp parallel for schedule(static) shared(stored_num\
    //        , CC)
    //for (EDGE_ID ci = 0; ci < stored_num; ci++){
    //    EDGE_ID diff = 0;
    //
    //    for (EDGE_ID cj = 0; cj < stored_num; cj++){
    //
    //        if (cj == ci) continue;
    //        EDGE_ID j = 0;
    //        EDGE_ID k = 0;
    //        EDGE_ID count = 0;
    //        int quit_flag = 0;
    //        while ((j < CC[ci].len) && (k < CC[cj].len)){
    //            if (CC[ci].boundary[j] < CC[cj].boundary[k]){
    //                j++;
    //                count++;
    //            }
    //            else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
    //                k++;
    //                count++;
    //            }
    //            else{
    //                j++;
    //                k++;
    //            }
    //        }
    //        if (j < CC[ci].len){
    //            count += CC[ci].len - j;
    //        }
    //        if (k < CC[cj].len){
    //            count += CC[cj].len - k;
    //        }
    //
    //        if (count < CC[ci].len){
    //            if (CC[ci].len - count > diff){
    //                printf("\nImprovement possible by reducing ci, li %d, %d with cj, lj %d, %d!"\
    //                        , ci, CC[ci].Lidx\
    //                        , cj, CC[cj].Lidx);
    //                printf("\nTESTING FAILED!!!");
    //                getchar();
    //            }
    //        }
    //
    //    }
    //
    //}
    //printf("\nTESTED OK.");
    //////////////////////////////////////////////////////////////
    //////////////////////////////////////////////////////////////
    //////////////////////////////////////////////////////////////
    // Sort Llen, Lupdated, Lcycid
    mergeSort_Llen(Llen, Lcycid, Lupdated, 0, stored_num - 1);
    // Update C.Lidx
    for (EDGE_ID li = 0; li < stored_num; li++){
        CC[Lcycid[li]].Lidx = li;
    }
#ifdef STORE_LENGTHS_CYCLES
    FILE* fp1 = fopen(minimal_lens_filename, "w");
    for (EDGE_ID li = 0; li < stored_num; li++){
        fprintf(fp1, "%d, ", Llen[li]);
    }
    //for (EDGE_ID ci = 0; ci < stored_num; ci++){
    //    fprintf(fp1, "%d, ", CC[ci].len);
    //}
    fclose(fp1);
#endif
    /* Emit the minimized cycles (only those with at least 5 edges; L is
       sorted by decreasing length, so we can stop at the first short one). */
    FILE* fp2 = fopen(filename, "w");
    if (!self->g_suppress_output){
        printf("\n");
    }
    for (EDGE_ID li = 0; li < stored_num; li++){
        if (Llen[li] < 5){
            break;
        }
        EDGE_ID ci = Lcycid[li];
        //if (li < 15)
        //printf("\nlen %d", Llen[li]);
        for (EDGE_ID nn = 0; nn < CC[ci].len; nn++){
            fprintf(fp2, "%d, %d, ", self->g_edges_list[2*CC[ci].boundary[nn]]\
                    , self->g_edges_list[2*CC[ci].boundary[nn]+1]);
        }
        fprintf(fp2, "\n");
        free(CC[ci].boundary);
    }
    fclose(fp2);
    free(CC);
    free(update_V);
    free(Lcycid);
    free(Llen);
    free(Lupdated);
    /* NOTE(review): update_v_indices is never freed (the free below is
       commented out) and the update_in_cycle pointer array itself is leaked
       (only its elements are freed) - TODO confirm and fix upstream. */
    //free(update_v_indices);
    //
    free(case2ba);
    free(case2a2bb);
    free(update_edges);
    free(update_edges_flag);
    free(update_in_cycle_len);
    free(update_in_cycle_max_len);
    for (EDGE_ID ii = 0; ii < self->g_n_valid_edges; ii++){
        free(update_in_cycle[ii]);
    }
    if (!self->g_suppress_output){
        printf("\nDone. Press key to quit H1 cycle shortening");
    }
}
/*
 * Greedy per-cycle variant of the H0 cycle minimizer: processes cycles in
 * decreasing order of persistence (death - birth, or thresh - birth for
 * undead features) and repeatedly XORs each cycle with its best-reducing
 * partner until no further shortening is possible.
 *
 * Fixes vs. previous revision:
 *  - Llen was updated at index `ci` (the cycle id) instead of `li`
 *    (the cycle's position in the length-sorted L arrays), desynchronizing
 *    Llen from Lcycid before the re-sort (v3 correctly uses Llen[li]).
 *  - sorted_par / Pcycid / Lcycid / Llen / Lupdated were leaked.
 */
void minimize_birth_cycles_H0_v4(filtration* self\
        , cyc_info* CC\
        , EDGE_ID stored_num\
        , char* filename\
        , char* filename2\
        ){
    //printf("\nNumber of cycles %d", stored_num);
    //getchar();
    omp_set_num_threads(2*self->g_cpu_count - 1);
    EDGE_ID* Lcycid = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID* Llen = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID* Lupdated = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));
    printf("\nIntializing L, C.Lidx...");
    // Step 1. Initialize L and C.Lidx
    for (EDGE_ID i = 0; i < stored_num; i++){
        Lcycid[i] = i;
        Llen[i] = CC[i].len;
        Lupdated[i] = 0;
    }
    printf("\nSorting Llen...");
    // Step 2(a): Sort Llen, Lcycid, Lupdated by Llen
    mergeSort_Llen(Llen, Lcycid, Lupdated, 0, stored_num - 1);
    printf("\nInitializing C.Lidx...");
    // Step 2(b): Initialize C.Lidx
    for (EDGE_ID li = 0; li < stored_num; li++){
        CC[Lcycid[li]].Lidx = li;
    }
    /* Persistence parameter per cycle, used to fix the processing order. */
    PAR* sorted_par = (PAR*)malloc(stored_num*sizeof(PAR));
    EDGE_ID* Pcycid = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    for (EDGE_ID ci = 0; ci < stored_num; ci++){
        if (CC[ci].perspair[1] != -1){
            sorted_par[ci] = CC[ci].perspair[1] - CC[ci].perspair[0];
        }
        else{
            /* Undead feature: treat the filtration threshold as its death. */
            sorted_par[ci] = self->g_thresh - CC[ci].perspair[0];
        }
        Pcycid[ci] = ci;
    }
    // SORT temp_par by pers barcode as follows:
    // Sorted in decreasing order of parameter where:
    // For dead features: parameter = death - birth
    // For undead features: parameter = thresh - birth
    mergeSort_temp_par(sorted_par, Pcycid, 0, stored_num-1);
    for (EDGE_ID pi = 0; pi < stored_num; pi++){
        EDGE_ID ci = Pcycid[pi];
        /* Keep reducing ci until no partner shortens it any further. */
        while(1){
            EDGE_ID li = CC[ci].Lidx;
            CC[ci].diff = 0;
            /* Find the best partner: L is sorted by decreasing length, so
               once Llen[lj] drops below the current best diff, stop. */
            for (EDGE_ID lj = li + 1; lj < stored_num; lj++){
                if (Llen[lj] < CC[ci].diff) break;
                EDGE_ID cj = Lcycid[lj];
                EDGE_ID j = 0;
                EDGE_ID k = 0;
                EDGE_ID count = 0;
                /* Two-pointer sweep counting |ci XOR cj|. */
                while ((j < CC[ci].len) && (k < CC[cj].len)){
                    if (CC[ci].boundary[j] < CC[cj].boundary[k]){
                        j++;
                        count++;
                    }
                    else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
                        k++;
                        count++;
                    }
                    else{
                        j++;
                        k++;
                    }
                }
                if (j < CC[ci].len){
                    count += CC[ci].len - j;
                }
                if (k < CC[cj].len){
                    count += CC[cj].len - k;
                }
                if (count >= CC[ci].len - CC[ci].diff){
                    continue;
                }
                CC[ci].redw = cj;
                CC[ci].diff = CC[ci].len - count;
            }
            if (!CC[ci].diff){
                /* No partner shortens ci: this cycle is now minimal. */
                printf("\nFinal new len of (%lf, %lf) is %d"\
                        , CC[ci].perspair[0]\
                        , CC[ci].perspair[1]\
                        , CC[ci].len);
                getchar();
                break;
            }
            /* Apply the reduction: replace ci's boundary with ci XOR redw. */
            EDGE_ID j = 0;
            EDGE_ID k = 0;
            EDGE_ID count = 0;
            EDGE_ID cj = CC[ci].redw;
            printf("\nreducing %d (len %d) with %d (len %d, (%lf, %lf))"\
                    , ci\
                    , CC[ci].len\
                    , cj\
                    , CC[cj].len\
                    , CC[cj].perspair[0]\
                    , CC[cj].perspair[1]\
                    );
            EDGE_ID* scratch = (EDGE_ID*)malloc((CC[ci].len+CC[cj].len)*sizeof(EDGE_ID));
            while ((j < CC[ci].len) && (k < CC[cj].len)){
                if (CC[ci].boundary[j] < CC[cj].boundary[k]){
                    scratch[count++] = CC[ci].boundary[j++];
                }
                else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
                    scratch[count++] = CC[cj].boundary[k++];
                }
                else{
                    /* Shared edge cancels out of the symmetric difference. */
                    j++;
                    k++;
                }
            }
            while (j < CC[ci].len){
                scratch[count++] = CC[ci].boundary[j++];
            }
            while (k < CC[cj].len){
                scratch[count++] = CC[cj].boundary[k++];
            }
            free(CC[ci].boundary);
            printf("\nfor pers pair (%lf, %lf), Old len is %d and new len is %d"\
                    , CC[ci].perspair[0]\
                    , CC[ci].perspair[1]\
                    , CC[ci].len\
                    , count);
            CC[ci].boundary = scratch;
            CC[ci].len = count;
            /* BUGFIX: Llen is indexed by L-position, not by cycle id.
               (Previously `Llen[ci] = count;`, which corrupted the entry of
               an unrelated cycle before the re-sort below.) */
            Llen[li] = count;
            printf("\nUpdate lengths");
            // Sort Llen, Lupdated, Lcycid
            mergeSort_Llen(Llen, Lcycid, Lupdated, 0, stored_num - 1);
            // Update C.Lidx
            for (EDGE_ID li = 0; li < stored_num; li++){
                CC[Lcycid[li]].Lidx = li;
            }
        }
    }
    /* Release the bookkeeping arrays (previously leaked). */
    free(sorted_par);
    free(Pcycid);
    free(Lcycid);
    free(Llen);
    free(Lupdated);
}
/*
 * For the cycle sitting at L-position li, scan every cycle at a later
 * L-position (i.e. every cycle no longer than it) and record in
 * CC[ci].diff / CC[ci].redw the best strict length reduction achievable
 * by XOR-ing the cycle with that candidate.  CC[ci].diff is NOT reset
 * here; candidates must beat its incoming value.
 */
void minimal_CASE1(EDGE_ID li, cyc_info* CC, EDGE_ID* Lcycid, EDGE_ID* Llen, EDGE_ID stored_num){
    EDGE_ID ci = Lcycid[li];
    for (EDGE_ID lj = li + 1; lj < stored_num; lj++){
        /* L is sorted by decreasing length: once candidates are shorter than
           the best diff so far, none of the rest can improve on it. */
        if (Llen[lj] < CC[ci].diff){
            break;
        }
        EDGE_ID cj = Lcycid[lj];
        EDGE_ID a = 0;        /* cursor into ci's sorted boundary */
        EDGE_ID b = 0;        /* cursor into cj's sorted boundary */
        EDGE_ID sym = 0;      /* running size of the symmetric difference */
        int aborted = 0;
        /* Two-pointer sweep over the sorted edge lists, aborting as soon as
           this candidate provably cannot beat the current best diff. */
        while (a < CC[ci].len && b < CC[cj].len){
            EDGE_ID ea = CC[ci].boundary[a];
            EDGE_ID eb = CC[cj].boundary[b];
            if (ea < eb){
                a++;
                sym++;
            }
            else if (ea > eb){
                b++;
                sym++;
            }
            else{
                /* Shared edge: cancels in the XOR. */
                a++;
                b++;
            }
            if (sym >= CC[ci].len || (CC[ci].len - sym) <= CC[ci].diff){
                aborted = 1;
                break;
            }
        }
        if (aborted){
            continue;
        }
        /* All unmatched tail edges enter the symmetric difference. */
        sym += CC[ci].len - a;
        sym += CC[cj].len - b;
        if (sym >= CC[ci].len){
            continue;           /* no strict shortening */
        }
        if ((CC[ci].len - sym) <= CC[ci].diff){
            continue;           /* does not beat the current best */
        }
        CC[ci].diff = CC[ci].len - sym;
        CC[ci].redw = cj;
    }
}
/*
 * For the (non-updated) cycle at L-position li, re-evaluate its best
 * reduction against ONLY the cycles that changed this iteration:
 * update_v_indices holds the updated cycles' L-positions sorted ascending,
 * so a binary search yields the first candidate strictly after li.
 * Updates CC[ci].diff / CC[ci].redw when a candidate gives a strictly
 * better length reduction (CC[ci].diff is not reset; incoming value must
 * be beaten).
 *
 * Fix vs. previous revision: removed the unused local `common`.
 */
void minimal_CASE2(filtration* self, EDGE_ID li, cyc_info* CC, EDGE_ID* Lcycid, EDGE_ID* Llen\
        , EDGE_ID* update_v_indices, EDGE_ID V_len){
    /* No updated cycle lies after li in L: nothing to check. */
    if (update_v_indices[V_len-1] <= li) return;
    EDGE_ID idx = bin_search_min_greater_updated_V_byLidx(update_v_indices\
            , 0, V_len-1\
            , li\
            , V_len);
    EDGE_ID ci = Lcycid[li];
    for (EDGE_ID vj = idx; vj < V_len; vj++){
        EDGE_ID lj = update_v_indices[vj];
        if (lj <= li) continue;
        /* L is sorted by decreasing length: shorter candidates cannot beat
           the best diff found so far. */
        if (Llen[lj] < CC[ci].diff){
            break;
        }
        EDGE_ID cj = Lcycid[lj];
        //if (CC[cj].perspair[0] > CC[ci].perspair[0]){
        //    continue;
        //}
        // CHECK BIRTH THRESH
        //if (CC[cj].updated_birth > self->g_cycle_min_birth_thresh){
        //    continue;
        //}
        /* Two-pointer sweep counting |ci XOR cj| with early abort. */
        EDGE_ID j = 0;
        EDGE_ID k = 0;
        EDGE_ID count = 0;
        int quit_flag = 0;
        while ((j < CC[ci].len) && (k < CC[cj].len)){
            if (CC[ci].boundary[j] < CC[cj].boundary[k]){
                j++;
                count++;
            }
            else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
                k++;
                count++;
            }
            else{
                /* Shared edge cancels in the XOR. */
                j++;
                k++;
            }
            if (count >= CC[ci].len){
                quit_flag = 1;
                break;
            }
            if ((CC[ci].len - count) <= CC[ci].diff){
                quit_flag = 1;
                break;
            }
        }
        if (quit_flag){
            continue;
        }
        /* Unmatched tail edges all enter the symmetric difference. */
        if (j < CC[ci].len){
            count += CC[ci].len - j;
        }
        if (k < CC[cj].len){
            count += CC[cj].len - k;
        }
        if (count >= CC[ci].len){
            continue;           /* no strict shortening */
        }
        if ((CC[ci].len - count) <= CC[ci].diff){
            continue;           /* does not beat the current best */
        }
        CC[ci].diff = CC[ci].len - count;
        CC[ci].redw = cj;
    }
}
/*
 * In-place Fisher-Yates shuffle of the first `num` entries of CC.
 *
 * Fix vs. previous revision: the RNG was re-seeded with time(NULL) on every
 * call, so multiple calls within the same second produced the identical
 * "random" permutation.  The seed is now set once per process.
 */
void shuffle_cyc(cyc_info* CC, EDGE_ID num){
    int n = (int)num;
    static int seeded = 0;   /* seed exactly once per process */
    if (!seeded){
        srand((unsigned)time(NULL));
        seeded = 1;
    }
    for (int i = 0; i < n - 1; i++) {
        /* Pick j uniformly (up to rand()'s granularity) in [i, n-1]. */
        size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
        cyc_info t = CC[j];
        CC[j] = CC[i];
        CC[i] = t;
    }
}
/*
 * Recompute the best reduction for the cycle at L-position li using the
 * edge-to-cycle index instead of a full pairwise sweep: each edge shared
 * between ci and a candidate cj adds 2 to cj_diff[cj].  Since
 * |ci XOR cj| = len(ci) + len(cj) - cj_diff[cj], the reduction
 * len(ci) - |ci XOR cj| equals cj_diff[cj] - len(cj), which is tested and
 * recorded incrementally as shared edges accumulate.
 *
 * flag_case != 0 restricts candidates to cycles updated this iteration
 * (Lupdated[lj] set); only cycles at later L-positions (no longer than ci)
 * are considered.  CC[ci].diff is not reset here.
 *
 * Fix vs. previous revision: removed the dead local `max_diff`, which was
 * written but never read.
 */
void update_diff(filtration* self, EDGE_ID li, EDGE_ID* Lupdated, int flag_case\
        , EDGE_ID* Lcycid, cyc_info* CC, EDGE_ID stored_num){
    EDGE_ID ci = Lcycid[li];
    /* Per-candidate shared-edge counter, scaled by 2. */
    EDGE_ID* cj_diff = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));
    for (EDGE_ID idx = 0; idx < CC[ci].len; idx++){
        EDGE_ID edge = CC[ci].boundary[idx];
        for (EDGE_ID idx2 = 0; idx2 < self->g_edges_in_cycles_len[edge]; idx2++){
            EDGE_ID cj = self->g_edges_in_cycles[edge][idx2];
            //if (CC[cj].perspair[0] > CC[ci].perspair[0]){
            //    continue;
            //}
            // CHECK BIRTH THRESH
            //if (CC[cj].updated_birth > self->g_cycle_min_birth_thresh){
            //    continue;
            //}
            EDGE_ID lj = CC[cj].Lidx;
            if (lj <= li) continue;               /* only cycles no longer than ci */
            if (CC[cj].len < CC[ci].diff) continue;  /* too short to beat best diff */
            if (flag_case){
                if (!Lupdated[lj]) continue;      /* restrict to updated cycles */
            }
            cj_diff[cj] += 2;
            if (CC[cj].len < cj_diff[cj]){
                /* Reduction = cj_diff - len(cj); record if it beats the best. */
                if ((cj_diff[cj] - CC[cj].len) > CC[ci].diff){
                    CC[ci].diff = cj_diff[cj] - CC[cj].len;
                    CC[ci].redw = cj;
                }
            }
        }
    }
    free(cj_diff);
}
/*
 * Like update_diff, but unconditionally scans every cycle sharing an edge
 * with the cycle at L-position li (no updated-only filtering): each shared
 * edge adds 2 to cj_diff[cj], and since |ci XOR cj| = len(ci)+len(cj)-cj_diff,
 * the achievable reduction is cj_diff[cj] - len(cj).  Updates CC[ci].diff /
 * CC[ci].redw when a candidate beats the incoming diff; diff is not reset.
 *
 * NOTE(review): the inner `break` on CC[cj].len < CC[ci].diff assumes
 * g_edges_in_cycles[edge] is ordered by non-increasing cycle length, but the
 * builder in minimize_birth_cycles_H0_v3 appends in cycle-id order - confirm
 * the intended ordering (update_diff uses `continue` here instead).
 */
void find_first_diff(filtration* self, EDGE_ID li, EDGE_ID* Lupdated\
        , EDGE_ID* Lcycid, cyc_info* CC, EDGE_ID stored_num){
    EDGE_ID ci = Lcycid[li];
    /* Per-candidate shared-edge counter, scaled by 2. */
    EDGE_ID* cj_diff = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));
    for (EDGE_ID idx = 0; idx < CC[ci].len; idx++){
        EDGE_ID edge = CC[ci].boundary[idx];
        for (EDGE_ID idx2 = 0; idx2 < self->g_edges_in_cycles_len[edge]; idx2++){
            EDGE_ID cj = self->g_edges_in_cycles[edge][idx2];
            EDGE_ID lj = CC[cj].Lidx;
            if (CC[cj].len < CC[ci].diff) break;
            if (lj <= li) continue;   /* only cycles no longer than ci */
            cj_diff[cj] += 2;
            if (CC[cj].len < cj_diff[cj]){
                /* Reduction = cj_diff - len(cj); record if it beats the best. */
                if ((cj_diff[cj] - CC[cj].len) > CC[ci].diff){
                    CC[ci].diff = cj_diff[cj] - CC[cj].len;
                    CC[ci].redw = cj;
                }
            }
        }
    }
    free(cj_diff);
}
/*
 * Greedily shortens ("minimizes") a stored set of H1 birth cycles.
 *
 * Each cycle CC[ci] carries a sorted simplex boundary list (boundary, len).
 * Every iteration scans, over pairs (ci, cj) taken from the length-sorted
 * order L, for the largest length reduction dm obtainable by replacing
 * boundary(ci) with the symmetric difference boundary(ci) XOR boundary(cj).
 * All best pairs are collected in update_V, the replacements are computed in
 * parallel, and the sorted bookkeeping (Llen/Lcycid/Lupdated, CC[.].Lidx) is
 * rebuilt.  The loop stops when no pair strictly reduces any cycle.
 *
 * Side effects: writes the original lengths to lens_filename and the
 * minimized lengths to minimal_lens_filename (when STORE_LENGTHS_CYCLES is
 * defined), writes the minimized cycles to filename, and frees CC and all
 * boundaries before returning.
 *
 * NOTE(review): printf uses %d for EDGE_ID values -- assumes EDGE_ID is an
 * int-compatible type; confirm against its typedef.
 * NOTE(review): fopen/malloc/realloc results are not checked for NULL.
 * NOTE(review): the commented-out parameter line right after the signature
 * ends in a backslash; if no whitespace follows it in the real file, line
 * splicing would swallow the following `if` into the comment -- verify.
 */
void minimize_birth_cycles_H1_v2(filtration* self\
        , cyc_info_H2* CC\
        , EDGE_ID stored_num\
        , char* filename\
        , char* lens_filename\
        , char* minimal_lens_filename\
        ){
    //, char* filename2\
    if (!self->g_suppress_output){
        printf("\nNumber of cycles %d", stored_num);
    }

#ifdef STORE_LENGTHS_CYCLES
    /* Dump the pre-minimization cycle lengths. */
    FILE* fp0 = fopen(lens_filename, "w");
    for (EDGE_ID ci = 0; ci < stored_num; ci++){
        fprintf(fp0, "%d, ", CC[ci].len);
        if (!self->g_reduce_cyc_lengths){
            free(CC[ci].boundary);
        }
    }
    fclose(fp0);
#endif

    /* Nothing further to do unless length reduction was requested. */
    if (!self->g_reduce_cyc_lengths){
        free(CC);
        return;
    }

    omp_set_num_threads(2*self->g_cpu_count - 1);

    /* L is a view of the cycles sorted by current boundary length:
       Llen[li] = length, Lcycid[li] = cycle id, Lupdated[li] = dirty flag. */
    EDGE_ID* Lcycid = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID* Llen = (EDGE_ID*)malloc(stored_num*sizeof(EDGE_ID));
    EDGE_ID* Lupdated = (EDGE_ID*)calloc(stored_num, sizeof(EDGE_ID));

    // Step 1. Initialize L and C.Lidx
    for (EDGE_ID i = 0; i < stored_num; i++){
        Lcycid[i] = i;
        Llen[i] = CC[i].len;
        Lupdated[i] = 0;
    }

    //printf("\nSorting Llen...");
    // Step 2(a): Sort Llen, Lcycid, Lupdated by Llen
    mergeSort_Llen(Llen, Lcycid, Lupdated, 0, stored_num - 1);

    //printf("\nInitializing C.Lidx...");
    // Step 2(b): Initialize C.Lidx
    for (EDGE_ID li = 0; li < stored_num; li++){
        CC[Lcycid[li]].Lidx = li;
    }

    // Define V that will store summations to be done
    EDGE_ID V_len = 0;
    EDGE_ID V_max_len = 10;
    min_update_V_H2* update_V = (min_update_V_H2*)malloc(V_max_len*sizeof(min_update_V_H2));

    EDGE_ID it_counter = 0;

    // Step 4: Loop for minimization
    while (1){
        if (!self->g_suppress_output){
            printf("\n\nIteration %d", it_counter++);
        }

        // Step 4(a): Find max diff
        /* dm starts at 1 so only strict length reductions are accepted. */
        EDGE_ID dm = 1;
        V_len = 0;

        for (EDGE_ID li = 0; li < stored_num; li++){
            //printf("\nli %d", li);
            /* L appears sorted by length such that once a cycle is shorter
               than the best reduction found, later entries cannot improve. */
            if (Llen[li] < dm) break;
            int add_flag = 0;
            EDGE_ID ci = Lcycid[li];

            for (EDGE_ID lj = li + 1; lj < stored_num; lj++){
                if (Llen[lj] < dm) break;
                //printf("\nlj %d", lj);
                EDGE_ID cj = Lcycid[lj];

                /* Merge-walk both sorted boundaries, counting the size of
                   the symmetric difference in `count`. */
                EDGE_ID i = 0;
                EDGE_ID j = 0;
                EDGE_ID count = 0;
                int quit_flag = 0;

                while (i < CC[ci].len && j < CC[cj].len){
                    int compare;
                    if (CC[ci].boundary[i].key1 < CC[cj].boundary[j].key1){
                        compare = 1;
                    }
                    else if (CC[ci].boundary[i].key1 > CC[cj].boundary[j].key1){
                        compare = 0;
                    }
                    else{
                        if (CC[ci].boundary[i].key2 < CC[cj].boundary[j].key2){
                            compare = 1;
                        }
                        else if (CC[ci].boundary[i].key2 > CC[cj].boundary[j].key2){
                            compare = 0;
                        }
                        else{
                            /* identical simplex in both boundaries: cancels */
                            compare = -1;
                        }
                    }

                    if (compare == 1){
                        i++;
                        count++;
                    }
                    else if (!compare){
                        j++;
                        count++;
                    }
                    else{
                        i++;
                        j++;
                    }

                    /* Early exit: the result can no longer be shorter than
                       CC[ci].len - dm. */
                    if (count > CC[ci].len - dm){
                        quit_flag = 1;
                        break;
                    }
                }

                if (quit_flag) continue;

                /* Unmatched tails survive into the symmetric difference. */
                if (i < CC[ci].len){
                    count += CC[ci].len - i;
                }
                if (j < CC[cj].len){
                    count += CC[cj].len - j;
                }

                if (count > CC[ci].len - dm){
                    continue;
                }

                //printf("\nREACHED HERE");
                //getchar();
                if (CC[ci].len - count > dm){
                    /* Strictly better reduction: restart the candidate list. */
                    dm = CC[ci].len - count;
                    update_V[0].cycid = ci;
                    CC[ci].redw = cj;
                    V_len = 1;
                    add_flag = 1;
                }
                else if ((CC[ci].len - count == dm) && (!add_flag)){
                    /* Tie with the current best: record at most one
                       reduction per ci this iteration. */
                    add_flag = 1;
                    update_V[V_len++].cycid = ci;
                    CC[ci].redw = cj;
                    if (V_len == V_max_len){
                        V_max_len += 100;
                        update_V = (min_update_V_H2*)realloc(update_V\
                                , V_max_len*sizeof(min_update_V_H2));
                    }
                }
            }
        }

        if (!V_len){
            //printf("\nDiff 0. EXITING.");
            break;
        }

        if (!self->g_suppress_output){
            printf("\nmaxdiff %d, num pairs %d", dm, V_len);
        }

        /* Step 4(b): compute all recorded symmetric differences in
           parallel; each task writes only its own update_V slot. */
        #pragma omp parallel for schedule(static, 50) shared(update_V, CC)
        for (EDGE_ID vi = 0; vi < V_len; vi++){
            //if (vi % 1000 == 0){
            //printf("\nreducing %d", vi);
            //}
            EDGE_ID ci = update_V[vi].cycid;
            EDGE_ID cj = CC[ci].redw;
            //printf("\nReducing %d with %d", ci, cj);
            simplex* scratch = (simplex*)malloc((CC[ci].len + CC[cj].len)*sizeof(simplex));
            EDGE_ID edge;   // NOTE(review): unused in this loop
            EDGE_ID i = 0;
            EDGE_ID j = 0;
            EDGE_ID count = 0;

            /* Same merge-walk as above, but this time the surviving
               (non-cancelling) simplices are written to scratch. */
            while ((i < CC[ci].len) && (j < CC[cj].len)){
                int compare;
                if (CC[ci].boundary[i].key1 < CC[cj].boundary[j].key1){
                    compare = 1;
                }
                else if (CC[ci].boundary[i].key1 > CC[cj].boundary[j].key1){
                    compare = 0;
                }
                else{
                    if (CC[ci].boundary[i].key2 < CC[cj].boundary[j].key2){
                        compare = 1;
                    }
                    else if (CC[ci].boundary[i].key2 > CC[cj].boundary[j].key2){
                        compare = 0;
                    }
                    else{
                        compare = -1;
                    }
                }
                if (compare==1){
                    scratch[count++] = CC[ci].boundary[i++];
                }
                else if (!compare){
                    scratch[count++] = CC[cj].boundary[j++];
                }
                else{
                    i++;
                    j++;
                }
            }
            // update_in_cyc will have unique
            while (i < CC[ci].len){
                // No change
                scratch[count++] = CC[ci].boundary[i++];
            }
            while (j < CC[cj].len){
                scratch[count++] = CC[cj].boundary[j++];
            }

            /* Shrink to the actual symmetric-difference size. */
            scratch = (simplex*)realloc(scratch, count*sizeof(simplex));
            update_V[vi].VV = scratch;
            update_V[vi].V_len = count;
        }

        // Update CC with new boundaries, update Llen and Lupdated
        for (EDGE_ID vi = 0; vi < V_len; vi++){
            EDGE_ID ci = update_V[vi].cycid;
            EDGE_ID li = CC[ci].Lidx;
            free(CC[ci].boundary);
            CC[ci].boundary = update_V[vi].VV;
            CC[ci].len = update_V[vi].V_len;
            Llen[li] = update_V[vi].V_len;
            Lupdated[li] = 1;
        }

        // Sort Llen, Lupdated, Lcycid
        mergeSort_Llen(Llen, Lcycid, Lupdated, 0, stored_num - 1);

        // Update C.Lidx
        for (EDGE_ID li = 0; li < stored_num; li++){
            CC[Lcycid[li]].Lidx = li;
        }
    }

    //////////////////////////////////////////////////////////////
    // TESTING
    //////////////////////////////////////////////////////////////
    //
    //printf("\nPress key to test");
    //getchar();
    //printf("\nTESTING...");
    //#pragma omp parallel for schedule(static) shared(stored_num\
    // , CC)
    //for (EDGE_ID ci = 0; ci < stored_num; ci++){
    // EDGE_ID diff = 0;
    //
    // for (EDGE_ID cj = 0; cj < stored_num; cj++){
    //
    // if (cj == ci) continue;
    // EDGE_ID j = 0;
    // EDGE_ID k = 0;
    // EDGE_ID count = 0;
    // int quit_flag = 0;
    // while ((j < CC[ci].len) && (k < CC[cj].len)){
    // if (CC[ci].boundary[j] < CC[cj].boundary[k]){
    // j++;
    // count++;
    // }
    // else if (CC[ci].boundary[j] > CC[cj].boundary[k]){
    // k++;
    // count++;
    // }
    // else{
    // j++;
    // k++;
    // }
    // }
    // if (j < CC[ci].len){
    // count += CC[ci].len - j;
    // }
    // if (k < CC[cj].len){
    // count += CC[cj].len - k;
    // }
    //
    // if (count < CC[ci].len){
    // if (CC[ci].len - count > diff){
    // printf("\nImprovement possible by reducing ci, li %d, %d with cj, lj %d, %d!"\
    // , ci, CC[ci].Lidx\
    // , cj, CC[cj].Lidx);
    // printf("\nTESTING FAILED!!!");
    // getchar();
    // }
    // }
    //
    // }
    //
    //}
    //printf("\nTESTED OK.");
    //////////////////////////////////////////////////////////////
    //////////////////////////////////////////////////////////////
    //////////////////////////////////////////////////////////////

#ifdef STORE_LENGTHS_CYCLES
    /* Dump the post-minimization cycle lengths. */
    FILE* fp1 = fopen(minimal_lens_filename, "w");
    for (EDGE_ID ci = 0; ci < stored_num; ci++){
        fprintf(fp1, "%d, ", CC[ci].len);
    }
    fclose(fp1);
#endif

    /* Write the minimized cycles: one line per cycle, each simplex emitted
       as "v0, v1, key2, " using the global edge list. */
    FILE* fp2 = fopen(filename, "w");
    for (EDGE_ID li = 0; li < stored_num; li++){
        EDGE_ID ci = Lcycid[li];
        //if (li < 15)
        //printf("\nlen %d", Llen[li]);
        for (EDGE_ID nn = 0; nn < CC[ci].len; nn++){
            fprintf(fp2, "%d, %d, %d, ", self->g_edges_list[2*CC[ci].boundary[nn].key1]\
                    , self->g_edges_list[2*CC[ci].boundary[nn].key1+1]\
                    , CC[ci].boundary[nn].key2\
                    );
        }
        fprintf(fp2, "\n");
        free(CC[ci].boundary);
    }
    fclose(fp2);
    //}

    free(CC);
    free(update_V);
    free(Lcycid);
    free(Llen);
    free(Lupdated);
}
//static PyMethodDef DoryMethods[] = {
//
// {"compute_PH", compute_PH, METH_VARARGS, "Compute PH"},
// {NULL, NULL, 0, NULL}
//
//};
//
//static struct PyModuleDef dorymodule = {
// PyModuleDef_HEAD_INIT,
// "pydory", /* name of module*/
// NULL, /* documentation */
// -1, /* ??? */
// DoryMethods
//};
//
//
//PyMODINIT_FUNC PyInit_pydory(void){
// return PyModule_Create(&dorymodule);
//}
|
9852.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Array initialization: fill the m-by-n data matrix with the scaled index
   product r*c/M and set the normalization constant float_n to 1.2. */
static
void init_array (int m,
                 int n,
                 DATA_TYPE *float_n,
                 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  *float_n = 1.2;

  for (int r = 0; r < m; r++) {
    for (int c = 0; c < n; c++) {
      data[r][c] = ((DATA_TYPE) r*c) / M;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Dumps the m-by-m correlation matrix to stderr, one newline every 20
   values; can be used also to check the correctness of the output. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  for (int r = 0; r < m; r++) {
    for (int c = 0; c < m; c++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[r][c]);
      if ((r * m + c) % 20 == 0) {
        fprintf (stderr, "\n");
      }
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/*
 * Correlation kernel: computes the m-by-m correlation matrix of the n-by-m
 * column observations in `data` (mean-center, divide by stddev*sqrt(n),
 * then accumulate pairwise dot products into symmat).
 *
 * NOTE(review): `num_threads(#P11)` and the bare `#p #p` tokens in the
 * pragmas below are auto-tuner placeholders, not valid C -- this file is a
 * template that is specialized before compilation; confirm with the
 * generating tool before building it directly.
 */
static
void kernel_correlation(int m, int n,
                        DATA_TYPE float_n,
                        DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
                        DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
                        DATA_TYPE POLYBENCH_1D(mean,M,m),
                        DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
  int i, j, j1, j2;

  DATA_TYPE eps = 0.1f;

#define sqrt_of_array_cell(x,j) sqrt(x[j])

#pragma scop
  /* Determine mean of column vectors of input data matrix */
  /* j1 is only ever a `distribute` loop variable and is therefore
     implicitly private; i, j, j2 are inner counters and must be listed. */
  #pragma omp parallel private(i, j, j2) num_threads(#P11)
  {
  #pragma omp target teams distribute #p #p
  for (j = 0; j < _PB_M; j++)
    {
      mean[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        mean[j] += data[i][j];
      mean[j] /= float_n;
    }

  /* Determine standard deviations of column vectors of data matrix. */
  #pragma omp target teams distribute #p #p
  for (j = 0; j < _PB_M; j++)
    {
      stddev[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
      stddev[j] /= float_n;
      stddev[j] = sqrt_of_array_cell(stddev, j);
      /* The following in an inelegant but usual way to handle
         near-zero std. dev. values, which below would cause a zero-
         divide. */
      stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
    }

  /* Center and reduce the column vectors. */
  #pragma omp target teams distribute #p #p
  for (i = 0; i < _PB_N; i++)
    {
      for (j = 0; j < _PB_M; j++)
        {
          data[i][j] -= mean[j];
          data[i][j] /= sqrt(float_n) * stddev[j];
        }
    }

  /* Calculate the m * m correlation matrix. */
  /* Only the strict upper triangle is computed; the lower triangle is
     filled by symmetry. */
  #pragma omp target teams distribute #p #p
  for (j1 = 0; j1 < _PB_M-1; j1++)
    {
      symmat[j1][j1] = 1.0;
      for (j2 = j1+1; j2 < _PB_M; j2++)
        {
          symmat[j1][j2] = 0.0;
          for (i = 0; i < _PB_N; i++)
            symmat[j1][j2] += (data[i][j1] * data[i][j2]);
          symmat[j2][j1] = symmat[j1][j2];
        }
    }
  }
#pragma endscop
  /* The j1 loop above stops at _PB_M-2, so the last diagonal element is
     set here. */
  symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Driver: allocate the PolyBench arrays, initialize, time the correlation
   kernel, and print the live-out matrix to defeat dead-code elimination. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;

  /* Variable declaration/allocation. */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);

  /* Initialize array(s). */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_correlation (m, n, float_n,
                      POLYBENCH_ARRAY(data),
                      POLYBENCH_ARRAY(symmat),
                      POLYBENCH_ARRAY(mean),
                      POLYBENCH_ARRAY(stddev));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(data);
  POLYBENCH_FREE_ARRAY(symmat);
  POLYBENCH_FREE_ARRAY(mean);
  POLYBENCH_FREE_ARRAY(stddev);

  return 0;
}
|
GB_binop__second_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_uint16
// A.*B function (eWiseMult): GB_AemultB__second_uint16
// A*D function (colscale): GB_AxD__second_uint16
// D*A function (rowscale): GB_DxB__second_uint16
// C+=B function (dense accum): GB_Cdense_accumB__second_uint16
// C+=b function (dense accum): GB_Cdense_accumb__second_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_uint16
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_uint16
// C=A'+scalar GB_bind2nd_tran__second_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = bij
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_UINT16 || GxB_NO_SECOND_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the actual loop lives in the
// included template and applies GB_BINOP (z = y for SECOND, see above).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE so
// the caller falls back to the generic path.
GrB_Info GB_Cdense_ewise3_noaccum__second_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed task slices (kfirst/klast/pstart) over ntasks/nthreads.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_Cdense_accumB__second_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// NOTE(review): the second `return (GrB_SUCCESS)` after the inner block is
// unreachable -- harmless dead code produced by the generator.
GrB_Info GB_Cdense_accumb__second_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D; Cx is the raw output
// value array handed to the colscale template.  Returns GrB_NO_VALUE when
// compiled out via GB_DISABLE.
GrB_Info GB_AxD__second_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D via the rowscale template.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_DxB__second_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns, driven by the
// precomputed TaskList; GB_BINOP above supplies the SECOND semantics.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_AaddB__second_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of patterns,
// driven by the precomputed TaskList.  Returns GrB_NO_VALUE when compiled
// out via GB_DISABLE.
GrB_Info GB_AemultB__second_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y) with the scalar bound as the second argument.  For the
// SECOND operator z = f(x,y) = y, every output entry is the scalar y; the
// values of Ax are never read (pattern only).  Cx and Ax may be aliased.
GrB_Info GB_bind2nd__second_uint16
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;   // pattern only; values unused
    uint16_t yval = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = yval ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// C = op (A', y): transpose A and apply SECOND(aij, y); GB_CAST_OP above
// writes the scalar y into every entry (A supplies only the pattern).
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_bind2nd_tran__second_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
gadget2fits.c | /*
Program to produce fits files from gadget snapshots
gcc -fopenmp -lgad-stan -lgsl -lgslcblas gadget2fits.c -o ~/bin/gadget2fits -I$HOME/usr/include -L$HOME/usr/lib -lcfitsio
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "libgad.h"
#include "fitsio.h"
#define USE 16
/* Print the command-line help text to stderr and terminate with status 1. */
void usage()
{
  /* One help line per entry, printed verbatim in order. */
  static const char *const help[] = {
    " Create FITS file for gas surface density v0.01\n",
    " -i <input file name>\n",
    " -t <temperature threshold>\n",
    " -tc <use temperature cut described in Hirschmann et al 2012>\n",
    " -use <bitcode of particles to be used for inertia tensor>\n",
    " -r <max-distance of particles to be considered for inertia tensor>\n",
    " -vb <number of velocity bins>\n",
    " -vm <sets range of velocity bins [-vm, vm]>\n",
    " -rf <rotate file> (sets viewport)\n",
    " -srf <save rotate file>\n",
    " -s <smoothing length for stellar particles>\n\n",
    " choose two:\n",
    " -b <boxsize>\n",
    " -g <gridsize>\n",
    " -p <pixelsize>\n\n",
    " -nophys <do not convert pixelsize to physical units (instead use code units)>\n\n",
    " \n\n",
  };

  for (size_t k = 0; k < sizeof help / sizeof help[0]; k++)
    fprintf(stderr, "%s", help[k]);

  exit(1);
}
#ifndef _OPENMP
/* Serial fallback when built without OpenMP: the only thread has id 0.
   `(void)` gives the function a proper prototype (empty parens declare an
   unspecified parameter list in pre-C23 C). */
int omp_get_thread_num(void) {return 0;}
//int omp_get_num_threads() {return 1;}
#endif
/*
 * Read a Gadget snapshot, optionally recenter and rotate it, bin cold gas
 * (type 0) and stellar (type 4) particles into a (gridsize x gridsize x
 * velbins) position-position-velocity cube for each of nproj projection
 * angles, and write each cube as a FITS image extension (gas cube first,
 * then stellar cube, per projection).
 *
 * Returns the CFITSIO status code (0 on success).
 */
int main (int argc, char *argv[])
{
    char infile[256];
    char fitsfilename[256];
    char rotfile[256];
    int i, j, k, n, usepart;
    int gridsize = 0;
    int verbose = 0;
    long ii;
    int velbins = 64;                 /* number of line-of-sight velocity bins */
    double velmax = 320;              /* velocity range is [-velmax, velmax] km/s */
    double boxsize = 0;
    double binsize = 0;
    double rotate_dist = 0;           /* max radius for the inertia tensor */
    double center[3] = {0., 0., 0.};
    double stellar_hsml = 0.4;        /* fixed smoothing length for stars */
    struct gadpart *part;
    struct header head;
    double tempthreshold = 1.e5;      /* gas warmer than this is skipped */
    int convert_phys = 1;
    int save_rotation = 0;
    int load_rotation = 0;
    int diag_temp_cut = 0;            /* Hirschmann et al. 2012 diagonal cut */
    double *dens;                     /* gas PPV cube */
    double **dens_tmp;                /* per-thread gas scratch cubes */
    double *sdens;                    /* stellar PPV cube */
    double **sdens_tmp;               /* per-thread stellar scratch cubes */
    int nproj = 7;
    int iproj;
    double proj_angle;
    int numthreads;

    /* Thread 0 records the OpenMP team size; it is used to size the
       per-thread scratch grids that avoid atomics in the binning loop. */
    #pragma omp parallel
    {
        int thread = omp_get_thread_num();
        if (thread == 0)
            numthreads = omp_get_num_threads();
    }

    dens_tmp = (double**) malloc(numthreads * sizeof(double *));
    sdens_tmp = (double**) malloc(numthreads * sizeof(double *));

    strcpy(fitsfilename,"default.fits");
    i = 1;
    usepart = USE;
    if (1==argc) usage();

    /* Command-line parsing; each option consumes its own arguments. */
    while (i<argc)
    {
        if (!strcmp(argv[i],"-i"))
        {
            i++;
            strcpy(infile,argv[i]);
            i++;
        }
        else if (*argv[i]!='-')
        {
            /* bare argument: treat as input file name */
            strcpy(infile,argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-o"))
        {
            i++;
            strcpy(fitsfilename,argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-rf"))
        {
            i++;
            strcpy(rotfile,argv[i]);
            load_rotation=1;
            i++;
        }
        else if (!strcmp(argv[i],"-srf"))
        {
            i++;
            strcpy(rotfile,argv[i]);
            save_rotation=1;
            i++;
        }
        else if (!strcmp(argv[i],"-use")) {
            i++;
            if (!strcmp(argv[i],"all")) usepart=63;
            else usepart=atoi(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-g"))
        {
            i++;
            gridsize=atoi(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-np"))
        {
            i++;
            nproj=atoi(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-vb"))
        {
            i++;
            velbins=atoi(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-vm"))
        {
            i++;
            velmax=atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-b"))
        {
            i++;
            boxsize=atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-s"))
        {
            i++;
            stellar_hsml=atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-r"))
        {
            i++;
            rotate_dist=atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-p"))
        {
            i++;
            binsize=atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-nophys"))
        {
            i++;
            convert_phys = 0;
        }
        else if (!strcmp(argv[i],"-tc"))
        {
            i++;
            diag_temp_cut = 1;
        }
        else if (!strcmp(argv[i],"-t"))
        {
            i++;
            tempthreshold=atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-v"))
        {
            i++;
            verbose=1;
        }
        else if (!strcmp(argv[i],"-c"))
        {
            i++;
            center[0]=atof(argv[i]);
            i++;
            center[1]=atof(argv[i]);
            i++;
            center[2]=atof(argv[i]);
            i++;
        } else {
            usage();
        }
    }

    unsigned int numpart_all;
    if (!(numpart_all=readgadget_part(infile, &head, &part)))
    {
        extern int libgaderr;
        printf("error reading file %s\nError Code %d\n",infile, libgaderr);
        exit(1);
    }

    /* Projections 1..(nproj-1)/2 tilt about x, the remainder about y, in
       equal steps covering 90 degrees. */
    if (nproj > 1)
    {
        proj_angle = 90 / ((nproj-1) /2. );
    } else nproj = 1;

    if (convert_phys)
    {
        /* pixel size given in physical kpc: convert to comoving code units */
        binsize = binsize * head.hubparam / head.time;
    }

    /* Exactly two of {binsize, gridsize, boxsize} must be given; derive
       the third or bail out with the usage text. */
    if ((binsize != 0) && (gridsize !=0) && (boxsize==0))
    {
        boxsize = gridsize * binsize;
    }
    else if ((binsize != 0) && (gridsize ==0) && (boxsize!=0))
    {
        gridsize = ceil(boxsize/binsize);
    }
    else if ((binsize == 0) && (gridsize !=0) && (boxsize!=0))
    {
        binsize = boxsize / gridsize;
    }
    else usage();

    /* A leading '!' tells CFITSIO to overwrite an existing file. */
    if (!strcmp(fitsfilename,"default.fits"))
        sprintf(fitsfilename,"!%s.fits",infile);
    else
    {
        char chdum[256];
        strcpy(chdum, fitsfilename);
        sprintf(fitsfilename,"!%s",chdum);
    }

    if (verbose) printf("building grid...\n");
    long g3 = gridsize * gridsize * velbins;   /* cells per PPV cube */
    long g2 = gridsize * gridsize;
    long g = gridsize;

    dens = (double *) calloc (g3, sizeof( double ));
    sdens = (double *) calloc (g3, sizeof( double ));

    if (verbose) printf("allocating grid for %d processes...\n", numthreads);
    #pragma omp parallel
    {
        int thread= omp_get_thread_num();
        dens_tmp[thread] = (double *) calloc (g3, sizeof(double));
        sdens_tmp[thread] = (double *) calloc (g3, sizeof(double));
    }

    if (verbose) printf("read snapshot...\n");

    /* Optional recentering on the user-supplied position. */
    if ( ( center[0] != 0 ) || ( center[1] != 0 ) || ( center[2] != 0 ) )
    {
        for ( n = 0; n < numpart_all; n++)
        {
            for ( j = 0; j < 3; j++)
                part[n].pos[j] -= center[j];
        }
    }

    if (verbose) printf("rotate system...\n");
    double ratios[2] = {0,0};
    gsl_matrix *rotation = NULL;   /* stays NULL if no rotation is applied */
    // rotategalaxy(wpart, num, -1, use_cm);//, &ratios[0], num);
    if (load_rotation)
    {
        FILE *matrixf=fopen(rotfile,"r");
        if (!matrixf)
        {
            fprintf(stderr, "error opening rotation file %s\n", rotfile);
            exit(1);
        }
        rotation = gsl_matrix_alloc(3,3);
        gsl_matrix_fread (matrixf, rotation);
        fclose(matrixf);
        rotatepart(part, numpart_all, rotation);
    }
    else if (usepart)
    {
        if (!rotate_dist) rotate_dist = boxsize/2.;
        rotategalaxy(part, numpart_all, rotate_dist, usepart, &ratios[0], &rotation);
        if (save_rotation)
        {
            FILE *matrixf=fopen(rotfile,"w");
            if (!matrixf)
            {
                fprintf(stderr, "error opening rotation file %s\n", rotfile);
                exit(1);
            }
            gsl_matrix_fwrite (matrixf, rotation);
            fclose(matrixf);
        }
    }
    /* BUGFIX: rotation was previously freed unconditionally, which is
       undefined behavior when neither -rf nor -use allocated it. */
    if (rotation) gsl_matrix_free(rotation);

    /*********************************************************************
       Binning
    *********************************************************************/
    const double boxhalf = boxsize / 2.;
    const double cellhalf = boxhalf / gridsize;
    const double cellvol = 8.0 * cellhalf * cellhalf * cellhalf;
    const double binsizeinv = gridsize / boxsize;
    double velbinsize = 2*velmax / velbins;
    const double convert = 10 * SQR(head.hubparam) * binsize * 1000 / SQR(head.time); //convert from 10^10Msun*h^2/kpc^3 comoving to Msun/pc^2 physical
    double total_proj_angle = 0;

    fitsfile *fptr;
    int status = 0;
    fits_create_file(&fptr, fitsfilename, &status);

    if (verbose) printf("binnning %d gas particles | gridsize %d\n", head.npart[0], gridsize);
    if (verbose) printf("start binning...\n");

    for (iproj = 0; iproj < nproj; iproj++)
    {
        /* Reset the accumulation cubes for this projection. */
        for ( i=0; i<g3; i++)
        {
            dens[i] = 0;
            sdens[i] = 0;
        }
        #pragma omp parallel private(i)
        {
            int thread= omp_get_thread_num();
            for ( i=0; i<g3; i++)
            {
                dens_tmp[thread][i] = 0;
                sdens_tmp[thread][i] = 0;
            }
        }

        /* First half of the projections tilts about x; the system is then
           rotated back and the second half tilts about y. */
        if (iproj)
        {
            if (iproj == ((nproj-1)/2)+1)
            {
                xrotate(-total_proj_angle, part, numpart_all);
                total_proj_angle = 0;
            }
            if (iproj <= (nproj-1)/2)
                xrotate(proj_angle, part, numpart_all);
            else
                yrotate(proj_angle, part, numpart_all);
            total_proj_angle += proj_angle;
        }
        if (verbose) printf("Projection Angle: %g\n", total_proj_angle);
        // char testname[256];
        // sprintf(testname,"test%d.gad", iproj);
        // writegadget_part(testname, head, part);

        /* Bin gas (type 0) and stars (type 4) with a cubic-spline kernel;
           each thread accumulates into its own scratch cube. */
        #pragma omp parallel for private(i, j, k)
        for ( n = 0; n < (head.npart[0]+head.npart[4]); n++ )
        {
            long index;
            double h;
            int star = 0;
            if (n < head.npart[0])
            {
                index = n;
                if (diag_temp_cut)
                {
                    /* density-dependent temperature cut (Hirschmann+ 2012) */
                    double ltemp = log10(temperature(part[index]));
                    double lrho = log10( (part[index].sph->rho) * SQR(0.72) * 1.e10);
                    if (ltemp > ( 0.3 * lrho + 3.2 ) ) continue;
                }
                else
                    if (temperature(part[index]) > tempthreshold) continue;
                h = part[index].sph->hsml;
            }
            else
            {
                /* stars follow the gas and dark-matter blocks */
                index = n + head.npart[1] + head.npart[2] + head.npart[3];
                h = stellar_hsml;
                star = 1;
            }
            double x = part[index].pos[0];
            double y = part[index].pos[1];
            double z = part[index].pos[2];
            double vz =part[index].vel[2];
            double pmass = part[index].mass;

            /* Skip particles whose kernel support (2h) misses the box. */
            if ( ((x+2*h) < -boxhalf) || ((x-2*h) > boxhalf) ) continue;
            if ( ((y+2*h) < -boxhalf) || ((y-2*h) > boxhalf) ) continue;
            if ( ((z+2*h) < -boxhalf) || ((z-2*h) > boxhalf) ) continue;

            int ix = floor( (x + boxhalf) * binsizeinv );
            int iy = floor( (y + boxhalf) * binsizeinv );
            int iz = floor( (z + boxhalf) * binsizeinv );
            int inc = 2.0 * h * binsizeinv;   /* kernel support in cells */

            double gx = ( ix / binsizeinv ) - boxhalf + cellhalf;
            double gy = ( iy / binsizeinv ) - boxhalf + cellhalf;
            double gz = ( iz / binsizeinv ) - boxhalf + cellhalf;

            double dist2 = ( SQR( x - gx ) + SQR( y - gy ) + SQR( z - gz ) ) / SQR( h );
            int thread = omp_get_thread_num();
            if ( dist2 < 4.0 )
            {
                /* Kernel covers its own cell center: smooth the mass over
                   all cells within 2h using the cubic spline. */
                for ( i = (ix - inc); i <= (ix+inc); i++ )
                {
                    if ((i<0) || (i>= gridsize)) continue;
                    for ( j = (iy - inc); j <= (iy+inc); j++ )
                    {
                        if ((j<0) || (j>= gridsize)) continue;
                        for ( k = (iz - inc); k <= (iz+inc); k++ )
                        {
                            if ((k<0) || (k>= gridsize)) continue;
                            gx = ( i / binsizeinv ) - boxhalf + cellhalf;
                            gy = ( j / binsizeinv ) - boxhalf + cellhalf;
                            gz = ( k / binsizeinv ) - boxhalf + cellhalf;
                            double val = 0.;
                            dist2 = ( SQR( x - gx ) + SQR( y - gy ) + SQR( z - gz ) ) / SQR( h );
                            if ( dist2 < 4.0 )
                            {
                                /* cubic-spline kernel weight at this cell */
                                double weight = 0;
                                double dist = sqrt( dist2 );
                                if ( dist < 1.0 )
                                {
                                    weight = 1.0 -1.5 * dist2 + 0.75 * dist2 * dist;
                                }
                                else
                                {
                                    double dif2 = 2.0 - dist;
                                    weight = 0.25 * dif2 * dif2 * dif2;
                                }
                                //val = weight / (h*h*h) / M_PI / norm * pmass / cellvol;
                                val = weight / (h*h*h) / M_PI * pmass;
                            }
                            if (val > 0)
                            {
                                long vind = floor((vz + velmax) / velbinsize);
                                // if (vind < 0) vind = 0;
                                // else if (vind >= velbins) vind = velbins - 1;
                                if (vind < 0) continue;
                                else if (vind >= velbins) continue;
                                /* z is projected out: cube axes are x, y,
                                   line-of-sight velocity */
                                long ind = i + g * j + g2 * vind;
                                {
                                    if (star)
                                        sdens_tmp[thread][ind] += val * convert;
                                    else
                                        dens_tmp[thread][ind] += val * convert;
                                }
                            }
                        }
                    }
                }
            }
            else
            {
                /* Kernel does not reach its own cell center: deposit the
                   whole particle mass into a single cell. */
                if ( (ix<0) || (ix >= gridsize) ) continue;
                if ( (iy<0) || (iy >= gridsize) ) continue;
                if ( (iz<0) || (iz >= gridsize) ) continue;
                long vind = floor((vz + velmax) / velbinsize);
                // if (vind < 0) vind = 0;
                // else if (vind >= velbins) vind = velbins - 1;
                if (vind < 0) continue;
                else if (vind >= velbins) continue;
                /* BUGFIX: this previously indexed with the loop counters
                   i and j, which are uninitialized/stale OpenMP-private
                   variables in this branch; the cell indices are ix, iy. */
                long ind = ix + g * iy + g2 * vind;
                {
                    if (star)
                        sdens_tmp[thread][ind] += pmass/cellvol * convert;
                    else
                        dens_tmp[thread][ind] += pmass/cellvol * convert;
                }
            }
        }

        /* Reduce the per-thread cubes and append two image extensions
           (gas, then stars) for this projection. */
        int naxis = 3;
        long npixels[3] = {gridsize, gridsize, velbins};
        double pixelsize = binsize / head.hubparam * head.time;
        long nelements = g3;
        double dum = 0;
        double aexpn = head.time;
        for ( ii = 0; ii < nelements; ii++)
        {
            for ( i = 0; i < numthreads; i++ )
            {
                dens[ii] += dens_tmp[i][ii];
                sdens[ii] += sdens_tmp[i][ii];
            }
        }
        fits_create_img(fptr, DOUBLE_IMG, naxis, npixels, &status );
        fits_update_key(fptr, TDOUBLE, "TYPE", &dum , "Cold Gas Distribution", &status);
        fits_update_key(fptr, TDOUBLE, "PIXELSIZE", &pixelsize , "Size of Pixel in kpc", &status);
        fits_update_key(fptr, TDOUBLE, "VELBINSIZE", &velbinsize , "Size of Velocity bins in km/s", &status);
        fits_update_key(fptr, TDOUBLE, "VELBINMAX", &velmax , "Range of Velocities in km/s, [-velbinmax, velbinmax]", &status);
        fits_update_key(fptr, TDOUBLE, "PROJECTION ANGLE", &total_proj_angle, "Projection Angle in degrees (0 = face-on)", &status);
        fits_update_key(fptr, TDOUBLE, "AEXP", &aexpn, "Expansion factor a of the snapshot", &status);
        // fits_write_img(fptr, TDOUBLE, fpixel, nelements, dens, &status);
        fits_write_3d_dbl(fptr, 1, g, g, g, g, velbins, dens, &status);

        fits_create_img(fptr, DOUBLE_IMG, naxis, npixels, &status );
        fits_update_key(fptr, TDOUBLE, "TYPE", &dum , "Stellar Distribution", &status);
        fits_update_key(fptr, TDOUBLE, "PIXELSIZE", &pixelsize , "Size of Pixel in kpc", &status);
        fits_update_key(fptr, TDOUBLE, "VELBINSIZE", &velbinsize , "Size of Velocity bins in km/s", &status);
        fits_update_key(fptr, TDOUBLE, "VELBINMAX", &velmax , "Range of Velocities in km/s, [-velbinmax, velbinmax]", &status);
        fits_update_key(fptr, TDOUBLE, "PROJECTION ANGLE", &total_proj_angle, "Projection Angle in degrees (0 = face-on)", &status);
        fits_update_key(fptr, TDOUBLE, "AEXP", &aexpn, "Expansion factor a of the snapshot", &status);
        // fits_write_img(fptr, TDOUBLE, fpixel, nelements, dens, &status);
        fits_write_3d_dbl(fptr, 1, g, g, g, g, velbins, sdens, &status);
        // fits_write_subset_dbl(fptr, 1, 3, npixels, &fpixel, &lpixel, dens, &status);
    }
    fits_close_file(fptr, &status);
    fits_report_error(stderr, status);

    /* Release the grids (per-thread scratch cubes and sdens were
       previously leaked). */
    for ( i = 0; i < numthreads; i++ )
    {
        free(dens_tmp[i]);
        free(sdens_tmp[i]);
    }
    free(dens_tmp);
    free(sdens_tmp);
    free(sdens);
    // free(surfdens);
    free(dens);
    return (status);
}
|
core_strmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrmm.c, normal z -> s, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
 *    - op(A) = A    or
 *    - op(A) = A^T  or
 *    - op(A) = A^H  (identical to A^T in real arithmetic)
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* The triangular matrix A of dimension lda-by-k, where k is m when
* side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] B
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_strmm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    float alpha, const float *A, int lda,
    float *B, int ldb)
{
    // Thin forwarding wrapper: PLASMA enum values are defined to match the
    // CBLAS enums, so a plain cast per argument is all that is needed.
    const CBLAS_SIDE      cblas_side  = (CBLAS_SIDE)side;
    const CBLAS_UPLO      cblas_uplo  = (CBLAS_UPLO)uplo;
    const CBLAS_TRANSPOSE cblas_trans = (CBLAS_TRANSPOSE)transa;
    const CBLAS_DIAG      cblas_diag  = (CBLAS_DIAG)diag;

    // Column-major single-precision triangular matrix-matrix multiply:
    // B = alpha*op(A)*B (left) or B = alpha*B*op(A) (right), in place.
    cblas_strmm(CblasColMajor,
                cblas_side, cblas_uplo, cblas_trans, cblas_diag,
                m, n, alpha, A, lda, B, ldb);
}
/******************************************************************************/
// Asynchronous (OpenMP task) wrapper around plasma_core_strmm.
// Submits the triangular multiply as an OpenMP "target nowait" task whose
// depend clauses order it against other tasks that touch A (read) and
// B (read-write).  sequence/request implement PLASMA's error propagation:
// the task is only submitted while the sequence status is PlasmaSuccess.
void plasma_core_omp_strmm(
plasma_enum_t side, plasma_enum_t uplo,
plasma_enum_t transa, plasma_enum_t diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// A is k-by-k: k = m when A multiplies from the left, k = n from the right.
int k = (side == PlasmaLeft) ? m : n;
if (sequence->status == PlasmaSuccess) {
#pragma omp target nowait \
depend(in:A[0:lda*k]) \
depend(inout:B[0:ldb*n]) \
map(to:A[:lda*k]) \
map(tofrom:B[:ldb*n]) \
firstprivate(side, uplo, transa, diag, m, n, alpha, ldb, lda)
{
plasma_core_strmm(side, uplo,
transa, diag,
m, n,
alpha, A, lda,
B, ldb);
}
}
}
|
mix_fftw.c |
#include <stdio.h>
#include <stdarg.h> // vargs
#include <math.h> // sqrt
#include <string.h>
#include <ctype.h> // tolower
#include <time.h> // clock, CLOCKS_PER_SEC
#include <fftw3.h> // This uses the system FFTW (check with ldd)
#include "mex.h"
//int mxUnshareArray(mxArray *, int);
#define __USE_OMP__ 0
#ifdef __USE_OMP__
#include <omp.h>
#endif
#ifndef DEBUG
#define DEBUG 0 // debug uses the same verbosity levels as opt.verbosity
#endif
#ifndef DEBUG_OR_VERB
#define DEBUG_OR_VERB(lvl) \
(DEBUG >= lvl || opt->verbosity >= lvl)
#endif
// http://stackoverflow.com/a/1644898
#ifndef DEBUG_PRINTF
#define DEBUG_PRINTF(fmt, ...) \
do { fprintf(stderr, "%s:%d:%s(): " fmt, "mix_fftw.c", \
__LINE__, __func__, __VA_ARGS__); } while (0)
#endif
// View all the stuff in libmx.so, libmex.so
// http://stackoverflow.com/a/1620583
// readelf -Ws /usr/local/MATLAB/R2015b/bin/glnxa64/libmex.so | grep "mex"
// readelf -Ws /usr/local/MATLAB/R2015b/bin/glnxa64/libmx.so | grep "mx"
// readelf -Ws /usr/local/MATLAB/R2015b/bin/glnxa64/libmx.so | awk "{print $8}" | grep -i "trans"
// options from the mex interface
typedef struct mix_opt {
bool transpose;
char *side;
char *transform;
int plan_rigor;
char *wisdom_file;
bool wisdom_only;
bool use_diags;
int verbosity;
int nthreads;
size_t block_size;
} mix_opt;
// use this like printf, with a format and optional arguments
// http://stackoverflow.com/a/1485819
// http://embeddedgurus.com/stack-overflow/2008/12/efficient-c-tips-5-make-local-functions-static/
// printf-style fatal-error helper: prefixes the message with the file
// basename and raises a MATLAB error via mexErrMsgTxt() (does not return).
// Fix: the original used unbounded sprintf/vsprintf into a 1024-byte
// mxMalloc'd buffer, which could overflow for long formatted messages
// (e.g. long wisdom_file paths).  snprintf/vsnprintf truncate instead.
static void mex_error(char *fmt, ...) {
  va_list args;
  char msg[1024];  // stack buffer is fine: mexErrMsgTxt copies the string
  int off = snprintf(msg, sizeof(msg), "mix_fftw.c: "); // hard-coded basename
  va_start(args, fmt);
  vsnprintf(msg + off, sizeof(msg) - (size_t)off, fmt, args);
  va_end(args);
  mexErrMsgTxt(msg);
}
// printf-style warning helper: prefixes the message with the file basename
// and issues a MATLAB warning via mexWarnMsgTxt() (execution continues).
// Fix: the original used unbounded sprintf/vsprintf into a 1024-byte
// mxMalloc'd buffer (overflow risk, and the buffer was never freed).
// A bounded stack buffer removes both problems; mexWarnMsgTxt copies it.
static void mex_warning(char *fmt, ...) {
  va_list args;
  char msg[1024];
  int off = snprintf(msg, sizeof(msg), "mix_fftw.c: "); // hard-coded basename
  va_start(args, fmt);
  vsnprintf(msg + off, sizeof(msg) - (size_t)off, fmt, args);
  va_end(args);
  mexWarnMsgTxt(msg);
}
// Write the accumulated FFTW wisdom to opt->wisdom_file; a no-op when the
// file name is empty.  I/O problems raise warnings (not errors) so the
// main computation still completes.
// Fixes: "sucessful" typo in the warning text; the debug message claimed
// "Wall time" although clock() measures CPU time; braced the else branch.
void export_wisdom(mix_opt *opt) {
  if ( strcmp(opt->wisdom_file,"") ) {
    FILE *f = fopen(opt->wisdom_file, "w");
    if ( f ) {
      if (DEBUG_OR_VERB(1))
        DEBUG_PRINTF("Exporting wisdom to file %s\n", opt->wisdom_file);
      // this concatenates new wisdom to any wisdom loaded from earlier.
      // If wisdom already exists for a specific size with a more patient
      // planner, the more rigorous wisdom is saved (and used when imported).
      clock_t end, begin=clock();
      fftw_export_wisdom_to_file(f);
      int s = ferror(f);
      if ( s != 0 )
        mex_warning("Wisdom export may have failed; file error indicator is "
                    "set for %s", opt->wisdom_file);
      s = fclose(f);
      if ( s != 0 )
        mex_warning("Wisdom export successful, but there was an error "
                    "closing the file %s\n", opt->wisdom_file);
      end = clock();
      if (DEBUG_OR_VERB(2))
        DEBUG_PRINTF("Exporting wisdom complete. CPU time = %es.\n",
                     (double)(end-begin) / CLOCKS_PER_SEC);
    }
    else {
      // even if issues, try to continue
      mex_warning("Error exporting wisdom to file %s\n", opt->wisdom_file);
    }
  }
}
// Read FFTW wisdom from opt->wisdom_file; a no-op when the file name is
// empty.  A missing file is not an error: on the first run we generate
// wisdom and export_wisdom() writes the file afterwards.
// Fixes: "sucessful"/"non-existant" typos; braced the dangling else branch
// (its body was a bare if statement hidden behind a comment block); the
// debug message claimed "Wall time" although clock() measures CPU time.
void import_wisdom(mix_opt *opt) {
  if ( strcmp(opt->wisdom_file,"") ) {
    FILE *f = fopen(opt->wisdom_file, "r");
    if ( f ) {
      if (DEBUG_OR_VERB(1))
        DEBUG_PRINTF("Importing wisdom from file %s\n", opt->wisdom_file);
      clock_t end, begin=clock();
      //TODO: this occasionally segfaults. Why?!
      //      Even ..._from_filename segfaults.
      //      Restarting matlab seems to "fix" it.
      int s = fftw_import_wisdom_from_file(f);
      if ( s == 0 )
        mex_warning("Error importing wisdom from file %s\n", opt->wisdom_file);
      s = fclose(f);
      if ( s != 0 )
        mex_warning("Wisdom import successful, but there was an error "
                    "closing the file %s\n", opt->wisdom_file);
      end = clock();
      if (DEBUG_OR_VERB(2))
        DEBUG_PRINTF("Importing wisdom complete. CPU time = %es.\n",
                     (double)(end-begin) / CLOCKS_PER_SEC);
    }
    else {
      // Assume all is good and continue.
      // If the user specifies a new filename (i.e. file doesn't yet exist),
      // we try to read the wisdom file before it exists; this shouldn't be an
      // issue, so we'll just continue here without warning or error.
      //mex_warning("error importing wisdom from file %s.\nDoes the file exist"
      //            " and contain FFTW wisdom?\n", opt->wisdom_file);
      if (DEBUG_OR_VERB(1))
        DEBUG_PRINTF("Issue importing wisdom from file %s. This usually occurs when"
                     " you try to import wisdom from a new, non-existent file; in this"
                     " case, import_wisdom() will fail, we'll generate wisdom, and then "
                     "we'll export_wisdom() to the file.\n",
                     opt->wisdom_file);
    }
  }
}
// Apply n_its rounds of the randomized mixing operator to the columns of
// the m x n column-major matrix A, in place.
// Each round applies, in an order derived from opt->side / opt->transpose,
// a +/-1 diagonal (one length-m column of darr per round) and an
// orthonormal DCT-II / DCT-III transform along the columns via FFTW r2r
// plans.  The nrm_* scale factors convert FFTW's unnormalized transforms
// into the orthogonal convention used by MATLAB's dct/idct.
// Fixes: the planner-rigor switch was missing break statements, so e.g.
// plan_rigor==1 OR'd FFTW_ESTIMATE|FFTW_MEASURE|FFTW_PATIENT|FFTW_EXHAUSTIVE
// together (the rigor flags are mutually exclusive per the FFTW docs);
// the wisdom_only early return leaked the plan; the DCT-III branch was
// mislabeled "DCT-II".
void mix_cols(double *A, double *darr, size_t m, size_t n, size_t n_its, mix_opt *opt) {
  unsigned it,i,j,mj;
  fftw_plan p;
  double *darr_it;
  double nrm_1=1,nrm_mid=1,nrm_end=1.;
  bool nrm_before=false;
  // Order of F*D_i
  bool darr_before_F, darr_reversed, F_transpose;
  bool right = strncmp(opt->side,"r",1)==0;
  if ( (right && !opt->transpose) || (!right && opt->transpose) ) { // equivalent to V^T*A
    darr_before_F = false;
    darr_reversed = true;
    F_transpose = true;
  } else { // equivalent to V*A
    darr_before_F = true;
    darr_reversed = false;
    F_transpose = false;
  }
  // Plan a real-to-real transform
  // The planner syntax is
  // fftw_plan fftw_plan_many_r2r(int rank, const int *n, int howmany,
  //                              double *in, const int *inembed,
  //                              int istride, int idist,
  //                              double *out, const int *onembed,
  //                              int ostride, int odist,
  //                              const fftw_r2r_kind *kind, unsigned flags);
  int trans_lens[1] = {(int)m};
  fftw_r2r_kind kind[1];
  bool do_dct = strncmp(opt->transform,"dct",4)==0;
  bool do_idct = strncmp(opt->transform,"idct",5)==0;
  bool do_dct_type = do_dct || do_idct;
  if ( do_dct_type ) {
    if ( (!F_transpose && do_dct) || (F_transpose && do_idct) ) { // DCT-II
      kind[0] = FFTW_REDFT10;
      nrm_1 = 1./(2.*sqrt(m));
      nrm_mid = 1./sqrt(2.*m);
      nrm_end = 1./sqrt(2.*m);
      nrm_before = false;
    } else { // DCT-III (the inverse of DCT-II)
      kind[0] = FFTW_REDFT01;
      nrm_1 = 1./sqrt(m);
      nrm_mid = 1./sqrt(2.*m);
      nrm_end = 1./sqrt(2.*m);
      nrm_before = true;
    }
  }
  else
    mex_error("unknown transform type: %s", opt->transform);
  // Map plan_rigor (1..4) onto exactly one FFTW planner flag.
  unsigned fftw_flags=0;
  switch (opt->plan_rigor) {
    case 1:
      fftw_flags |= FFTW_ESTIMATE;
      break;
    case 2:
      fftw_flags |= FFTW_MEASURE;
      break;
    case 3:
      fftw_flags |= FFTW_PATIENT;
      break;
    case 4:
      fftw_flags |= FFTW_EXHAUSTIVE;
      break;
  }
  // Try to get the plan from wisdom
  // If using FFTW_ESTIMATE, this forms the plan if wisdom is not present; if
  // wisdom is present for a more patient planner, then it uses the more patient plan.
  clock_t end,begin=clock();
  p = fftw_plan_many_r2r(1, trans_lens, (int)n, A, NULL, 1, (int)m, A, NULL, 1, (int)m,
                         kind, fftw_flags | FFTW_WISDOM_ONLY);
  end=clock();
  if (DEBUG_OR_VERB(2))
    DEBUG_PRINTF("FFTW_WISDOM_ONLY time = %e.\n", (double)(end-begin)/CLOCKS_PER_SEC);
  // We don't have wisdom, so compute a plan
  if ( NULL == p && opt->plan_rigor > 1 ) {
    // Make copy of A for planning: rigorous planners overwrite the buffer
    // while measuring, so we must not plan in place on the user's data.
    if (DEBUG_OR_VERB(1))
      DEBUG_PRINTF("Making copy of A to create FFTW plan.\n",NULL);
    clock_t end,begin=clock();
    double *A_cpy = (double *)mxMalloc(m*n*sizeof(*A));
    // Compute the plan
    if (DEBUG_OR_VERB(1))
      DEBUG_PRINTF("Starting FFTW planning.\n",NULL);
    p = fftw_plan_many_r2r(1, trans_lens, (int)n, A_cpy, NULL, 1, (int)m, A_cpy, NULL, 1, (int)m,
                           kind, fftw_flags);
    if (DEBUG_OR_VERB(2))
      DEBUG_PRINTF("fftw_alignment_of(A) = %d; fftw_alignment_of(A_cpy) = %d.\n",
                   fftw_alignment_of(A), fftw_alignment_of(A_cpy));
    end = clock();
    if (DEBUG_OR_VERB(1))
      DEBUG_PRINTF("FFTW planning time = %e.\n", (double)(end-begin)/CLOCKS_PER_SEC);
    mxFree(A_cpy);
  }
  else if ( NULL == p && opt->plan_rigor == 1 ) {
    // Compute the plan (using FFTW_ESTIMATE so no need to copy)
    clock_t end,begin=clock();
    p = fftw_plan_many_r2r(1, trans_lens, (int)n, A, NULL, 1, (int)m, A, NULL, 1, (int)m,
                           kind, fftw_flags);
    end = clock();
    if (DEBUG_OR_VERB(1))
      DEBUG_PRINTF("FFTW planning time = %e.\n", (double)(end-begin)/CLOCKS_PER_SEC);
  }
  else
    if (DEBUG_OR_VERB(1))
      DEBUG_PRINTF("Suitable wisdom found to create plan\n",NULL);
  if ( NULL == p) {
    mex_error("error making FFTW plan");
  }
  if ( opt->wisdom_only ) {
    if (DEBUG_OR_VERB(1))
      DEBUG_PRINTF("Only computing wisdom; returning.\n",NULL);
    fftw_destroy_plan(p); // wisdom persists after the plan is destroyed
    return;
  }
  // Execute the plan
  for ( it=0; it<n_its; ++it ) {
    // Rescale to get desired normalization (orthogonal transform; matches MATLAB)
    if ( nrm_before ) {
#if __USE_OMP__
#pragma omp parallel for num_threads((opt->nthreads)) private(i,mj)
#endif
      for (j=0; j<n; ++j) {
        mj=m*j;
        A[mj] *= nrm_1;
        for (i=1; i<m-1; ++i)
          A[mj+i] *= nrm_mid;
        A[mj+m-1] *= nrm_end;
      }
    }
    // Fourier-like goes before D
    if ( !darr_before_F ) {
      // Apply transform to columns
      fftw_execute_r2r(p, A, A);
    }
    if ( opt->use_diags ) {
      // Pointer to proper column of darr array
      if ( !darr_reversed ) // forward through darr
        darr_it = darr + it*m;
      else
        darr_it = darr + (n_its-1-it)*m; // backward through darr
      // Apply +-1 to rows
#if __USE_OMP__
#pragma omp parallel for num_threads((opt->nthreads)) private(i,mj)
#endif
      for ( j=0; j<n; ++j ) {
        mj = m*j;
        for ( i=0; i<m; ++i ) {
          A[mj+i] *= darr_it[i];
        }
      }
    }
    // Fourier-like goes after D
    if ( darr_before_F ) {
      // Apply transform to columns
      fftw_execute_r2r(p, A, A);
    }
    // Rescale to get desired normalization (orthogonal transform; matches MATLAB)
    if ( !nrm_before ) {
#if __USE_OMP__
#pragma omp parallel for num_threads((opt->nthreads)) private(i,mj)
#endif
      for (j=0; j<n; ++j) {
        mj=m*j;
        A[mj] *= nrm_1;
        for (i=1; i<m-1; ++i)
          A[mj+i] *= nrm_mid;
        A[mj+m-1] *= nrm_end;
      }
    }
  }
  fftw_destroy_plan(p);
}
// Apply mix_cols() to A in blocks of opt->block_size columns, bracketed by
// wisdom import/export.  Blocking lets one FFTW plan (per block width) be
// reused across the whole matrix instead of planning for all n columns.
// Fix: the debug message used %d for size_t arguments, which is undefined
// behavior; %zu is the correct conversion for size_t.
void mix_cols_blocked(double *A, double *darr, size_t m, size_t n, size_t n_its, mix_opt *opt) {
  size_t bs = opt->block_size;
  size_t n_blocks, block_rem;
  if ( bs > n ) {
    if (DEBUG_OR_VERB(1))
      DEBUG_PRINTF("block size, %zu, greater than number of columns, %zu. "
                   "Continuing with block_size=%zu.\n", bs, n, n);
    opt->block_size=n; // we call the monolithic code, so this doesn't do anything
    // NOTE(review): this path skips import_wisdom()/export_wisdom(), unlike
    // the blocked path below — confirm whether that asymmetry is intended.
    mix_cols(A, darr, m, n, n_its, opt);
    return;
  }
  n_blocks = n/bs;
  block_rem = n%bs;
  // Import wisdom file (if provided by user)
  import_wisdom(opt);
  //TODO: investigate better ways to use OMP
  for (size_t b=0; b < n_blocks; ++b)
    mix_cols(A+b*bs*m, darr, m, bs, n_its, opt);
  if (block_rem)
    mix_cols(A+n_blocks*bs*m, darr, m, block_rem, n_its, opt);
  // Save wisdom to file (if requested by user)
  export_wisdom(opt);
}
//TODO: This routine works in place on A; when is it better to transpose back+forth?
// Placeholder: row-wise mixing is not implemented.  Callers currently get
// the equivalent effect by transposing A and using mix_cols()/
// mix_cols_blocked() (see the 'right'-side path in mexFunction).
// Calling this always aborts the MEX call via mex_error().
void mix_rows_inplace(double *A, double *darr, size_t m, size_t n, size_t n_its, mix_opt *opt) {
mex_error("mix_rows is not implemented.");
}
// Build a 1x1 MATLAB struct mirroring the option values in *opt; used to
// report the defaults when the MEX file is called with no arguments.
// Improvement: the field count passed to mxCreateStructMatrix is now
// derived from the fieldnames array instead of the hard-coded 10, so the
// two cannot drift apart when options are added.
mxArray* default_options_struct(mix_opt *opt) {
  const char *fieldnames[] = {"transpose", "side", "transform",
                              "plan_rigor", "wisdom_file", "wisdom_only",
                              "use_diags", "verbosity", "nthreads", "block_size"};
  mxArray *optmx = mxCreateStructMatrix(1, 1,
      sizeof(fieldnames)/sizeof(fieldnames[0]), fieldnames);
  mxSetField(optmx,0, "transpose", mxCreateLogicalScalar(opt->transpose));
  mxSetField(optmx,0, "side", mxCreateString(opt->side));
  mxSetField(optmx,0, "transform", mxCreateString(opt->transform));
  mxSetField(optmx,0, "plan_rigor", mxCreateDoubleScalar(opt->plan_rigor));
  mxSetField(optmx,0, "wisdom_file", mxCreateString(opt->wisdom_file));
  mxSetField(optmx,0, "wisdom_only", mxCreateLogicalScalar(opt->wisdom_only));
  mxSetField(optmx,0, "use_diags", mxCreateLogicalScalar(opt->use_diags));
  mxSetField(optmx,0, "verbosity", mxCreateDoubleScalar(opt->verbosity));
  mxSetField(optmx,0, "nthreads", mxCreateDoubleScalar(opt->nthreads));
  mxSetField(optmx,0, "block_size", mxCreateDoubleScalar(opt->block_size));
  return optmx;
}
// MEX gateway.  Usage (inferred from the parsing below):
//   opts          = mix_fftw()                 — return default option struct
//   [B]           = mix_fftw(A)                — mix with one generated darr
//   [B, darr]     = mix_fftw(A, n_its_or_darr) — generate darr (scalar n_its)
//                                                or use the provided darr
//   [B, ...]      = mix_fftw(A, d, opt)        — with an options struct
// The input A is never modified; work happens on a duplicate (or on a
// transposed copy for side='r').
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
double *darr, *A, *B;
mxArray *darr_mx;
size_t m,n,n_its;
bool generated_darr=false;
// Default options
mix_opt opt = {.transpose=false, .side="left", .transform="dct",
.plan_rigor=1, .wisdom_file="", .wisdom_only=false,
.use_diags=true, .verbosity=0, .nthreads=1, .block_size=250};
/****************************/
/* Check inputs and outputs */
/****************************/
// Check A
if ( nrhs == 0 ) { // return default options struct
plhs[0] = default_options_struct(&opt);
return;
}
// 'class' is a valid identifier in C (this file must not be built as C++).
mxClassID class = mxGetClassID(prhs[0]);
if ( !(class == mxDOUBLE_CLASS || class == mxSINGLE_CLASS) )
mex_error("we only handle real single and double arrays");
if ( mxIsEmpty(prhs[0]) ) mex_error("A is empty");
if ( mxGetNumberOfDimensions(prhs[0]) != 2 ) mex_error("A should be an m x n matrix");
// NOTE(review): mxGetPr returns double*; for mxSINGLE_CLASS inputs the
// underlying data is float, so reading it as double looks wrong — confirm
// whether single inputs are actually supported or should be rejected above.
A = mxGetPr(prhs[0]);
m = mxGetM(prhs[0]);
n = mxGetN(prhs[0]);
// Read in options struct
// {{{
// Each field is optional; absent fields keep their defaults from 'opt'.
if ( nrhs >= 3 ) {
mxArray *tmp;
// transpose
tmp = mxGetField(prhs[2], 0, "transpose");
if ( tmp ) {
if ( !mxIsScalar(tmp) )
mex_error("opt.transpose should be either 0 or 1");
if ( mxGetScalar(tmp) == 0. )
opt.transpose = false;
else if ( mxGetScalar(tmp) == 1. )
opt.transpose = true;
else
mex_error("opt.transpose should be either 0 or 1");
}
// side (lower-cased in place before validation)
tmp = mxGetField(prhs[2], 0, "side");
if ( tmp ) {
opt.side = mxArrayToString(tmp);
if ( !(opt.side) ) mexErrMsgTxt("error converting opt.side to string.");
for (char *p=opt.side;*p;++p) *p=tolower(*p);
if ( !(strncmp(opt.side,"l",2)==0 || strncmp(opt.side,"left",5)==0 ||
strncmp(opt.side,"r",2)==0 || strncmp(opt.side,"right",6)==0) )
mex_error("opt.side should be 'l'/'left' or 'r'/'right'");
}
// transform (lower-cased in place before validation)
tmp = mxGetField(prhs[2], 0, "transform");
if ( tmp ) {
opt.transform = mxArrayToString(tmp);
if ( !(opt.transform) ) mexErrMsgTxt("error converting opt.transform to string.");
for (char *p=opt.transform;*p;++p) *p=tolower(*p);
if ( !(strncmp(opt.transform,"dct",4)==0 || strncmp(opt.transform,"idct",5)==0 ) )
mex_error("opt.transform should be 'DCT' or 'IDCT'");
}
// plan_rigor
tmp = mxGetField(prhs[2], 0, "plan_rigor");
if ( tmp ) {
if ( !mxIsScalar(tmp) )
mex_error("opt.plan_rigor should be 1, 2, 3, or 4");
int pr = (int)mxGetScalar(tmp);
if ( pr >= 1 && pr <= 4 )
opt.plan_rigor = pr;
else
mex_error("opt.plan_rigor should be 1, 2, 3, or 4");
}
// wisdom_file
tmp = mxGetField(prhs[2], 0, "wisdom_file");
if ( tmp ) {
if ( !mxIsChar(tmp) )
mexErrMsgTxt("error converting opt.wisdom_file to string.");
opt.wisdom_file = mxArrayToString(tmp);
if ( !(opt.wisdom_file) ) mexErrMsgTxt("error converting opt.wisdom_file to string.");
}
// wisdom_only
tmp = mxGetField(prhs[2], 0, "wisdom_only");
if ( tmp ) {
if ( !mxIsScalar(tmp) )
mex_error("opt.wisdom_only should be either 0 or 1");
if ( mxGetScalar(tmp) == 0. )
opt.wisdom_only = false;
else if ( mxGetScalar(tmp) == 1. )
opt.wisdom_only = true;
else
mex_error("opt.wisdom_only should be either 0 or 1");
}
// use_diags
tmp = mxGetField(prhs[2], 0, "use_diags");
if ( tmp ) {
if ( !mxIsScalar(tmp) )
mex_error("opt.use_diags should be either 0 or 1");
if ( mxGetScalar(tmp) == 0. )
opt.use_diags = false;
else if ( mxGetScalar(tmp) == 1. )
opt.use_diags = true;
else
mex_error("opt.use_diags should be either 0 or 1");
}
// verbosity
tmp = mxGetField(prhs[2], 0, "verbosity");
if ( tmp ) {
if ( !mxIsScalar(tmp) )
mex_error("opt.verbosity should be 0, 1, or 2");
int vrb = (int)mxGetScalar(tmp);
if ( vrb >= 0 && vrb <= 2 )
opt.verbosity = vrb;
else
mex_error("opt.verbosity should be 0, 1, or 2");
}
// nthreads (-1 presumably means "let FFTW decide" — TODO confirm)
tmp = mxGetField(prhs[2], 0, "nthreads");
if ( tmp ) {
if ( !mxIsScalar(tmp) )
mex_error("opt.nthreads should be an integer.");
int nt = (int)mxGetScalar(tmp);
if ( nt >= -1)
opt.nthreads = nt;
else
mex_error("opt.nthreads should be an integer.");
}
// block_size
tmp = mxGetField(prhs[2], 0, "block_size");
if ( tmp ) {
if ( !mxIsScalar(tmp) )
mex_error("opt.block_size should be an integer.");
size_t bs = (size_t)mxGetScalar(tmp);
// NOTE(review): bs is unsigned, so 'bs >= 0' is always true and negative
// inputs wrap to huge values; the else branch below is unreachable.
if ( bs >= 0)
opt.block_size = bs;
else
mex_error("opt.block_size should be an integer.");
}
}
// NOTE(review): opt.block_size is size_t but printed with %d below — the
// specifier should be %zu; confirm before relying on this debug output.
if (DEBUG >= 1 || opt.verbosity >= 1)
DEBUG_PRINTF("\ninterface options:\n -> transpose=%d\n -> side=\"%s\"\n"
" -> transform=\"%s\"\n -> plan_rigor=%d\n"
" -> wisdom_file=\"%s\"\n -> wisdom_only=%d\n\n -> use_diags=%d\n"
" -> verbosity=%d\n -> nthreads=%d\n -> block_size=%d\n",
opt.transpose, opt.side, opt.transform,
opt.plan_rigor, opt.wisdom_file, opt.wisdom_only,
opt.use_diags, opt.verbosity, opt.nthreads, opt.block_size);
// }}}
// n_its+generate darr or get pointer to darr's data
bool is_scalar = false;
if ( nrhs >= 2 ) {
if ( mxIsEmpty(prhs[1]) ) // if darr=[], don't apply darr
opt.use_diags = false;
is_scalar = mxIsScalar(prhs[1]);
}
if ( nrhs < 2 || is_scalar ) { // not specified or n_its given
if ( is_scalar ) { // n_its is provided
n_its = (size_t)mxGetScalar(prhs[1]);
if ( (double)n_its != mxGetScalar(prhs[1]) )
mex_error("n_its should be an integer.");
}
else // default value of n_its
n_its = 1;
// Use MATLAB's `rand()` to populate darr with Uniform[0,1]
mxArray* mcm_prhs[2];
size_t trans_dim;
// darr columns must match the dimension being transformed: rows of A for
// side='l', columns of A for side='r'.
if ( *opt.side == 'l' )
trans_dim = m;
else
trans_dim = n;
mcm_prhs[0] = mxCreateDoubleScalar(trans_dim);
mcm_prhs[1] = mxCreateDoubleScalar(n_its);
mexCallMATLAB(1, &darr_mx, 2, mcm_prhs, "rand");
darr = mxGetPr(darr_mx);
// Threshold to get +- 1 values
for (unsigned i=0; i < trans_dim*n_its; ++i) {
darr[i] = (darr[i] >= 0.5) ? 1. : -1.;
}
generated_darr = true;
}
else { // Use the given darr
darr = mxGetPr(prhs[1]);
n_its = mxGetN(prhs[1]);
if ( *opt.side == 'r' && n != mxGetM(prhs[1]) )
mex_error("dimension 2 of A and dimension 1 of darr should be equal.");
else if ( *opt.side == 'l' && m != mxGetM(prhs[1]) )
mex_error("dimension 1 of A and dimension 1 of darr should be equal.");
}
// Set output pointers
if ( nlhs >= 2 ) {
if ( !generated_darr )
mex_error("darr output specified but is only returned when generating "
"a new darr.");
plhs[1] = darr_mx;
}
/***************/
/* Do the work */
/***************/
// Set up FFTW threads
int s = fftw_init_threads();
if ( s == 0 )
mex_error("Error setting up FFTW threads\n.");
fftw_plan_with_nthreads(opt.nthreads);
if (DEBUG >= 1 || opt.verbosity >= 1)
DEBUG_PRINTF("FFTW planning with %d threads\n", opt.nthreads);
// Call the worker routines
if ( *opt.side == 'l' ) {
// Make a copy of A to work on
plhs[0] = mxDuplicateArray(prhs[0]); B = mxGetPr(plhs[0]);
// mix the columns
if ( opt.block_size > 0 )
mix_cols_blocked(B, darr, m, n, n_its, &opt);
else
mix_cols(B, darr, m, n, n_its, &opt);
}
else {
// side='r': mixing rows of A == mixing columns of A^T, so transpose,
// mix columns, and transpose back.
// Make a copy of the transpose of A to work on
mxArray *At_mx;
mexCallMATLAB(1, &At_mx, 1, (mxArray **) &prhs[0], "transpose");
double *At = mxGetPr(At_mx);
// Mix the columns
if ( opt.block_size > 0 )
mix_cols_blocked(At, darr, n, m, n_its, &opt);
else
mix_cols(At, darr, n, m, n_its, &opt);
// Initialize output array and copy over transpose of mixed At
mexCallMATLAB(1, &plhs[0], 1, &At_mx, "transpose");
}
/************/
/* Clean up */
/************/
fftw_cleanup_threads();
//TODO: only free if we called mxArrayToString
//mxFree(opt.side);
//mxFree(opt.transform);
//mxFree(opt.wisdom_file);
return;
}
|
pr66633-3.c | /* PR middle-end/66633 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -O1" } */
void baz (int (*) ());
/* Regression test for PR middle-end/66633: a nested function (GNU C
   extension, declared with the 'auto' storage-class specifier) whose
   address is taken inside an OpenMP parallel region.  Only successful
   compilation is checked ({ dg-do compile }); do not alter the shape of
   this code, it exists to reproduce the original ICE. */
void
foo (void)
{
int i;
/* bar captures 'i' from foo's frame via the static chain.  */
auto int bar (void) { return i; }
auto void bar2 (void)
{
#pragma omp parallel
baz (bar); /* taking bar's address here triggered the original bug */
}
bar2 ();
}
|
ordered_dependences.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
// OMPT test: a 2x2 doubly-nested loop under "ordered(2)" doacross
// scheduling.  Each iteration first waits on its (i-1,j) and (i,j-1)
// predecessors (depend(sink)), then publishes its own completion
// (depend(source)).  The FileCheck lines below this function verify the
// ompt_event_dependences records each thread emits, so the loop bounds,
// pragmas, and printf calls must not be changed.
int main() {
int a[10][10];
int i, j;
#pragma omp parallel num_threads(2)
#pragma omp for ordered(2)
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++) {
a[i][j] = i + j + 1;
printf("%d, %d\n", i, j);
#pragma omp ordered depend(sink : i - 1, j) depend(sink : i, j - 1)
if (i > 0 && j > 0)
a[i][j] = a[i - 1][j] + a[i][j - 1] + 1;
printf("%d, %d\n", i, j);
#pragma omp ordered depend(source)
}
return 0;
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// either can be first for last iteration
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(0{{.*}}sink), (1,{{.*}}sink)]
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(1{{.*}}sink), (0,{{.*}}sink)]
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
|
Sphere.h | #ifndef SPHERE_HEADER
#define SPHERE_HEADER
#include "basic.h"
#include <MiscLib/Vector.h>
#include <stdexcept>
#include <GfxTL/HyperplaneCoordinateSystem.h>
#include <utility>
#include "PointCloud.h"
#include <ostream>
#include <istream>
#include <stdio.h>
#include <utility>
#include <MiscLib/NoShrinkVector.h>
#include "LevMarLSWeight.h"
#include "LevMarFitting.h"
#ifndef DLL_LINKAGE
#define DLL_LINKAGE
#endif
// Exception thrown when four points intended to define a sphere do not
// form a valid (non-degenerate) tetrahedron, e.g. when they are (nearly)
// coplanar.  NOTE(review): "Tetrahedon" is a historical misspelling of
// "Tetrahedron"; the name is kept as-is for API/ABI compatibility.
struct DLL_LINKAGE InvalidTetrahedonError
: public std::runtime_error
{
InvalidTetrahedonError();
};
class DLL_LINKAGE Sphere
{
public:
enum { RequiredSamples = 2 };
Sphere();
Sphere(const Vec3f ¢er, float radius);
Sphere(const Vec3f &p1, const Vec3f &p2, const Vec3f &p3,
const Vec3f &p4);
bool Init(const MiscLib::Vector< Vec3f > &samples);
bool Init(const Vec3f &p1, const Vec3f &p2, const Vec3f &p3,
const Vec3f &p4);
bool Init2(const Vec3f &p1, const Vec3f &p2, const Vec3f &n1,
const Vec3f &n2);
bool Init(bool binary, std::istream *i);
void Init(FILE *i);
void Init(float *array);
inline float Distance(const Vec3f &p) const;
inline void Normal(const Vec3f &p, Vec3f *normal) const;
inline float DistanceAndNormal(const Vec3f &p, Vec3f *normal) const;
inline float SignedDistance(const Vec3f &p) const;
void Project(const Vec3f &p, Vec3f *pp) const;
const Vec3f &Center() const;
void Center(const Vec3f ¢er) { m_center = center; }
float Radius() const;
void Radius(float radius) { m_radius = radius; }
bool LeastSquaresFit(const PointCloud &pc,
MiscLib::Vector< size_t >::const_iterator begin,
MiscLib::Vector< size_t >::const_iterator end);
template< class IteratorT >
bool LeastSquaresFit(IteratorT begin, IteratorT end);
bool Fit(const PointCloud &pc,
MiscLib::Vector< size_t >::const_iterator begin,
MiscLib::Vector< size_t >::const_iterator end)
{ return LeastSquaresFit(pc, begin, end); }
static bool Interpolate(const MiscLib::Vector< Sphere > &spheres,
const MiscLib::Vector< float > &weights, Sphere *is);
void Serialize(bool binary, std::ostream *o) const;
static size_t SerializedSize();
void Serialize(FILE *o) const;
void Serialize(float* array) const;
static size_t SerializedFloatSize();
void Transform(float scale, const Vec3f &translate);
inline unsigned int Intersect(const Vec3f &p, const Vec3f &r,
float *first, float *second) const;
private:
template< class WeightT >
class LevMarSimpleSphere
: public WeightT
{
public:
enum { NumParams = 4 };
typedef float ScalarType;
template< class IteratorT >
ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end,
ScalarType *values, ScalarType *temp) const
{
ScalarType chi = 0;
int size = end - begin;
#ifdef DOPARALLEL
#pragma omp parallel for schedule(static) reduction(+:chi)
#endif
for(int idx = 0; idx < size; ++idx)
{
float s = begin[idx][0] - params[0];
s *= s;
for(unsigned int j = 1; j < 3; ++j)
{
float ss = begin[idx][j] - params[j];
s += ss * ss;
}
values[idx] = WeightT::Weigh(std::sqrt(s) - params[3]);
chi += values[idx] * values[idx];
}
return chi;
}
template< class IteratorT >
void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end,
const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const
{
int size = end - begin;
#ifdef DOPARALLEL
#pragma omp parallel for schedule(static)
#endif
for(int idx = 0; idx < size; ++idx)
{
float s[3];
s[0] = begin[idx][0] - params[0];
float sl = s[0] * s[0];
for(unsigned int i = 1; i < 3; ++i)
{
s[i] = begin[idx][i] - params[i];
sl += s[i] * s[i];
}
sl = std::sqrt(sl);
matrix[idx * NumParams + 0] = -s[0] / sl;
matrix[idx * NumParams + 1] = -s[1] / sl;
matrix[idx * NumParams + 2] = -s[2] / sl;
matrix[idx * NumParams + 3] = -1;
WeightT::template DerivWeigh< NumParams >(sl - params[3],
matrix + idx * NumParams);
}
}
void Normalize(ScalarType *) const
{}
};
template< class WeightT >
class LevMarSphere
: public WeightT
{
public:
enum { NumParams = 7 };
typedef float ScalarType;
// parametrization: params[0] - params[2] = normal
// params[3] - params[5] = point
// params[6] = 1 / radius
template< class IteratorT >
ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end,
ScalarType *values, ScalarType *temp) const
{
ScalarType chi = 0;
ScalarType radius = 1 / params[6];
Vec3f center = -radius * Vec3f(params[0], params[1], params[2])
+ Vec3f(params[3], params[4], params[5]);
int size = end - begin;
#ifdef DOPARALLEL
#pragma omp parallel for schedule(static) reduction(+:chi)
#endif
for(int idx = 0; idx < size; ++idx)
{
temp[idx] = (begin[idx] - center).length();
chi += (values[idx] = WeightT::Weigh(temp[idx] - radius))
* values[idx];
}
return chi;
}
template< class IteratorT >
void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end,
const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const
{
Vec3f normal(params[0], params[1], params[2]);
Vec3f point(params[3], params[4], params[5]);
int size = end - begin;
#ifdef DOPARALLEL
#pragma omp parallel for schedule(static)
#endif
for(int idx = 0; idx < size; ++idx)
{
ScalarType denominator = -1.f / temp[idx] * params[6];
matrix[idx * NumParams + 0] =
(matrix[idx * NumParams + 3] = (point[0] - normal[0] * params[6] - begin[idx][0]))
* denominator;
matrix[idx * NumParams + 1] =
(matrix[idx * NumParams + 4] = (point[1] - normal[1] * params[6] - begin[idx][1]))
* denominator;
matrix[idx * NumParams + 2] =
(matrix[idx * NumParams + 5] = (point[2] - normal[2] * params[6] - begin[idx][2]))
* denominator;
matrix[idx * NumParams + 3] /= temp[idx];
matrix[idx * NumParams + 4] /= temp[idx];
matrix[idx * NumParams + 5] /= temp[idx];
matrix[idx * NumParams + 6] = (normal[0] * matrix[idx * NumParams + 3]
+ normal[1] * matrix[idx * NumParams + 4]
+ normal[2] * matrix[idx * NumParams + 5] + 1) * params[6] * params[6];
WeightT::template DerivWeigh< NumParams >(temp[idx] - 1.f / params[6],
matrix + idx * NumParams);
}
}
void Normalize(ScalarType *params) const
{
ScalarType len = std::sqrt(params[0] * params[0]
+ params[1] * params[1] + params[2] * params[2]);
params[0] /= len;
params[1] /= len;
params[2] /= len;
}
};
private:
Vec3f m_center;
float m_radius;
};
// Unsigned distance from p to the sphere surface:
// | dist(p, center) - radius |.
inline float Sphere::Distance(const Vec3f &p) const
{
	const float centerDist = (m_center - p).length();
	return fabs(centerDist - m_radius);
}
inline void Sphere::Normal(const Vec3f &p, Vec3f *normal) const
{
*normal = p - m_center;
normal->normalize();
}
// Distance() and Normal() fused into one call so the center offset and its
// length are computed only once.  If p coincides with the center the
// normal is left unnormalized (zero vector), matching the guard below.
inline float Sphere::DistanceAndNormal(const Vec3f &p, Vec3f *normal) const
{
	*normal = p - m_center;
	const float len = normal->length();
	if(len > 0)
		*normal /= len;
	return fabs(len - m_radius);
}
// Signed distance to the surface: positive outside, negative inside.
inline float Sphere::SignedDistance(const Vec3f &p) const
{
	const float centerDist = (m_center - p).length();
	return centerDist - m_radius;
}
// Refine center and radius by Levenberg-Marquardt over the points in
// [begin, end), seeded with the sphere's current parameters.
// Returns false when the optimization fails, in which case the sphere is
// left unchanged; on success the fitted parameters are written back.
template< class IteratorT >
bool Sphere::LeastSquaresFit(IteratorT begin, IteratorT end)
{
LevMarSimpleSphere< LevMarLSWeight > levMarSphere;
float param[4]; // param[0..2] = center, param[3] = radius
for(size_t i = 0; i < 3; ++i)
param[i] = m_center[i];
param[3] = m_radius;
if(!LevMar(begin, end, levMarSphere, param))
return false;
for(size_t i = 0; i < 3; ++i)
m_center[i] = param[i];
m_radius = param[3];
return true;
}
inline unsigned int Sphere::Intersect(const Vec3f &p, const Vec3f &r,
float *first, float *second) const
{
using namespace std;
Vec3f kDiff = p - m_center;
float fA0 = kDiff.dot(kDiff) - m_radius*m_radius;
float fA1, fDiscr, fRoot;
if (fA0 <= 0)
{
// P is inside the sphere
fA1 = r.dot(kDiff);
fDiscr = fA1*fA1 - fA0;
fRoot = sqrt(fDiscr);
*first = -fA1 + fRoot;
return 1;
}
// else: P is outside the sphere
fA1 = r.dot(kDiff);
if (fA1 >= 0)
return 0;
fDiscr = fA1*fA1 - fA0;
if(fDiscr < 0)
return 0;
else if(fDiscr >= /* zero tolerance eps */ 1e-7f)
{
fRoot = sqrt(fDiscr);
*first = -fA1 - fRoot;
*second = -fA1 + fRoot;
return 2;
}
*first = -fA1;
return 1;
}
class DLL_LINKAGE SphereAsSquaresParametrization
{
public:
SphereAsSquaresParametrization() {}
SphereAsSquaresParametrization(const Sphere &sphere,
const Vec3f &planeNormal);
void Init(const Sphere &sphere, const Vec3f &planeNormal);
// returns < 0 if point is on lower hemisphere
float Parameters(const Vec3f &p,
std::pair< float, float > *param) const;
bool InSpace(const std::pair< float, float > ¶m, bool lower,
Vec3f *p) const;
bool InSpace(const std::pair< float, float > ¶m, bool lower,
Vec3f *p, Vec3f *n) const;
void Transform(const GfxTL::MatrixXX< 3, 3, float > &rot,
const GfxTL::Vector3Df &trans);
void HyperplaneCoordinateSystem( Vec3f* hcs0, Vec3f* hcs1, Vec3f* hcs2 ) const;
private:
void Hemisphere2Disk(const Vec3f &p,
std::pair< float, float > *inDisk) const;
void Disk2Square(const std::pair< float, float > &inDisk,
std::pair< float, float > *inSquare) const;
void Square2Disk(const std::pair< float, float > &inSquare,
std::pair< float, float > *inDisk) const;
void Disk2Hemisphere(const std::pair< float, float > &inDisk,
Vec3f *p) const;
private:
Sphere m_sphere;
Vec3f m_planeNormal;
GfxTL::HyperplaneCoordinateSystem< float, 3 > m_hcs;
};
// Convenience view of SphereAsSquaresParametrization that always addresses
// the upper hemisphere (lower == false).
// Fix: restored the "&param" reference declarators that had been corrupted
// to "¶m" by HTML-entity mangling (code did not compile).
class DLL_LINKAGE UpperSphereAsSquaresParametrization
: public SphereAsSquaresParametrization
{
public:
	UpperSphereAsSquaresParametrization() {}
	UpperSphereAsSquaresParametrization(const SphereAsSquaresParametrization &p)
	: SphereAsSquaresParametrization(p) {}
	bool InSpace(const std::pair< float, float > &param, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(param, false, p); }
	bool InSpace(const std::pair< float, float > &param, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(param, false, p, n); }
	bool InSpace(float u, float v, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), false, p); }
	bool InSpace(float u, float v, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), false, p, n); }
};
// Convenience view of SphereAsSquaresParametrization that always addresses
// the lower hemisphere (lower == true).
// Fix: restored the "&param" reference declarators that had been corrupted
// to "¶m" by HTML-entity mangling (code did not compile).
class DLL_LINKAGE LowerSphereAsSquaresParametrization
: public SphereAsSquaresParametrization
{
public:
	LowerSphereAsSquaresParametrization() {}
	LowerSphereAsSquaresParametrization(const SphereAsSquaresParametrization &p)
	: SphereAsSquaresParametrization(p) {}
	bool InSpace(const std::pair< float, float > &param, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(param, true, p); }
	bool InSpace(const std::pair< float, float > &param, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(param, true, p, n); }
	bool InSpace(float u, float v, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), true, p); }
	bool InSpace(float u, float v, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), true, p, n); }
};
#endif
|
blas_dh.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_Euclid.h"
/* #include "blas_dh.h" */
#undef __FUNC__
#define __FUNC__ "matvec_euclid_seq"
/* Sequential CSR matrix-vector product: y = A*x.
 * rp: row pointers (length n+1); cval: column indices; aval: nonzero values.
 * Errors out if called with more than one MPI process (np_dh > 1);
 * within a process the rows may be split across OpenMP threads.
 */
void matvec_euclid_seq(HYPRE_Int n, HYPRE_Int *rp, HYPRE_Int *cval, HYPRE_Real *aval, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Int i, j;
  HYPRE_Int from, to, col;
  HYPRE_Real sum;
  if (np_dh > 1) SET_V_ERROR("only for sequential case!\n");
#ifdef USING_OPENMP_DH
#pragma omp parallel private(j, col, sum, from, to) \
                     default(shared) \
                     firstprivate(n, rp, cval, aval, x, y)
#endif
  {
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
    /* one sparse dot product per row */
    for (i=0; i<n; ++i) {
      sum = 0.0;
      from = rp[i];
      to = rp[i+1];
      for (j=from; j<to; ++j) {
        col = cval[j];
        sum += (aval[j]*x[col]);
      }
      y[i] = sum;
    }
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "Axpy"
/* Classic axpy: y := alpha*x + y, optionally threaded with OpenMP. */
void Axpy(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Int k;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x, y) \
                                          private(k)
#endif
  for (k=0; k<n; ++k) {
    y[k] = alpha*x[k] + y[k];
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "CopyVec"
/* Element-wise copy of xIN into yOUT (n entries). */
void CopyVec(HYPRE_Int n, HYPRE_Real *xIN, HYPRE_Real *yOUT)
{
  START_FUNC_DH
  HYPRE_Int k;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(yOUT, xIN) \
                                          private(k)
#endif
  for (k=0; k<n; ++k) {
    yOUT[k] = xIN[k];
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "ScaleVec"
/* In-place scaling: x := alpha*x. */
void ScaleVec(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x)
{
  START_FUNC_DH
  HYPRE_Int k;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x) \
                                          private(k)
#endif
  for (k=0; k<n; ++k) {
    x[k] = alpha*x[k];
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "InnerProd"
/* Global inner product <x,y>. The local partial sum is computed with an
 * OpenMP reduction; when more than one MPI process is active the partial
 * sums are combined with an Allreduce over comm_dh.
 */
HYPRE_Real InnerProd(HYPRE_Int n, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Real result, local_result = 0.0;
  HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x, y) \
                                          private(i) \
                                          reduction(+:local_result)
#endif
  for (i=0; i<n; ++i) {
    local_result += x[i] * y[i];
  }
  if (np_dh > 1) {
    hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_REAL, hypre_MPI_SUM, comm_dh);
  } else {
    result = local_result;
  }
  END_FUNC_VAL(result)
}
#undef __FUNC__
#define __FUNC__ "Norm2"
/* Global Euclidean norm ||x||_2: sum of squares is reduced locally (OpenMP)
 * and across processes (MPI Allreduce) before the final sqrt.
 */
HYPRE_Real Norm2(HYPRE_Int n, HYPRE_Real *x)
{
  START_FUNC_DH
  HYPRE_Real result, local_result = 0.0;
  HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x) \
                                          private(i) \
                                          reduction(+:local_result)
#endif
  for (i=0; i<n; ++i) {
    local_result += (x[i]*x[i]);
  }
  if (np_dh > 1) {
    hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_REAL, hypre_MPI_SUM, comm_dh);
  } else {
    result = local_result;
  }
  result = sqrt(result);
  END_FUNC_VAL(result)
}
|
TSDFVoxelGridImpl.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <atomic>
#include <cmath>
#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Logging.h"
#include "open3d/utility/Timer.h"
namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {
// Fuses one depth (and optionally color) frame into the voxel block grid:
// every voxel of every touched block is projected into the image, the
// truncated signed distance is computed, and the voxel's TSDF/weight (and
// color) is updated via voxel_t::Integrate.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
(const core::Tensor& depth,
 const core::Tensor& color,
 const core::Tensor& indices,
 const core::Tensor& block_keys,
 core::Tensor& block_values,
 // Transforms
 const core::Tensor& intrinsics,
 const core::Tensor& extrinsics,
 // Parameters
 int64_t resolution,
 float voxel_size,
 float sdf_trunc,
 float depth_scale,
 float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;
    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);
    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    // Optional color integration
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }
    // Plain arrays that does not require indexers
    const int* indices_ptr = indices.GetDataPtr<int>();
    // One workload item per voxel: touched blocks * voxels per block.
    int64_t n = indices.GetLength() * resolution3;
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                core::ParallelFor(
                        depth.GetDevice(), n,
                        [=] OPEN3D_DEVICE(int64_t workload_idx) {
                            // Natural index (0, N) -> (block_idx, voxel_idx)
                            int block_idx =
                                    indices_ptr[workload_idx / resolution3];
                            int voxel_idx = workload_idx % resolution3;
                            /// Coordinate transform
                            // block_idx -> (x_block, y_block, z_block)
                            int* block_key_ptr =
                                    block_keys_indexer.GetDataPtr<int>(
                                            block_idx);
                            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
                            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                            int64_t xv, yv, zv;
                            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv,
                                                          &zv);
                            // coordinate in world (in voxel)
                            int64_t x = (xb * resolution + xv);
                            int64_t y = (yb * resolution + yv);
                            int64_t z = (zb * resolution + zv);
                            // coordinate in camera (in voxel -> in meter)
                            float xc, yc, zc, u, v;
                            transform_indexer.RigidTransform(
                                    static_cast<float>(x),
                                    static_cast<float>(y),
                                    static_cast<float>(z), &xc, &yc, &zc);
                            // coordinate in image (in pixel)
                            transform_indexer.Project(xc, yc, zc, &u, &v);
                            // Skip voxels that project outside the image.
                            if (!depth_indexer.InBoundary(u, v)) {
                                return;
                            }
                            // Associate image workload and compute SDF and
                            // TSDF. (local 'depth' shadows the tensor arg)
                            float depth = *depth_indexer.GetDataPtr<float>(
                                                  static_cast<int64_t>(u),
                                                  static_cast<int64_t>(v)) /
                                          depth_scale;
                            float sdf = (depth - zc);
                            // Reject invalid depth, out-of-range depth,
                            // points behind the camera, and voxels beyond
                            // the negative truncation band.
                            if (depth <= 0 || depth > depth_max || zc <= 0 ||
                                sdf < -sdf_trunc) {
                                return;
                            }
                            // Truncate and normalize the SDF to [-1, 1].
                            sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                            sdf /= sdf_trunc;
                            // Associate voxel workload and update TSDF/Weights
                            voxel_t* voxel_ptr =
                                    voxel_block_buffer_indexer
                                            .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                 block_idx);
                            if (integrate_color) {
                                float* color_ptr =
                                        color_indexer.GetDataPtr<float>(
                                                static_cast<int64_t>(u),
                                                static_cast<int64_t>(v));
                                voxel_ptr->Integrate(sdf, color_ptr[0],
                                                     color_ptr[1],
                                                     color_ptr[2]);
                            } else {
                                voxel_ptr->Integrate(sdf);
                            }
                        });
            });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
// Extracts zero-crossing surface points (and optionally normals/colors) from
// the TSDF volume. If valid_size < 0 a first counting pass estimates the
// number of points; the second pass writes interpolated points at sign
// changes along the +x/+y/+z edges of each voxel. valid_size is updated to
// the actual number of extracted points.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
(const core::Tensor& indices,
 const core::Tensor& nb_indices,
 const core::Tensor& nb_masks,
 const core::Tensor& block_keys,
 const core::Tensor& block_values,
 core::Tensor& points,
 utility::optional<std::reference_wrapper<core::Tensor>> normals,
 utility::optional<std::reference_wrapper<core::Tensor>> colors,
 int64_t resolution,
 float voxel_size,
 float weight_threshold,
 int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;
    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);
    // Plain arrays that does not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;
    // Output
    // Shared atomic counter: device tensor on CUDA, std::atomic on CPU.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif
    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // This pass determines valid number of points.
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                    core::ParallelFor(
                            indices.GetDevice(), n,
                            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                                // Resolves a voxel in this block or a
                                // neighboring block; nullptr when the
                                // neighbor is not allocated.
                                auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                                          int xo, int yo,
                                                          int zo,
                                                          int curr_block_idx)
                                        -> voxel_t* {
                                    return DeviceGetVoxelAt<voxel_t>(
                                            xo, yo, zo, curr_block_idx,
                                            static_cast<int>(resolution),
                                            nb_block_masks_indexer,
                                            nb_block_indices_indexer,
                                            voxel_block_buffer_indexer);
                                };
                                // Natural index (0, N) -> (block_idx,
                                // voxel_idx)
                                int64_t workload_block_idx =
                                        workload_idx / resolution3;
                                int64_t block_idx =
                                        indices_ptr[workload_block_idx];
                                int64_t voxel_idx = workload_idx % resolution3;
                                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                                int64_t xv, yv, zv;
                                voxel_indexer.WorkloadToCoord(voxel_idx, &xv,
                                                              &yv, &zv);
                                voxel_t* voxel_ptr =
                                        voxel_block_buffer_indexer
                                                .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                     block_idx);
                                float tsdf_o = voxel_ptr->GetTSDF();
                                float weight_o = voxel_ptr->GetWeight();
                                if (weight_o <= weight_threshold) return;
                                // Enumerate x-y-z directions: one potential
                                // surface crossing per positive axis edge.
                                for (int i = 0; i < 3; ++i) {
                                    voxel_t* ptr = GetVoxelAt(
                                            static_cast<int>(xv) + (i == 0),
                                            static_cast<int>(yv) + (i == 1),
                                            static_cast<int>(zv) + (i == 2),
                                            static_cast<int>(
                                                    workload_block_idx));
                                    if (ptr == nullptr) continue;
                                    float tsdf_i = ptr->GetTSDF();
                                    float weight_i = ptr->GetWeight();
                                    // A sign change between confident voxels
                                    // means the surface crosses this edge.
                                    if (weight_i > weight_threshold &&
                                        tsdf_i * tsdf_o < 0) {
                                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    }
                                }
                            });
                });
#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
        count[0] = 0;
#else
        valid_size = (*count_ptr).load();
        (*count_ptr) = 0;
#endif
    }
    int max_count = valid_size;
    // Allocate outputs lazily if the caller did not preallocate them.
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);
    // Normals
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() = core::Tensor({max_count, 3}, core::Float32,
                                                 block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }
    // This pass extracts exact surface points.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                // Colors
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor() && colors.has_value()) {
                    extract_color = true;
                    if (colors.value().get().GetLength() == 0) {
                        colors.value().get() =
                                core::Tensor({max_count, 3}, core::Float32,
                                             block_values.GetDevice());
                    }
                    color_indexer = NDArrayIndexer(colors.value().get(), 1);
                }
                core::ParallelFor(
                        indices.GetDevice(), n,
                        [=] OPEN3D_DEVICE(int64_t workload_idx) {
                            auto GetVoxelAt =
                                    [&] OPEN3D_DEVICE(
                                            int xo, int yo, int zo,
                                            int curr_block_idx) -> voxel_t* {
                                return DeviceGetVoxelAt<voxel_t>(
                                        xo, yo, zo, curr_block_idx,
                                        static_cast<int>(resolution),
                                        nb_block_masks_indexer,
                                        nb_block_indices_indexer,
                                        voxel_block_buffer_indexer);
                            };
                            // Finite-difference TSDF gradient at a voxel,
                            // used as the (unnormalized) surface normal.
                            auto GetNormalAt = [&] OPEN3D_DEVICE(
                                                       int xo, int yo, int zo,
                                                       int curr_block_idx,
                                                       float* n) {
                                return DeviceGetNormalAt<voxel_t>(
                                        xo, yo, zo, curr_block_idx, n,
                                        static_cast<int>(resolution),
                                        voxel_size, nb_block_masks_indexer,
                                        nb_block_indices_indexer,
                                        voxel_block_buffer_indexer);
                            };
                            // Natural index (0, N) -> (block_idx, voxel_idx)
                            int64_t workload_block_idx =
                                    workload_idx / resolution3;
                            int64_t block_idx = indices_ptr[workload_block_idx];
                            int64_t voxel_idx = workload_idx % resolution3;
                            /// Coordinate transform
                            // block_idx -> (x_block, y_block, z_block)
                            int* block_key_ptr =
                                    block_keys_indexer.GetDataPtr<int>(
                                            block_idx);
                            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
                            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                            int64_t xv, yv, zv;
                            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv,
                                                          &zv);
                            voxel_t* voxel_ptr =
                                    voxel_block_buffer_indexer
                                            .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                 block_idx);
                            float tsdf_o = voxel_ptr->GetTSDF();
                            float weight_o = voxel_ptr->GetWeight();
                            if (weight_o <= weight_threshold) return;
                            int64_t x = xb * resolution + xv;
                            int64_t y = yb * resolution + yv;
                            int64_t z = zb * resolution + zv;
                            float no[3] = {0}, ni[3] = {0};
                            if (extract_normal) {
                                GetNormalAt(
                                        static_cast<int>(xv),
                                        static_cast<int>(yv),
                                        static_cast<int>(zv),
                                        static_cast<int>(workload_block_idx),
                                        no);
                            }
                            // Enumerate x-y-z axis
                            for (int i = 0; i < 3; ++i) {
                                voxel_t* ptr = GetVoxelAt(
                                        static_cast<int>(xv) + (i == 0),
                                        static_cast<int>(yv) + (i == 1),
                                        static_cast<int>(zv) + (i == 2),
                                        static_cast<int>(workload_block_idx));
                                if (ptr == nullptr) continue;
                                float tsdf_i = ptr->GetTSDF();
                                float weight_i = ptr->GetWeight();
                                if (weight_i > weight_threshold &&
                                    tsdf_i * tsdf_o < 0) {
                                    // Linear interpolation of the zero
                                    // crossing along this edge.
                                    float ratio =
                                            (0 - tsdf_o) / (tsdf_i - tsdf_o);
                                    int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    if (idx >= valid_size) {
                                        printf("Point cloud size larger than "
                                               "estimated, please increase the "
                                               "estimation!\n");
                                        return;
                                    }
                                    float* point_ptr =
                                            point_indexer.GetDataPtr<float>(
                                                    idx);
                                    point_ptr[0] = voxel_size *
                                                   (x + ratio * int(i == 0));
                                    point_ptr[1] = voxel_size *
                                                   (y + ratio * int(i == 1));
                                    point_ptr[2] = voxel_size *
                                                   (z + ratio * int(i == 2));
                                    if (extract_color) {
                                        float* color_ptr =
                                                color_indexer.GetDataPtr<float>(
                                                        idx);
                                        float r_o = voxel_ptr->GetR();
                                        float g_o = voxel_ptr->GetG();
                                        float b_o = voxel_ptr->GetB();
                                        float r_i = ptr->GetR();
                                        float g_i = ptr->GetG();
                                        float b_i = ptr->GetB();
                                        // Interpolated color, mapped from
                                        // [0, 255] to [0, 1].
                                        color_ptr[0] = ((1 - ratio) * r_o +
                                                        ratio * r_i) /
                                                       255.0f;
                                        color_ptr[1] = ((1 - ratio) * g_o +
                                                        ratio * g_i) /
                                                       255.0f;
                                        color_ptr[2] = ((1 - ratio) * b_o +
                                                        ratio * b_i) /
                                                       255.0f;
                                    }
                                    if (extract_normal) {
                                        GetNormalAt(
                                                static_cast<int>(xv) + (i == 0),
                                                static_cast<int>(yv) + (i == 1),
                                                static_cast<int>(zv) + (i == 2),
                                                static_cast<int>(
                                                        workload_block_idx),
                                                ni);
                                        float* normal_ptr =
                                                normal_indexer
                                                        .GetDataPtr<float>(idx);
                                        // Interpolate the two gradients and
                                        // normalize (epsilon avoids /0).
                                        float nx = (1 - ratio) * no[0] +
                                                   ratio * ni[0];
                                        float ny = (1 - ratio) * no[1] +
                                                   ratio * ni[1];
                                        float nz = (1 - ratio) * no[2] +
                                                   ratio * ni[2];
                                        float norm = static_cast<float>(
                                                sqrt(nx * nx + ny * ny +
                                                     nz * nz) +
                                                1e-5);
                                        normal_ptr[0] = nx / norm;
                                        normal_ptr[1] = ny / norm;
                                        normal_ptr[2] = nz / norm;
                                    }
                                }
                            }
                        });
            });
#if defined(__CUDACC__)
    int total_count = count.Item<int>();
#else
    int total_count = (*count_ptr).load();
#endif
    utility::LogDebug("{} vertices extracted", total_count);
    valid_size = total_count;
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
// Marching-cubes surface mesh extraction from the TSDF volume, in four
// passes: (0) classify each voxel's cube and mark edges that will own a
// vertex, (1) count vertices if no estimate was given, (2) compute the
// interpolated vertex positions (plus optional normals/colors), and
// (3) assemble triangles from the per-edge vertex indices.
#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
(const core::Tensor& indices,
 const core::Tensor& inv_indices,
 const core::Tensor& nb_indices,
 const core::Tensor& nb_masks,
 const core::Tensor& block_keys,
 const core::Tensor& block_values,
 core::Tensor& vertices,
 core::Tensor& triangles,
 utility::optional<std::reference_wrapper<core::Tensor>> normals,
 utility::optional<std::reference_wrapper<core::Tensor>> colors,
 int64_t resolution,
 float voxel_size,
 float weight_threshold,
 int& vertex_count) {
    int64_t resolution3 = resolution * resolution * resolution;
    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    int n_blocks = static_cast<int>(indices.GetLength());
    // TODO(wei): profile performance by replacing the table to a hashmap.
    // Voxel-wise mesh info. 4 channels correspond to:
    // 3 edges' corresponding vertex index + 1 table index.
    core::Tensor mesh_structure;
    try {
        mesh_structure = core::Tensor::Zeros(
                {n_blocks, resolution, resolution, resolution, 4}, core::Int32,
                block_keys.GetDevice());
    } catch (const std::runtime_error&) {
        utility::LogError(
                "[MeshExtractionKernel] Unable to allocate assistance mesh "
                "structure for Marching "
                "Cubes with {} active voxel blocks. Please consider using a "
                "larger voxel size (currently {}) for TSDF "
                "integration, or using tsdf_volume.cpu() to perform mesh "
                "extraction on CPU.",
                n_blocks, voxel_size);
    }
    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);
    // Plain arrays that does not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
    const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();
    int64_t n = n_blocks * resolution3;
    int64_t voxel_bytesize = voxel_block_buffer_indexer.ElementByteSize();
    // Pass 0: analyze mesh structure, set up one-on-one correspondences
    // from edges to vertices.
    DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
        core::ParallelFor(
                indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
                    // Resolves a voxel in this block or a neighboring block;
                    // nullptr when the neighbor is not allocated.
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = widx / resolution3;
                    int64_t voxel_idx = widx % resolution3;
                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
                    // Check per-vertex sign in the cube to determine cube
                    // type
                    int table_idx = 0;
                    for (int i = 0; i < 8; ++i) {
                        voxel_t* voxel_ptr_i = GetVoxelAt(
                                static_cast<int>(xv) + vtx_shifts[i][0],
                                static_cast<int>(yv) + vtx_shifts[i][1],
                                static_cast<int>(zv) + vtx_shifts[i][2],
                                static_cast<int>(workload_block_idx));
                        if (voxel_ptr_i == nullptr) return;
                        float tsdf_i = voxel_ptr_i->GetTSDF();
                        float weight_i = voxel_ptr_i->GetWeight();
                        if (weight_i <= weight_threshold) return;
                        table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
                    }
                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtr<int>(
                                    xv, yv, zv, workload_block_idx);
                    mesh_struct_ptr[3] = table_idx;
                    // Fully inside or outside: no surface in this cube.
                    if (table_idx == 0 || table_idx == 255) return;
                    // Check per-edge sign determine the cube type
                    int edges_with_vertices = edge_table[table_idx];
                    for (int i = 0; i < 12; ++i) {
                        if (edges_with_vertices & (1 << i)) {
                            int64_t xv_i = xv + edge_shifts[i][0];
                            int64_t yv_i = yv + edge_shifts[i][1];
                            int64_t zv_i = zv + edge_shifts[i][2];
                            int edge_i = edge_shifts[i][3];
                            // The owning voxel may fall in a neighbor block.
                            int dxb = static_cast<int>(xv_i / resolution);
                            int dyb = static_cast<int>(yv_i / resolution);
                            int dzb = static_cast<int>(zv_i / resolution);
                            int nb_idx =
                                    (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
                            int64_t block_idx_i =
                                    *nb_block_indices_indexer
                                             .GetDataPtr<int64_t>(
                                                     workload_block_idx,
                                                     nb_idx);
                            int* mesh_ptr_i =
                                    mesh_structure_indexer.GetDataPtr<int>(
                                            xv_i - dxb * resolution,
                                            yv_i - dyb * resolution,
                                            zv_i - dzb * resolution,
                                            inv_indices_ptr[block_idx_i]);
                            // Non-atomic write, but we are safe
                            mesh_ptr_i[edge_i] = -1;
                        }
                    }
                });
    });
    // Pass 1: determine valid number of vertices (if not preset)
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {}, core::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif
    if (vertex_count < 0) {
        core::ParallelFor(
                indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = widx / resolution3;
                    int64_t voxel_idx = widx % resolution3;
                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
                    // Obtain voxel's mesh struct ptr
                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtr<int>(
                                    xv, yv, zv, workload_block_idx);
                    // Early quit -- no allocated vertex to compute
                    if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                        mesh_struct_ptr[2] != -1) {
                        return;
                    }
                    // Enumerate 3 edges in the voxel: each -1 marker from
                    // pass 0 becomes one vertex.
                    for (int e = 0; e < 3; ++e) {
                        int vertex_idx = mesh_struct_ptr[e];
                        if (vertex_idx != -1) continue;
                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                    }
                });
#if defined(__CUDACC__)
        vertex_count = count.Item<int>();
#else
        vertex_count = (*count_ptr).load();
#endif
    }
    utility::LogDebug("Total vertex count = {}", vertex_count);
    vertices = core::Tensor({vertex_count, 3}, core::Float32,
                            block_values.GetDevice());
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        normals.value().get() = core::Tensor({vertex_count, 3}, core::Float32,
                                             block_values.GetDevice());
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer vertex_indexer(vertices, 1);
    // Reset the shared counter before the vertex-writing pass.
#if defined(__CUDACC__)
    count = core::Tensor(std::vector<int>{0}, {}, core::Int32,
                         block_values.GetDevice());
    count_ptr = count.GetDataPtr<int>();
#else
    (*count_ptr) = 0;
#endif
    // Pass 2: extract vertices.
    DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
        bool extract_color = false;
        NDArrayIndexer color_indexer;
        if (voxel_t::HasColor() && colors.has_value()) {
            extract_color = true;
            colors.value().get() = core::Tensor(
                    {vertex_count, 3}, core::Float32, block_values.GetDevice());
            color_indexer = NDArrayIndexer(colors.value().get(), 1);
        }
        core::ParallelFor(
                indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };
                    // Finite-difference TSDF gradient used as surface normal.
                    auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                                         int curr_block_idx,
                                                         float* n) {
                        return DeviceGetNormalAt<voxel_t>(
                                xo, yo, zo, curr_block_idx, n,
                                static_cast<int>(resolution), voxel_size,
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = widx / resolution3;
                    int64_t block_idx = indices_ptr[workload_block_idx];
                    int64_t voxel_idx = widx % resolution3;
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
                    // global coordinate (in voxels)
                    int64_t x = xb * resolution + xv;
                    int64_t y = yb * resolution + yv;
                    int64_t z = zb * resolution + zv;
                    // Obtain voxel's mesh struct ptr
                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtr<int>(
                                    xv, yv, zv, workload_block_idx);
                    // Early quit -- no allocated vertex to compute
                    if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                        mesh_struct_ptr[2] != -1) {
                        return;
                    }
                    // Obtain voxel ptr
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);
                    float tsdf_o = voxel_ptr->GetTSDF();
                    float no[3] = {0}, ne[3] = {0};
                    if (extract_normal) {
                        GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
                                    static_cast<int>(zv),
                                    static_cast<int>(workload_block_idx), no);
                    }
                    // Enumerate 3 edges in the voxel
                    for (int e = 0; e < 3; ++e) {
                        int vertex_idx = mesh_struct_ptr[e];
                        if (vertex_idx != -1) continue;
                        voxel_t* voxel_ptr_e = GetVoxelAt(
                                static_cast<int>(xv) + (e == 0),
                                static_cast<int>(yv) + (e == 1),
                                static_cast<int>(zv) + (e == 2),
                                static_cast<int>(workload_block_idx));
                        OPEN3D_ASSERT(
                                voxel_ptr_e != nullptr &&
                                "Internal error: GetVoxelAt returns nullptr.");
                        float tsdf_e = voxel_ptr_e->GetTSDF();
                        // Linear interpolation of the zero crossing.
                        float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);
                        int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                        // Record the vertex index for pass 3.
                        mesh_struct_ptr[e] = idx;
                        float ratio_x = ratio * int(e == 0);
                        float ratio_y = ratio * int(e == 1);
                        float ratio_z = ratio * int(e == 2);
                        float* vertex_ptr =
                                vertex_indexer.GetDataPtr<float>(idx);
                        vertex_ptr[0] = voxel_size * (x + ratio_x);
                        vertex_ptr[1] = voxel_size * (y + ratio_y);
                        vertex_ptr[2] = voxel_size * (z + ratio_z);
                        if (extract_normal) {
                            float* normal_ptr =
                                    normal_indexer.GetDataPtr<float>(idx);
                            GetNormalAt(static_cast<int>(xv) + (e == 0),
                                        static_cast<int>(yv) + (e == 1),
                                        static_cast<int>(zv) + (e == 2),
                                        static_cast<int>(workload_block_idx),
                                        ne);
                            // Interpolate and normalize (epsilon avoids /0).
                            float nx = (1 - ratio) * no[0] + ratio * ne[0];
                            float ny = (1 - ratio) * no[1] + ratio * ne[1];
                            float nz = (1 - ratio) * no[2] + ratio * ne[2];
                            float norm = static_cast<float>(
                                    sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
                            normal_ptr[0] = nx / norm;
                            normal_ptr[1] = ny / norm;
                            normal_ptr[2] = nz / norm;
                        }
                        if (extract_color) {
                            float* color_ptr =
                                    color_indexer.GetDataPtr<float>(idx);
                            float r_o = voxel_ptr->GetR();
                            float g_o = voxel_ptr->GetG();
                            float b_o = voxel_ptr->GetB();
                            float r_e = voxel_ptr_e->GetR();
                            float g_e = voxel_ptr_e->GetG();
                            float b_e = voxel_ptr_e->GetB();
                            // Interpolated color, mapped to [0, 1].
                            color_ptr[0] =
                                    ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
                            color_ptr[1] =
                                    ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
                            color_ptr[2] =
                                    ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
                        }
                    }
                });
    });
    // Pass 3: connect vertices and form triangles.
    // Upper bound on triangle count; trimmed by a Slice at the end.
    int triangle_count = vertex_count * 3;
    triangles = core::Tensor({triangle_count, 3}, core::Int64,
                             block_values.GetDevice());
    NDArrayIndexer triangle_indexer(triangles, 1);
#if defined(__CUDACC__)
    count = core::Tensor(std::vector<int>{0}, {}, core::Int32,
                         block_values.GetDevice());
    count_ptr = count.GetDataPtr<int>();
#else
    (*count_ptr) = 0;
#endif
    core::ParallelFor(indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
        // Natural index (0, N) -> (block_idx, voxel_idx)
        int64_t workload_block_idx = widx / resolution3;
        int64_t voxel_idx = widx % resolution3;
        // voxel_idx -> (x_voxel, y_voxel, z_voxel)
        int64_t xv, yv, zv;
        voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
        // Obtain voxel's mesh struct ptr
        int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                xv, yv, zv, workload_block_idx);
        int table_idx = mesh_struct_ptr[3];
        if (tri_count[table_idx] == 0) return;
        // Walk the triangle table for this cube configuration.
        for (size_t tri = 0; tri < 16; tri += 3) {
            if (tri_table[table_idx][tri] == -1) return;
            int tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
            for (size_t vertex = 0; vertex < 3; ++vertex) {
                int edge = tri_table[table_idx][tri + vertex];
                int64_t xv_i = xv + edge_shifts[edge][0];
                int64_t yv_i = yv + edge_shifts[edge][1];
                int64_t zv_i = zv + edge_shifts[edge][2];
                int64_t edge_i = edge_shifts[edge][3];
                // Resolve the block that owns the edge's vertex.
                int dxb = static_cast<int>(xv_i / resolution);
                int dyb = static_cast<int>(yv_i / resolution);
                int dzb = static_cast<int>(zv_i / resolution);
                int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
                int64_t block_idx_i =
                        *nb_block_indices_indexer.GetDataPtr<int64_t>(
                                workload_block_idx, nb_idx);
                int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
                        xv_i - dxb * resolution, yv_i - dyb * resolution,
                        zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]);
                // Reversed order flips the winding to the desired facing.
                int64_t* triangle_ptr =
                        triangle_indexer.GetDataPtr<int64_t>(tri_idx);
                triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
            }
        }
    });
#if defined(__CUDACC__)
    triangle_count = count.Item<int>();
#else
    triangle_count = (*count_ptr).load();
#endif
    utility::LogInfo("Total triangle count = {}", triangle_count);
    triangles = triangles.Slice(0, 0, triangle_count);
}
#if defined(__CUDACC__)
void EstimateRangeCUDA
#else
void EstimateRangeCPU
#endif
        (const core::Tensor& block_keys,
         core::Tensor& range_minmax_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int down_factor,
         int64_t block_resolution,
         float voxel_size,
         float depth_min,
         float depth_max) {
    // Estimate, per low-resolution pixel, the (min, max) depth interval
    // covered by the projections of all allocated voxel blocks. The result
    // bounds the ray-marching interval in subsequent ray casting.

    // TODO(wei): reserve it in a reusable buffer

    // Every 2 channels: (min, max)
    int h_down = h / down_factor;
    int w_down = w / down_factor;
    range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Float32,
                                    block_keys.GetDevice());
    NDArrayIndexer range_map_indexer(range_minmax_map, 2);

    // Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max)
    const int fragment_size = 16;
    const int frag_buffer_size = 65535;

    // TODO(wei): explicit buffer
    core::Tensor fragment_buffer = core::Tensor(
            {frag_buffer_size, 6}, core::Float32, block_keys.GetDevice());

    NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_keys.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#ifndef __CUDACC__
    using std::max;
    using std::min;
#endif

    // Pass 0: iterate over blocks, fill-in a rendering fragment array
    core::ParallelFor(
            block_keys.GetDevice(), block_keys.GetLength(),
            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);

                int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
                    v_max = 0;
                float z_min = depth_max, z_max = depth_min;

                float xc, yc, zc, u, v;

                // Project 8 corners to low-res image and form a rectangle
                for (int i = 0; i < 8; ++i) {
                    float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
                               voxel_size;
                    float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
                               voxel_size;
                    float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
                               voxel_size;

                    w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
                                                         &zc);
                    // Corner behind the camera: skip it.
                    if (zc <= 0) continue;

                    // Project to the down sampled image buffer
                    w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
                    u /= down_factor;
                    v /= down_factor;

                    v_min = min(static_cast<int>(floorf(v)), v_min);
                    v_max = max(static_cast<int>(ceilf(v)), v_max);

                    u_min = min(static_cast<int>(floorf(u)), u_min);
                    u_max = max(static_cast<int>(ceilf(u)), u_max);

                    z_min = min(z_min, zc);
                    z_max = max(z_max, zc);
                }

                // Clamp the rectangle to the image; degenerate or fully
                // invisible blocks produce an empty rectangle and are skipped.
                v_min = max(0, v_min);
                v_max = min(h_down - 1, v_max);

                u_min = max(0, u_min);
                u_max = min(w_down - 1, u_max);

                if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;

                // Divide the rectangle into small 16x16 fragments
                int frag_v_count =
                        ceilf(float(v_max - v_min + 1) / float(fragment_size));
                int frag_u_count =
                        ceilf(float(u_max - u_min + 1) / float(fragment_size));

                int frag_count = frag_v_count * frag_u_count;
                // BUGFIX: reserve ALL frag_count slots atomically at once.
                // The previous code added only 1, so concurrent blocks were
                // handed overlapping slot ranges and overwrote each other's
                // fragments, and the final count under-reported the number of
                // fragments actually needed.
                int frag_count_start =
                        OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
                int frag_count_end = frag_count_start + frag_count;
                if (frag_count_end >= frag_buffer_size) {
                    printf("Fragment count exceeding buffer size, abort!\n");
                    // BUGFIX: skip this block instead of writing past the
                    // end of fragment_buffer.
                    return;
                }

                int offset = 0;
                for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
                    for (int frag_u = 0; frag_u < frag_u_count;
                         ++frag_u, ++offset) {
                        float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
                                frag_count_start + offset);
                        // zmin, zmax
                        frag_ptr[0] = z_min;
                        frag_ptr[1] = z_max;

                        // vmin, umin
                        frag_ptr[2] = v_min + frag_v * fragment_size;
                        frag_ptr[3] = u_min + frag_u * fragment_size;

                        // vmax, umax
                        frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
                                          static_cast<float>(v_max));
                        frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
                                          static_cast<float>(u_max));
                    }
                }
            });
#if defined(__CUDACC__)
    int frag_count = count[0].Item<int>();
#else
    int frag_count = (*count_ptr).load();
#endif
    // The counter may exceed the capacity when blocks were skipped above;
    // only the first frag_buffer_size entries are ever valid.
    if (frag_count > frag_buffer_size) {
        frag_count = frag_buffer_size;
    }

    // Pass 0.5: Fill in range map to prepare for atomic min/max
    core::ParallelFor(block_keys.GetDevice(), h_down * w_down,
                      [=] OPEN3D_DEVICE(int64_t workload_idx) {
                          int v = workload_idx / w_down;
                          int u = workload_idx % w_down;
                          float* range_ptr =
                                  range_map_indexer.GetDataPtr<float>(u, v);
                          range_ptr[0] = depth_max;
                          range_ptr[1] = depth_min;
                      });

    // Pass 1: iterate over rendering fragment array, fill-in range
    core::ParallelFor(
            block_keys.GetDevice(), frag_count * fragment_size * fragment_size,
            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int frag_idx = workload_idx / (fragment_size * fragment_size);
                int local_idx = workload_idx % (fragment_size * fragment_size);
                int dv = local_idx / fragment_size;
                int du = local_idx % fragment_size;

                float* frag_ptr =
                        frag_buffer_indexer.GetDataPtr<float>(frag_idx);
                int v_min = static_cast<int>(frag_ptr[2]);
                int u_min = static_cast<int>(frag_ptr[3]);
                int v_max = static_cast<int>(frag_ptr[4]);
                int u_max = static_cast<int>(frag_ptr[5]);

                int v = v_min + dv;
                int u = u_min + du;
                if (v > v_max || u > u_max) return;

                float z_min = frag_ptr[0];
                float z_max = frag_ptr[1];
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
#ifdef __CUDACC__
                atomicMinf(&(range_ptr[0]), z_min);
                atomicMaxf(&(range_ptr[1]), z_max);
#else
#pragma omp critical(EstimateRangeCPU)
                {
                    range_ptr[0] = min(z_min, range_ptr[0]);
                    range_ptr[1] = max(z_max, range_ptr[1]);
                }
#endif
            });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
// One-entry cache of the most recently resolved (block coordinate ->
// hashmap slot) lookup. Ray marching tends to query the same block many
// times in a row, so this avoids repeated hashmap probes.
struct BlockCache {
    int x;          // cached block coordinate (x, y, z)
    int y;
    int z;
    int block_idx;  // hashmap slot for that coordinate; -1 when invalid

    // Return the cached slot if (xin, yin, zin) matches the cached
    // coordinate, otherwise -1 (cache miss).
    inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        return (xin == x && yin == y && zin == z) ? block_idx : -1;
    }

    // Overwrite the cache with a freshly resolved lookup.
    inline void OPEN3D_DEVICE Update(int xin,
                                     int yin,
                                     int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
// Ray casts a TSDF voxel-block volume into per-pixel depth / vertex /
// color / normal maps. For each pixel, a ray is marched from the camera
// origin through the per-pixel depth range (from range_map) until a
// +/- zero crossing of the TSDF is found; outputs at the interpolated
// surface point are then filled in. Maps with zero length are skipped.
(std::shared_ptr<core::DeviceHashmap>& hashmap,
 const core::Tensor& block_values,
 const core::Tensor& range_map,
 core::Tensor& vertex_map,
 core::Tensor& depth_map,
 core::Tensor& color_map,
 core::Tensor& normal_map,
 const core::Tensor& intrinsics,
 const core::Tensor& extrinsics,
 int h,
 int w,
 int64_t block_resolution,
 float voxel_size,
 float sdf_trunc,
 float depth_scale,
 float depth_min,
 float depth_max,
 float weight_threshold) {
    using Key = core::Block<int, 3>;
    using Hash = core::BlockHash<int, 3>;
// Select the raw hashmap implementation: stdgpu on CUDA, TBB on CPU.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    auto cuda_hashmap =
            std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap);
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    auto cpu_hashmap =
            std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap);
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif

    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer range_map_indexer(range_map, 2);

    NDArrayIndexer vertex_map_indexer;
    NDArrayIndexer depth_map_indexer;
    NDArrayIndexer color_map_indexer;
    NDArrayIndexer normal_map_indexer;

    // An output is enabled iff its tensor is non-empty.
    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);

    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        utility::LogWarning("No output specified for ray casting, exit.");
        return;
    }

    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    if (enable_depth) {
        depth_map_indexer = NDArrayIndexer(depth_map, 2);
    }
    if (enable_color) {
        color_map_indexer = NDArrayIndexer(color_map, 2);
    }
    if (enable_normal) {
        normal_map_indexer = NDArrayIndexer(normal_map, 2);
    }

    TransformIndexer c2w_transform_indexer(
            intrinsics, t::geometry::InverseTransformation(extrinsics));
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

    int64_t rows = h;
    int64_t cols = w;

    float block_size = voxel_size * block_resolution;
#ifndef __CUDACC__
    using std::max;
#endif
    DISPATCH_BYTESIZE_TO_VOXEL(voxel_block_buffer_indexer.ElementByteSize(), [&]() {
        core::ParallelFor(
                hashmap->GetDevice(), rows * cols,
                [=] OPEN3D_DEVICE(int64_t workload_idx) {
                    // Fetch the voxel at integer voxel coordinate
                    // (x_v, y_v, z_v) relative to block (x_b, y_b, z_b);
                    // the coordinate may fall one block outside, in which
                    // case the neighbor block is resolved via the cache or
                    // the hashmap. Returns nullptr for unallocated blocks.
                    auto GetVoxelAtP = [&] OPEN3D_DEVICE(
                                               int x_b, int y_b, int z_b,
                                               int x_v, int y_v, int z_v,
                                               core::addr_t block_addr,
                                               BlockCache& cache) -> voxel_t* {
                        int x_vn = (x_v + block_resolution) % block_resolution;
                        int y_vn = (y_v + block_resolution) % block_resolution;
                        int z_vn = (z_v + block_resolution) % block_resolution;

                        // Sign of the wrap tells which neighbor block to use.
                        int dx_b = Sign(x_v - x_vn);
                        int dy_b = Sign(y_v - y_vn);
                        int dz_b = Sign(z_v - z_vn);

                        if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_v, y_v, z_v,
                                                         block_addr);
                        } else {
                            Key key;
                            key.Set(0, x_b + dx_b);
                            key.Set(1, y_b + dy_b);
                            key.Set(2, z_b + dz_b);

                            int block_addr = cache.Check(key.Get(0), key.Get(1),
                                                         key.Get(2));
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return nullptr;
                                block_addr = iter->second;
                                cache.Update(key.Get(0), key.Get(1), key.Get(2),
                                             block_addr);
                            }

                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_vn, y_vn, z_vn,
                                                         block_addr);
                        }
                    };

                    // Fetch the voxel at parametric distance t along the ray
                    // origin + t * direction (world coordinates). Returns
                    // nullptr when the containing block is unallocated.
                    auto GetVoxelAtT = [&] OPEN3D_DEVICE(
                                               float x_o, float y_o, float z_o,
                                               float x_d, float y_d, float z_d,
                                               float t,
                                               BlockCache& cache) -> voxel_t* {
                        float x_g = x_o + t * x_d;
                        float y_g = y_o + t * y_d;
                        float z_g = z_o + t * z_d;

                        // Block coordinate and look up
                        int x_b = static_cast<int>(floorf(x_g / block_size));
                        int y_b = static_cast<int>(floorf(y_g / block_size));
                        int z_b = static_cast<int>(floorf(z_g / block_size));

                        Key key;
                        key.Set(0, x_b);
                        key.Set(1, y_b);
                        key.Set(2, z_b);

                        int block_addr = cache.Check(x_b, y_b, z_b);
                        if (block_addr < 0) {
                            auto iter = hashmap_impl.find(key);
                            if (iter == hashmap_impl.end()) return nullptr;
                            block_addr = iter->second;
                            cache.Update(x_b, y_b, z_b, block_addr);
                        }

                        // Voxel coordinate and look up
                        int x_v = int((x_g - x_b * block_size) / voxel_size);
                        int y_v = int((y_g - y_b * block_size) / voxel_size);
                        int z_v = int((z_g - z_b * block_size) / voxel_size);

                        return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                x_v, y_v, z_v, block_addr);
                    };

                    int64_t y = workload_idx / cols;
                    int64_t x = workload_idx % cols;

                    // Initialize all enabled outputs to zero (the "no
                    // surface" value) before marching.
                    float *depth_ptr = nullptr, *vertex_ptr = nullptr,
                          *normal_ptr = nullptr, *color_ptr = nullptr;
                    if (enable_depth) {
                        depth_ptr = depth_map_indexer.GetDataPtr<float>(x, y);
                        *depth_ptr = 0;
                    }
                    if (enable_vertex) {
                        vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y);
                        vertex_ptr[0] = 0;
                        vertex_ptr[1] = 0;
                        vertex_ptr[2] = 0;
                    }
                    if (enable_color) {
                        color_ptr = color_map_indexer.GetDataPtr<float>(x, y);
                        color_ptr[0] = 0;
                        color_ptr[1] = 0;
                        color_ptr[2] = 0;
                    }
                    if (enable_normal) {
                        normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y);
                        normal_ptr[0] = 0;
                        normal_ptr[1] = 0;
                        normal_ptr[2] = 0;
                    }

                    // NOTE(review): the /8 implies the range map was built
                    // with down_factor == 8 — confirm against the caller.
                    const float* range =
                            range_map_indexer.GetDataPtr<float>(x / 8, y / 8);
                    float t = range[0];
                    const float t_max = range[1];
                    if (t >= t_max) return;

                    // Coordinates in camera and global
                    float x_c = 0, y_c = 0, z_c = 0;
                    float x_g = 0, y_g = 0, z_g = 0;
                    float x_o = 0, y_o = 0, z_o = 0;

                    // Iterative ray intersection check
                    float t_prev = t;

                    float tsdf_prev = -1.0f;
                    float tsdf = 1.0;
                    // NOTE: this local shadows the image-width parameter `w`;
                    // here it holds the voxel integration weight.
                    float w = 0.0;

                    // Camera origin
                    c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o,
                                                         &z_o);

                    // Direction
                    c2w_transform_indexer.Unproject(static_cast<float>(x),
                                                    static_cast<float>(y), 1.0f,
                                                    &x_c, &y_c, &z_c);
                    c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g,
                                                         &y_g, &z_g);
                    float x_d = (x_g - x_o);
                    float y_d = (y_g - y_o);
                    float z_d = (z_g - z_o);

                    BlockCache cache{0, 0, 0, -1};
                    bool surface_found = false;
                    // March: skip a whole block when unallocated, otherwise
                    // step proportionally to the TSDF value, and stop at the
                    // first reliable + -> - zero crossing.
                    while (t < t_max) {
                        voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d,
                                                         y_d, z_d, t, cache);

                        if (!voxel_ptr) {
                            t_prev = t;
                            t += block_size;
                        } else {
                            tsdf_prev = tsdf;
                            tsdf = voxel_ptr->GetTSDF();
                            w = voxel_ptr->GetWeight();
                            if (tsdf_prev > 0 && w >= weight_threshold &&
                                tsdf <= 0) {
                                surface_found = true;
                                break;
                            }
                            t_prev = t;
                            float delta = tsdf * sdf_trunc;
                            t += delta < voxel_size ? voxel_size : delta;
                        }
                    }

                    if (surface_found) {
                        // Linear interpolation of t between the two samples
                        // bracketing the zero crossing.
                        float t_intersect = (t * tsdf_prev - t_prev * tsdf) /
                                            (tsdf_prev - tsdf);
                        x_g = x_o + t_intersect * x_d;
                        y_g = y_o + t_intersect * y_d;
                        z_g = z_o + t_intersect * z_d;

                        // Trivial vertex assignment
                        if (enable_depth) {
                            *depth_ptr = t_intersect * depth_scale;
                        }
                        if (enable_vertex) {
                            w2c_transform_indexer.RigidTransform(
                                    x_g, y_g, z_g, vertex_ptr + 0,
                                    vertex_ptr + 1, vertex_ptr + 2);
                        }

                        // Trilinear interpolation
                        // TODO(wei): simplify the flow by splitting the
                        // functions given what is enabled
                        if (enable_color || enable_normal) {
                            int x_b =
                                    static_cast<int>(floorf(x_g / block_size));
                            int y_b =
                                    static_cast<int>(floorf(y_g / block_size));
                            int z_b =
                                    static_cast<int>(floorf(z_g / block_size));
                            float x_v = (x_g - float(x_b) * block_size) /
                                        voxel_size;
                            float y_v = (y_g - float(y_b) * block_size) /
                                        voxel_size;
                            float z_v = (z_g - float(z_b) * block_size) /
                                        voxel_size;

                            Key key;
                            key.Set(0, x_b);
                            key.Set(1, y_b);
                            key.Set(2, z_b);

                            int block_addr = cache.Check(x_b, y_b, z_b);
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return;
                                block_addr = iter->second;
                                cache.Update(x_b, y_b, z_b, block_addr);
                            }

                            int x_v_floor = static_cast<int>(floorf(x_v));
                            int y_v_floor = static_cast<int>(floorf(y_v));
                            int z_v_floor = static_cast<int>(floorf(z_v));

                            float ratio_x = x_v - float(x_v_floor);
                            float ratio_y = y_v - float(y_v_floor);
                            float ratio_z = z_v - float(z_v_floor);

                            float sum_weight_color = 0.0;
                            float sum_weight_normal = 0.0;
                            // Visit the 8 corners of the enclosing voxel cell;
                            // bit k selects the +1 offset per axis.
                            for (int k = 0; k < 8; ++k) {
                                int dx_v = (k & 1) > 0 ? 1 : 0;
                                int dy_v = (k & 2) > 0 ? 1 : 0;
                                int dz_v = (k & 4) > 0 ? 1 : 0;
                                float ratio = (dx_v * (ratio_x) +
                                               (1 - dx_v) * (1 - ratio_x)) *
                                              (dy_v * (ratio_y) +
                                               (1 - dy_v) * (1 - ratio_y)) *
                                              (dz_v * (ratio_z) +
                                               (1 - dz_v) * (1 - ratio_z));

                                voxel_t* voxel_ptr_k = GetVoxelAtP(
                                        x_b, y_b, z_b, x_v_floor + dx_v,
                                        y_v_floor + dy_v, z_v_floor + dz_v,
                                        block_addr, cache);

                                if (enable_color && voxel_ptr_k &&
                                    voxel_ptr_k->GetWeight() > 0) {
                                    sum_weight_color += ratio;
                                    color_ptr[0] += ratio * voxel_ptr_k->GetR();
                                    color_ptr[1] += ratio * voxel_ptr_k->GetG();
                                    color_ptr[2] += ratio * voxel_ptr_k->GetB();
                                }

                                if (enable_normal) {
                                    // Central-difference TSDF gradient per
                                    // axis, accumulated with trilinear weight.
                                    for (int dim = 0; dim < 3; ++dim) {
                                        voxel_t* voxel_ptr_k_plus = GetVoxelAtP(
                                                x_b, y_b, z_b,
                                                x_v_floor + dx_v + (dim == 0),
                                                y_v_floor + dy_v + (dim == 1),
                                                z_v_floor + dz_v + (dim == 2),
                                                block_addr, cache);
                                        voxel_t* voxel_ptr_k_minus =
                                                GetVoxelAtP(x_b, y_b, z_b,
                                                            x_v_floor + dx_v -
                                                                    (dim == 0),
                                                            y_v_floor + dy_v -
                                                                    (dim == 1),
                                                            z_v_floor + dz_v -
                                                                    (dim == 2),
                                                            block_addr, cache);

                                        bool valid = false;
                                        if (voxel_ptr_k_plus &&
                                            voxel_ptr_k_plus->GetWeight() > 0) {
                                            normal_ptr[dim] +=
                                                    ratio *
                                                    voxel_ptr_k_plus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }

                                        if (voxel_ptr_k_minus &&
                                            voxel_ptr_k_minus->GetWeight() >
                                                    0) {
                                            normal_ptr[dim] -=
                                                    ratio *
                                                    voxel_ptr_k_minus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }
                                        sum_weight_normal += valid ? ratio : 0;
                                    }
                                }  // if (enable_normal)
                            }      // loop over 8 neighbors

                            if (enable_color && sum_weight_color > 0) {
                                sum_weight_color *= 255.0;
                                color_ptr[0] /= sum_weight_color;
                                color_ptr[1] /= sum_weight_color;
                                color_ptr[2] /= sum_weight_color;
                            }
                            if (enable_normal && sum_weight_normal > 0) {
                                normal_ptr[0] /= sum_weight_normal;
                                normal_ptr[1] /= sum_weight_normal;
                                normal_ptr[2] /= sum_weight_normal;
                                float norm =
                                        sqrt(normal_ptr[0] * normal_ptr[0] +
                                             normal_ptr[1] * normal_ptr[1] +
                                             normal_ptr[2] * normal_ptr[2]);
                                // Rotate the normalized normal into the
                                // camera frame.
                                w2c_transform_indexer.Rotate(
                                        normal_ptr[0] / norm,
                                        normal_ptr[1] / norm,
                                        normal_ptr[2] / norm, normal_ptr + 0,
                                        normal_ptr + 1, normal_ptr + 2);
                            }
                        }  // if (color or normal)
                    }      // if (tsdf < 0)
                });
    });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
} // namespace tsdf
} // namespace kernel
} // namespace geometry
} // namespace t
} // namespace open3d
|
opencl_keychain_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keychain);
#else
#include <stdint.h>
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"
#define FORMAT_LABEL "keychain-opencl"
#define FORMAT_NAME "Mac OS X Keychain"
#define FORMAT_TAG "$keychain$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define SWAP(n) \
(((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*salt_struct)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
#define SALTLEN 20
#define IVLEN 8
#define CTLEN 48
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} keychain_password;
typedef struct {
uint32_t v[32/4];
} keychain_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint32_t skip_bytes;
uint8_t length;
uint8_t salt[64];
} keychain_salt;
static int *cracked;
static int any_cracked;
static struct fmt_main *self;
static struct fmt_tests keychain_tests[] = {
{"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"},
// these were generated with pass_gen.pl. NOTE, they ALL have the data (which gets encrypted) which was decrypted from the above hash.
{"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"},
{"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"},
{"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"},
{"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"},
{NULL}
};
static struct custom_salt {
unsigned char salt[SALTLEN];
unsigned char iv[IVLEN];
unsigned char ct[CTLEN];
} *salt_struct;
static cl_int cl_error;
static keychain_password *inbuffer;
static keychain_hash *outbuffer;
static keychain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
	/* Upper bound on the local work size for crypt_kernel (autotuner hook). */
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/*
 * Allocate host-side buffers and OpenCL device buffers sized for 'gws'
 * candidate passwords, and bind the device buffers to crypt_kernel's
 * arguments. Invoked (and re-invoked) by the autotuner as gws changes.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(keychain_password) * gws;
	outsize = sizeof(keychain_hash) * gws;
	settingsize = sizeof(keychain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/*
 * Release the device buffers and host buffers created by create_clobj().
 * 'cracked' doubles as the "already allocated" flag, so this is safe to
 * call more than once.
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}
/* Tear down all OpenCL state if the format was autotuned/initialized. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}
/* Format init hook: remember the fmt_main and prepare the OpenCL device. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/*
 * Format reset hook: on first call, build the PBKDF2-HMAC-SHA1 kernel with
 * format-specific compile-time sizes and run the shared autotuner to pick
 * the work-group geometry.
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(keychain_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/*
 * Validate a candidate hash line. Expected layout:
 *   $keychain$*<salt:20 bytes hex>*<iv:8 bytes hex>*<ct:48 bytes hex>
 * Returns 1 when the line is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;

	/* strtokm mutates its input, so work on a copy. */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p, &extra) != SALTLEN * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p, &extra) != IVLEN * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	if (hexlenl(p, &extra) != CTLEN * 2 || extra)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse the salt, IV and ciphertext hex fields out of a (pre-validated)
 * hash line into a static custom_salt. Note this local 'salt_struct'
 * intentionally shadows the file-scope pointer of the same name; the
 * returned buffer is reused across calls.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt *salt_struct;

	if (!salt_struct)
		salt_struct = mem_calloc_tiny(sizeof(struct custom_salt),
		                              MEM_ALIGN_WORD);
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$keychain$*" */
	p = strtokm(ctcopy, "*");
	/* Decode two hex digits per output byte. */
	for (i = 0; i < SALTLEN; i++)
		salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < IVLEN; i++)
		salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < CTLEN; i++)
		salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)salt_struct;
}
static void set_salt(void *salt)
{
salt_struct = (struct custom_salt *)salt;
memcpy((char*)currentsalt.salt, salt_struct->salt, 20);
currentsalt.length = 20;
currentsalt.iterations = 1000;
currentsalt.outlen = 24;
currentsalt.skip_bytes = 0;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, ¤tsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
#undef set_key
/* Store one candidate password (truncated to PLAINTEXT_LENGTH) for the GPU. */
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/*
 * Return the stored candidate at 'index' as a NUL-terminated string.
 * The returned buffer is static and overwritten by the next call.
 */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}
/*
 * Attempt to decrypt the keychain blob with a candidate 24-byte 3DES key.
 * Decrypts 'data' (CTLEN bytes) with 3DES-EDE-CBC and checks PKCS#7
 * padding. Returns 0 on a plausible decryption, -1 otherwise.
 */
static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data)
{
	unsigned char out[CTLEN];
	DES_cblock key1, key2, key3;
	DES_cblock ivec;
	DES_key_schedule ks1, ks2, ks3;

	memset(out, 0, sizeof(out));
	/* Split the derived key into the three single-DES keys. */
	memcpy(key1, key, 8);
	memcpy(key2, key + 8, 8);
	memcpy(key3, key + 16, 8);
	DES_set_key((DES_cblock *) key1, &ks1);
	DES_set_key((DES_cblock *) key2, &ks2);
	DES_set_key((DES_cblock *) key3, &ks3);
	memcpy(ivec, iv, 8);
	DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);

	/* possible bug here, is this assumption (pad of 4) always valid? */
	if (out[47] != 4 || check_pkcs_pad(out, CTLEN, 8) < 0)
		return -1;

	return 0;
}
#if 0
//#ifdef DEBUG
/* Debug helper (compiled out): dump 'len' bytes as lowercase hex. */
static void print_hex(unsigned char *str, int len)
{
	int i;
	for (i = 0; i < len; ++i)
		printf("%02x", str[i]);
	printf("\n");
}
#endif
/*
 * Main cracking loop: push 'count' candidates to the GPU, derive the
 * PBKDF2 keys there, read them back, then verify each derived key on the
 * CPU by attempting the 3DES decryption (kcdecrypt). Marks hits in
 * cracked[] and any_cracked.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* Clear results from the previous batch. */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	if (!kcdecrypt((unsigned char*)outbuffer[index].v,
	               salt_struct->iv, salt_struct->ct))
	{
		cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
		any_cracked |= 1;
	}
	return count;
}
/* Quick check: did crypt_all() crack anything at all in this batch? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Was the candidate at 'index' cracked? (set by crypt_all) */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/*
 * Exact comparison hook. The format verifies candidates fully inside
 * crypt_all() via the padding check (and is registered FMT_NOT_EXACT),
 * so there is nothing further to verify here: always report a match.
 */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
/* Format registration table: parameters first, then the method hooks. */
struct fmt_main fmt_opencl_keychain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_NOT_EXACT: the pad check in kcdecrypt can false-positive. */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		keychain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
scheduled_clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * OpenMP teaching demo: sums a[0..n-1] in a worksharing loop with
 * firstprivate/lastprivate(suma) and schedule(dynamic, chunk), then prints
 * the ICVs (dyn-var, nthreads-var, thread-limit-var, run-sched-var) both
 * inside and outside the parallel region.
 *
 * Usage: prog <iterations> <chunk>
 *
 * Note: because suma is firstprivate/lastprivate (not a reduction), the
 * value printed after the region is only the partial sum accumulated by
 * the thread that ran the last iteration — that is the point of the demo.
 */
int main(int argc, char const *argv[])
{
	int i, n = 200, chunk, a[200], suma = 0;

	/* BUGFIX: both argv[1] (iterations) and argv[2] (chunk) are read,
	 * so two arguments are required; the old check (argc < 2) allowed
	 * an out-of-bounds read of argv[2]. */
	if (argc < 3) {
		fprintf(stderr, "\nFalta iteraciones o chunk\n");
		exit(-1);
	}

	n = atoi(argv[1]);
	if (n > 200)
		n = 200;		/* a[] holds at most 200 elements */
	if (n < 0)
		n = 0;			/* guard against negative input */
	chunk = atoi(argv[2]);
	if (chunk < 1) {
		fprintf(stderr, "\nChunk debe ser >= 1\n");
		exit(-1);
	}

	for (i = 0; i < n; i++)
		a[i] = i;

	#pragma omp parallel
	{
		#pragma omp for firstprivate(suma)\
			lastprivate(suma) schedule(dynamic, chunk)
		for (i = 0; i < n; i++) {
			suma = suma + a[i];
			printf("thread %d suma a[%d] suma=%d\n",
			       omp_get_thread_num(), i, /*a[i],*/ suma);
		}

		/* Report the ICVs as seen inside the parallel region. */
		#pragma omp master
		{
			printf("Dentro de 'parallel':\n");
			printf("dyn-var: %d\n", omp_get_dynamic());
			printf("nthreads-var: %d\n", omp_get_max_threads());
			printf("thread-limit-var: %d\n", omp_get_thread_limit());
			omp_sched_t schedule_type;
			int chunk_size;
			omp_get_schedule(&schedule_type, &chunk_size);
			printf("run-sched-var:\n");
			if (schedule_type == omp_sched_static) printf("\tomp_sched_static\n");
			else if (schedule_type == omp_sched_dynamic) printf("\tomp_sched_dynamic\n");
			else if (schedule_type == omp_sched_guided) printf("\tomp_sched_guided\n");
			else /*if (schedule_type == omp_sched_auto)*/ printf("\tomp_sched_auto\n");
			printf("\tchunk: %d\n", chunk_size);
		}
	}

	printf("Fuera de 'parallel' suma = %d\n", suma);
	printf("dyn-var: %d\n", omp_get_dynamic());
	printf("nthreads-var: %d\n", omp_get_max_threads());
	printf("thread-limit-var: %d\n", omp_get_thread_limit());
	omp_sched_t schedule_type;
	int chunk_size;
	omp_get_schedule(&schedule_type, &chunk_size);
	printf("run-sched-var:\n");
	if (schedule_type == omp_sched_static) printf("\tomp_sched_static\n");
	else if (schedule_type == omp_sched_dynamic) printf("\tomp_sched_dynamic\n");
	else if (schedule_type == omp_sched_guided) printf("\tomp_sched_guided\n");
	else /*if (schedule_type == omp_sched_auto)*/ printf("\tomp_sched_auto\n");
	printf("\tchunk: %d\n", chunk_size);

	return 0;
}
GB_unop__identity_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_fc64
// op(A') function: GB_unop_tran__identity_bool_fc64
// C type: bool
// A type: GxB_FC64_t
// cast: bool cij = (creal (aij) != 0) || (cimag (aij) != 0)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op elementwise, casting each complex entry
// aij to bool as (creal(aij) != 0) || (cimag(aij) != 0). Handles both the
// dense case (Ab == NULL) and the bitmap case (entry present iff Ab[p]).
GrB_Info GB_unop_apply__identity_bool_fc64
(
    bool *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: the memcpy shortcut only applies when the op is the
        // identity with no typecast (not the case here, so cast per entry)
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose loop itself lives in the shared template GB_unop_transpose.c,
// which is specialized through the GB_* macros defined above (GB_ATYPE,
// GB_CTYPE, GB_CAST_OP, ...).  The roles of Workspaces, A_slice, and
// nworkspaces are consumed inside that template and are not visible here;
// see GB_unop_transpose.c for details.
GrB_Info GB_unop_tran__identity_bool_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out: caller falls back to the generic implementation
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
// In-place LU decomposition without pivoting: A = L*U.
//
// On return, the strictly lower triangle of A holds L (unit diagonal of L is
// implicit and not stored) and the upper triangle (including the diagonal)
// holds U.
//
// Parameters:
//   n   - order of the matrix (number of rows/columns actually factored)
//   lda - leading dimension (row stride) of A; must be >= n
//   A   - row-major n x n matrix, overwritten with the factors
//
// NOTE: no pivoting is performed, so a zero pivot Ak[k] causes a division
// by zero; the caller must supply a matrix for which plain Gaussian
// elimination is stable (e.g. diagonally dominant or SPD).
void LU_decomp(int const n, int const lda, double* const A) {
    // For each "hammer" (pivot) row k ...
    for (int k = 0; k < n; k++) {
        double* const Ak = A + k*lda;   // pointer to pivot row k
        // ... eliminate column k from every row below it.  Rows are
        // independent, so the i loop parallelizes cleanly; declaring the
        // loop variables in-loop (C99) makes them thread-private by
        // construction, with no need for an OpenMP private() clause.
        #pragma omp parallel for
        for (int i = k + 1; i < n; i++) {
            double* const Ai = A + i*lda;   // pointer to target row i
            // Scaling factor: becomes the (i,k) entry of L.
            Ai[k] /= Ak[k];
            // Rank-1 update of the trailing part of row i.
            for (int j = k + 1; j < n; j++)
                Ai[j] -= Ai[k]*Ak[j];
        }
    }
}
|
par_multi_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* hypre_ParAMGBuildMultipass
* This routine implements Stuben's direct interpolation with multiple passes.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int P_max_elmts,
HYPRE_Int weight_option,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParCSRCommPkg *tmp_comm_pkg;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = NULL;
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = NULL;
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = NULL;
HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
HYPRE_BigInt *col_map_offd = NULL;
HYPRE_Int num_cols_offd;
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_diag_j;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_offd_j = NULL;
HYPRE_Int num_sends = 0;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_Int *send_map_start;
HYPRE_Int *send_map_elmt;
HYPRE_Int *send_procs;
HYPRE_Int num_recvs = 0;
HYPRE_Int *recv_vec_start;
HYPRE_Int *recv_procs;
HYPRE_Int *new_recv_vec_start = NULL;
HYPRE_Int **Pext_send_map_start = NULL;
HYPRE_Int **Pext_recv_vec_start = NULL;
HYPRE_Int *Pext_start = NULL;
HYPRE_Int *P_ncols = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
HYPRE_Int *P_marker;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *C_array;
HYPRE_Int *C_array_offd = NULL;
HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */
HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first
point of pass j contained in pass_array */
HYPRE_Int *P_diag_start;
HYPRE_Int *P_offd_start = NULL;
HYPRE_Int **P_diag_pass;
HYPRE_Int **P_offd_pass = NULL;
HYPRE_Int **Pext_pass = NULL;
HYPRE_BigInt *big_temp_pass = NULL;
HYPRE_BigInt **new_elmts = NULL; /* new neighbors generated in each pass */
HYPRE_Int *new_counter = NULL; /* contains no. of new neighbors for
each pass */
HYPRE_Int *loc = NULL; /* contains locations for new neighbor
connections in int_o_buffer to avoid searching */
HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero
cols of off proc neighbors */
HYPRE_BigInt *Pext_send_buffer = NULL; /* used to collect global nonzero
col ids in P_diag for send_map_elmts */
HYPRE_Int *map_S_to_new = NULL;
/*HYPRE_Int *map_A_to_new = NULL;*/
HYPRE_Int *map_A_to_S = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_Int *permute = NULL;
HYPRE_BigInt *big_permute = NULL;
HYPRE_Int cnt;
HYPRE_Int cnt_nz;
HYPRE_Int total_nz;
HYPRE_Int pass;
HYPRE_Int num_passes;
HYPRE_Int max_num_passes = 10;
HYPRE_Int n_fine;
HYPRE_Int n_coarse = 0;
HYPRE_Int n_coarse_offd = 0;
HYPRE_Int n_SF = 0;
HYPRE_Int n_SF_offd = 0;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *assigned = NULL;
HYPRE_Int *assigned_offd = NULL;
HYPRE_Real *Pext_send_data = NULL;
HYPRE_Real *Pext_data = NULL;
HYPRE_Real sum_C, sum_N;
HYPRE_Real sum_C_pos, sum_C_neg;
HYPRE_Real sum_N_pos, sum_N_neg;
HYPRE_Real diagonal;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Int j_start;
HYPRE_Int j_end;
HYPRE_Int i,i1;
HYPRE_Int j,j1;
HYPRE_Int k,k1,k2,k3;
HYPRE_BigInt big_k1;
HYPRE_Int pass_array_size;
HYPRE_BigInt global_pass_array_size;
HYPRE_BigInt local_pass_array_size;
HYPRE_Int my_id, num_procs;
HYPRE_Int index, start;
HYPRE_BigInt my_first_cpt;
HYPRE_BigInt total_global_cpts;
HYPRE_Int p_cnt;
HYPRE_Int total_nz_offd;
HYPRE_Int cnt_nz_offd;
HYPRE_Int cnt_offd, cnt_new;
HYPRE_Int no_break;
HYPRE_Int not_found;
HYPRE_Int Pext_send_size;
HYPRE_Int Pext_recv_size;
HYPRE_Int old_Pext_send_size;
HYPRE_Int old_Pext_recv_size;
HYPRE_Int P_offd_size = 0;
HYPRE_Int local_index = -1;
HYPRE_Int new_num_cols_offd = 0;
HYPRE_Int num_cols_offd_P;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop;
HYPRE_Int pass_length;
HYPRE_Int *tmp_marker, *tmp_marker_offd;
HYPRE_Int *tmp_array, *tmp_array_offd;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * cnt_nz_per_thread;
HYPRE_Int * cnt_nz_offd_per_thread;
/* HYPRE_Real wall_time;
wall_time = hypre_MPI_Wtime(); */
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for(i=0; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] = 0;
cnt_nz_per_thread[i] = 0;
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for A and S. Also get size of fine grid.
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/* total_global_cpts = 0; */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
col_offd_S_to_A = NULL;
}
if (col_offd_S_to_A)
{
col_map_offd = col_map_offd_S;
num_cols_offd = num_cols_offd_S;
}
else
{
col_map_offd = col_map_offd_A;
num_cols_offd = num_cols_offd_A;
}
if (num_cols_offd_A)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
A_offd_j = hypre_CSRMatrixJ(A_offd);
}
if (num_cols_offd)
S_offd_j = hypre_CSRMatrixJ(S_offd);
n_fine = hypre_CSRMatrixNumRows(A_diag);
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
if (n_fine) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
n_coarse = 0;
n_SF = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == 1) n_coarse++;
else if (CF_marker[i] == -3) n_SF++;
pass_array_size = n_fine-n_coarse-n_SF;
if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size, HYPRE_MEMORY_HOST);
pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1, HYPRE_MEMORY_HOST);
if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
if (send_map_start[num_sends])
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_start[num_sends], HYPRE_MEMORY_HOST);
}
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = CF_marker[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (num_functions > 1)
{
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = dof_func[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
n_coarse_offd = 0;
n_SF_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd; i++)
if (CF_marker_offd[i] == 1) n_coarse_offd++;
else if (CF_marker_offd[i] == -3) n_SF_offd++;
if (num_cols_offd)
{
assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------------
* First Pass: determine the maximal size of P, and elementsPerRow[i].
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Assigned points are points for which we know an interpolation
* formula already, and which are thus available to interpolate from.
* assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending
* in which pass their interpolation formula is determined.
*
* pass_array contains the points ordered according to its pass, i.e.
* | C-points | points of pass 1 | points of pass 2 | ....
* C_points are points 0 through pass_pointer[1]-1,
* points of pass k (0 < k < num_passes) are contained in points
* pass_pointer[k] through pass_pointer[k+1]-1 of pass_array .
*
* pass_array is also used to avoid going through all points for each pass,
* i,e. at the bginning it contains all points in descending order starting
* with n_fine-1. Then starting from the last point, we evaluate whether
* it is a C_point (pass 0). If it is the point is brought to the front
* and the length of the points to be searched is shortened. This is
* done until the parameter cnt (which determines the first point of
* pass_array to be searched) becomes n_fine. Then all points have been
* assigned a pass number.
*-----------------------------------------------------------------------*/
cnt = 0;
p_cnt = pass_array_size-1;
P_diag_i[0] = 0;
P_offd_i[0] = 0;
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt; /* this C point is assigned index
coarse_counter on coarse grid,
and in column of P */
C_array[cnt++] = i;
assigned[i] = 0;
P_diag_i[i+1] = 1; /* one element in row i1 of P */
P_offd_i[i+1] = 0;
}
else if (CF_marker[i] == -1)
{
pass_array[p_cnt--] = i;
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
else
{
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{
big_buf_data[index] = (HYPRE_BigInt)fine_to_coarse[send_map_elmt[j]];
if (big_buf_data[index] > -1)
big_buf_data[index] += my_first_cpt;
index++;
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
new_recv_vec_start = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
if (n_coarse_offd)
C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);
cnt = 0;
new_recv_vec_start[0] = 0;
for (j = 0; j < num_recvs; j++)
{
for (i = recv_vec_start[j]; i < recv_vec_start[j+1]; i++)
{
if (CF_marker_offd[i] == 1)
{
map_S_to_new[i] = cnt;
C_array_offd[cnt] = i;
new_col_map_offd[cnt++] = fine_to_coarse_offd[i];
assigned_offd[i] = 0;
}
else
{
assigned_offd[i] = -1;
map_S_to_new[i] = -1;
}
}
new_recv_vec_start[j+1] = cnt;
}
cnt = 0;
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
if (col_offd_S_to_A)
{
map_A_to_S = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd_A; i++)
{
if (cnt < num_cols_offd && col_map_offd_A[i] == col_map_offd[cnt])
map_A_to_S[i] = cnt++;
else
map_A_to_S[i] = -1;
}
}
/*-----------------------------------------------------------------------
* Mark all local neighbors of C points as 'assigned'.
*-----------------------------------------------------------------------*/
pass_pointer[0] = 0;
pass_pointer[1] = 0;
total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */
total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */
cnt = 0;
cnt_offd = 0;
cnt_nz = 0;
cnt_nz_offd = 0;
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{
P_diag_i[i1+1]++;
cnt_nz++;
assigned[i1] = 1;
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{
P_offd_i[i1+1]++;
cnt_nz_offd++;
assigned[i1] = 1;
}
}
if (assigned[i1] == 1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
}
}
pass_pointer[2] = cnt;
/*-----------------------------------------------------------------------
* All local neighbors are assigned, now need to exchange the boundary
* info for assigned strong neighbors.
*-----------------------------------------------------------------------*/
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/*-----------------------------------------------------------------------
* Now we need to determine strong neighbors of points of pass 1, etc.
* we need to update assigned_offd after each pass
*-----------------------------------------------------------------------*/
pass = 2;
local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt);
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
while (global_pass_array_size && pass < max_num_passes)
{
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
no_break = 1;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
no_break = 0;
break;
}
}
if (no_break)
{
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
break;
}
}
}
}
/*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/
pass++;
pass_pointer[pass] = cnt;
local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt);
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
num_passes = pass;
P_diag_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); /* P_diag_pass[i] will contain
all column numbers for points of pass i */
P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST);
P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /* P_diag_start[i] contains
pointer to begin of column numbers in P_pass for point i,
P_diag_i[i+1] contains number of columns for point i */
P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
P_offd_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
if (cnt_nz_offd)
P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST);
else
P_offd_pass[1] = NULL;
new_elmts = hypre_CTAlloc(HYPRE_BigInt*, num_passes, HYPRE_MEMORY_HOST);
new_counter = hypre_CTAlloc(HYPRE_Int, num_passes+1, HYPRE_MEMORY_HOST);
new_counter[0] = 0;
new_counter[1] = n_coarse_offd;
new_num_cols_offd = n_coarse_offd;
new_elmts[0] = new_col_map_offd;
}
/*-----------------------------------------------------------------------
* Pass 1: now we consider points of pass 1, with strong C_neighbors,
*-----------------------------------------------------------------------*/
cnt_nz = 0;
cnt_nz_offd = 0;
/* JBS: Possible candidate for threading */
for (i=pass_pointer[1]; i < pass_pointer[2]; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{ P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; }
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{ P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; }
}
}
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
if (num_procs > 1)
{
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd+1, HYPRE_MEMORY_HOST);
if (num_cols_offd) Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
if (send_map_start[num_sends])
P_ncols = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd+1; i++)
{ Pext_i[i] = 0; }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < send_map_start[num_sends]; i++)
{ P_ncols[i] = 0; }
}
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
for (pass=2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
Pext_send_size = 0;
Pext_send_map_start[pass][0] = 0;
for (i=0; i < num_sends; i++)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE
#endif
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1];
Pext_send_size += P_ncols[j];
}
}
Pext_send_map_start[pass][i+1] = Pext_send_size;
}
comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg,
P_ncols, &Pext_i[1]);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST);
Pext_send_buffer = hypre_CTAlloc(HYPRE_BigInt, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
}
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_buffer[cnt_offd++] = my_first_cpt
+ (HYPRE_BigInt) P_diag_pass[pass-1][k];
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
k3 = 0;
while (k3 < pass-1)
{
if (k1 < new_counter[k3+1])
{
k2 = k1-new_counter[k3];
Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2];
break;
}
k3++;
}
}
}
}
}
if (num_procs > 1)
{
Pext_recv_size = 0;
Pext_recv_vec_start[pass][0] = 0;
cnt_offd = 0;
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j<recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
Pext_start[j] = cnt_offd;
cnt_offd += Pext_i[j+1];
}
}
Pext_recv_size = cnt_offd;
Pext_recv_vec_start[pass][i+1] = Pext_recv_size;
}
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
if (Pext_recv_size)
{
Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST);
new_elmts[pass-1] = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST);
}
else
{
Pext_pass[pass] = NULL;
new_elmts[pass-1] = NULL;
}
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(loc, HYPRE_MEMORY_HOST);
loc = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST);
hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST);
big_temp_pass = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (21, tmp_comm_pkg,
Pext_send_buffer, big_temp_pass);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
cnt_new = 0;
cnt_offd = 0;
/* JBS: Possible candidate for threading */
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j < recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
for (j1 = cnt_offd; j1 < cnt_offd+Pext_i[j+1]; j1++)
{
big_k1 = big_temp_pass[j1];
k2 = (HYPRE_Int)(big_k1 - my_first_cpt);
if (k2 > -1 && k2 < n_coarse)
{ Pext_pass[pass][j1] = -k2-1; }
else
{
not_found = 1;
k3 = 0;
while (k3 < pass-1 && not_found)
{
k2 = hypre_BigBinarySearch(new_elmts[k3], big_k1,
(new_counter[k3+1]-new_counter[k3]));
if (k2 > -1)
{
Pext_pass[pass][j1] = k2 + new_counter[k3];
not_found = 0;
}
else
{
k3++;
}
}
if (not_found)
{
new_elmts[pass-1][cnt_new] = big_k1;
loc[cnt_new++] = j1;
}
}
}
cnt_offd += Pext_i[j+1];
}
}
}
if (cnt_new)
{
hypre_BigQsortbi(new_elmts[pass-1],loc,0,cnt_new-1);
cnt = 0;
local_index = new_counter[pass-1];
Pext_pass[pass][loc[0]] = local_index;
for (i=1; i < cnt_new; i++)
{
if (new_elmts[pass-1][i] > new_elmts[pass-1][cnt])
{
new_elmts[pass-1][++cnt] = new_elmts[pass-1][i];
local_index++;
}
Pext_pass[pass][loc[i]] = local_index;
}
new_counter[pass] = local_index+1;
}
else if (num_procs > 1)
new_counter[pass] = new_counter[pass-1];
if (new_num_cols_offd < local_index+1)
{ new_num_cols_offd = local_index+1; }
pass_length = pass_pointer[pass+1] - pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd)
#endif
{
/* Thread by computing the sparsity structure for this pass only over
* each thread's range of rows. Rows are divided up evenly amongst
* the threads. The necessary thread-wise temporary arrays, like
* P_marker, are initialized and de-allocated internally to the
* parallel region. */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_length; }
else
{ thread_stop = (pass_length/num_threads)*(my_thread_num+1); }
thread_start += pass_pointer[pass];
thread_stop += pass_pointer[pass];
/* Local initializations */
cnt_nz = 0;
cnt_nz_offd = 0;
/* This block of code is to go to the top of the parallel region starting before
* the loop over num_passes. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); /* marks points to see if they're counted */
for (i=0; i < n_coarse; i++)
{ P_marker[i] = -1; }
if (new_num_cols_offd == local_index+1)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = -1; }
}
else if (n_coarse_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);
for (i=0; i < n_coarse_offd; i++)
{ P_marker_offd[i] = -1; }
}
/* Need some variables to store each threads cnt_nz and cnt_nz_offd, and
* then stitch things together as in par_interp.c
* This loop writes
* P_diag_i, P_offd_i: data parallel here, and require no special treatment
* P_diag_start, P_offd_start: are not data parallel, require special treatment
*/
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[k1] = i1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
j_start = 0;
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[-k1-1] = i1;
}
}
else if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
}
/* Update P_diag_start, P_offd_start with cumulative
* nonzero counts over all threads */
if(my_thread_num == 0)
{ max_num_threads[0] = num_threads; }
cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd;
cnt_nz_per_thread[my_thread_num] = cnt_nz;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
for(i = 1; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i-1];
cnt_nz_per_thread[i] += cnt_nz_per_thread[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num > 0)
{
/* update this thread's section of P_diag_start and P_offd_start
* with the num of nz's counted by previous threads */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] += cnt_nz_per_thread[my_thread_num-1];
P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num-1];
}
}
else /* if my_thread_num == 0 */
{
/* Grab the nz count for all threads */
cnt_nz = cnt_nz_per_thread[max_num_threads[0]-1];
cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0]-1];
/* Updated total nz count */
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
/* Allocate P_diag_pass and P_offd_pass for all threads */
P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST);
if (cnt_nz_offd)
P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST);
else if (num_procs > 1)
P_offd_pass[pass] = NULL;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* offset cnt_nz and cnt_nz_offd to point to the starting
* point in P_diag_pass and P_offd_pass for each thread */
if(my_thread_num > 0)
{
cnt_nz = cnt_nz_per_thread[my_thread_num-1];
cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num-1];
}
else
{
cnt_nz = 0;
cnt_nz_offd = 0;
}
/* Set P_diag_pass and P_offd_pass */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = k1;
P_marker[k1] = -i1-1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = -k1-1;
P_marker[-k1-1] = -i1-1;
}
}
else if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if ( (n_coarse_offd) || (new_num_cols_offd == local_index+1) )
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End parallel region */
}
hypre_TFree(loc, HYPRE_MEMORY_HOST);
hypre_TFree(P_ncols, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST);
hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST);
hypre_TFree(new_recv_vec_start, HYPRE_MEMORY_HOST);
hypre_TFree(cnt_nz_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(cnt_nz_offd_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, total_nz, HYPRE_MEMORY_DEVICE);
if (total_nz_offd)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, total_nz_offd, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, total_nz_offd, HYPRE_MEMORY_DEVICE);
}
for (i=0; i < n_fine; i++)
{
P_diag_i[i+1] += P_diag_i[i];
P_offd_i[i+1] += P_offd_i[i];
}
/* determine P for coarse points */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_coarse; i++)
{
i1 = C_array[i];
P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];
P_diag_data[P_diag_i[i1]] = 1.0;
}
if (weight_option) /*if this is set, weights are separated into
negative and positive offdiagonals and accumulated
accordingly */
{
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_pos = 0;
sum_C_neg = 0;
sum_N_pos = 0;
sum_N_neg = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
P_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
if (j1 != -1 && P_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
if (A_diag_data[j] < 0)
sum_C_neg += A_diag_data[j];
else
sum_C_pos += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
P_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
{
if (A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
if (j1 != -1 && P_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
if (A_offd_data[j] < 0)
sum_C_neg += A_offd_data[j];
else
sum_C_pos += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End Parallel Region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
/*if (!col_offd_S_to_A) hypre_TFree(map_A_to_new);*/
if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_diag_data[k]; }
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_offd_data[k]; }
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
C_array = NULL;
C_array_offd = NULL;
if (n_coarse)
{ C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); }
if (new_num_cols_offd > n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else if (n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
/* Loop over each thread's row-range */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_neg = 0;
sum_C_pos = 0;
sum_N_neg = 0;
sum_N_pos = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
C_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
C_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
P_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
P_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (P_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[C_array[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (j1 > -1 && P_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[C_array[-k1-1]] += alfa;
else
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
{
if ( A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST);
}
} /* End num_passes for-loop */
}
else /* no distinction between positive and negative offdiagonal element */
{
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
tmp_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
if (j1 != -1 && tmp_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
sum_C += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
tmp_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
sum_N += A_offd_data[j];
if (j1 != -1 && tmp_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
sum_C += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal != 0) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST);
} /* end OMP parallel region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_diag_data[k];
}
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_offd_data[k];
}
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
tmp_array = NULL;
if (n_coarse)
{ tmp_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); }
tmp_array_offd = NULL;
if (new_num_cols_offd > n_coarse_offd)
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);}
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
tmp_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
tmp_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
tmp_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
tmp_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (tmp_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[tmp_array[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (j1 > -1 && tmp_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[tmp_array[-k1-1]] += alfa;
else
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
sum_N += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_array, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_array_offd, HYPRE_MEMORY_HOST);
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST);
}
}
}
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_map_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_pass, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_start, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_i, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(assigned, HYPRE_MEMORY_HOST);
hypre_TFree(assigned_offd, HYPRE_MEMORY_HOST);
hypre_TFree(pass_pointer, HYPRE_MEMORY_HOST);
hypre_TFree(pass_array, HYPRE_MEMORY_HOST);
hypre_TFree(map_S_to_new, HYPRE_MEMORY_HOST);
hypre_TFree(map_A_to_S, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max
and/or keep at most <P_max_elmts> absolutely maximal coefficients per row */
if (trunc_factor != 0.0 || P_max_elmts != 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
}
P_offd_size = P_offd_i[n_fine];
num_cols_offd_P = 0;
if (P_offd_size)
{
if (new_num_cols_offd > num_cols_offd)
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = 0; }
num_cols_offd_P = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker_offd[index])
{
num_cols_offd_P++;
P_marker_offd[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P, HYPRE_MEMORY_HOST);
permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
big_permute = hypre_CTAlloc(HYPRE_BigInt, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
big_permute[i] = -1;
cnt = 0;
for (i=0; i < num_passes-1; i++)
{
for (j=new_counter[i]; j < new_counter[i+1]; j++)
{
if (P_marker_offd[j])
{
col_map_offd_P[cnt] = new_elmts[i][j-(HYPRE_BigInt)new_counter[i]];
big_permute[j] = col_map_offd_P[cnt++];
}
}
}
hypre_BigQsort0(col_map_offd_P,0,num_cols_offd_P-1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
{
big_k1 = big_permute[i];
if (big_k1 != -1)
permute[i] = hypre_BigBinarySearch(col_map_offd_P,big_k1,num_cols_offd_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{ P_offd_j[i] = permute[P_offd_j[i]]; }
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
for (i=0; i < num_passes-1; i++)
hypre_TFree(new_elmts[i], HYPRE_MEMORY_HOST);
}
hypre_TFree(permute, HYPRE_MEMORY_HOST);
hypre_TFree(big_permute, HYPRE_MEMORY_HOST);
hypre_TFree(new_elmts, HYPRE_MEMORY_HOST);
hypre_TFree(new_counter, HYPRE_MEMORY_HOST);
if (num_cols_offd_P)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P;
}
if (n_SF)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(P);
}
*P_ptr = P;
/* wall_time = hypre_MPI_Wtime() - wall_time;
hypre_printf("TOTAL TIME %1.2e \n",wall_time); */
/*-----------------------------------------------------------------------
* Build and return dof_func array for coarse grid.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Free mapping vector and marker array.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime();
#endif
return(0);
}
|
GB_binop__bset_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint16)
// C=scalar+B GB (_bind1st__bset_uint16)
// C=scalar+B' GB (_bind1st_tran__bset_uint16)
// C=A+scalar GB (_bind2nd__bset_uint16)
// C=A'+scalar GB (_bind2nd_tran__bset_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITSET (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, uint16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT16 || GxB_NO_BSET_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for this operator: the dense C += A+B kernel is only generated
// when the op is one of the arithmetic ops listed below; BSET is not among
// them, so the generator emitted the placeholder name (none) and compiled
// this function out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// all three matrices are dense; the template accumulates A+B into C
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask and no accumulator.
// The binary op applied entrywise is GB_BINOP, i.e.
// cij = GB_BITSET (aij, bij, uint16_t, 16), using nthreads threads.
// Returns GrB_NO_VALUE when this op/type combination was compiled out
// (GB_DISABLE), in which case the caller uses the generic method instead.
GrB_Info GB (_Cdense_ewise3_noaccum__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with the
// BSET operator.  B is traversed via the precomputed B_ek_slicing task
// decomposition (B_ntasks tasks over B_nthreads threads).
// Returns GrB_NO_VALUE when this op/type combination is disabled.
GrB_Info GB (_Cdense_accumB__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// subassign method 23: C += B where C is dense and B is sparse
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C with the BSET
// operator (cij = GB_BITSET (cij, b, uint16_t, 16)).
// p_bwork points to the scalar, passed as GB_void* and cast to uint16_t.
// Returns GrB_NO_VALUE when this op/type combination is disabled.
// NOTE: the auto-generated original contained a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block (which always returns);
// that dead statement has been removed.
GrB_Info GB (_Cdense_accumb__bset_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
// subassign method 22: C += b where C is dense and b is a scalar
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: the column-scale kernel C = A*D (D a diagonal matrix) is not
// generated for the BSET operator; the generator emitted the placeholder
// name (none) and wrapped this function in #if 0.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: the row-scale kernel C = D*B (D a diagonal matrix) is not
// generated for the BSET operator; the generator emitted the placeholder
// name (none) and wrapped this function in #if 0.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with C built in the
// requested sparsity format (C_sparsity).  The mask M may be structural
// (Mask_struct) and/or complemented (Mask_comp).  TaskList/C_ntasks/
// C_nthreads describe the parallel task decomposition; C_to_M, C_to_A and
// C_to_B map C's vectors to those of M, A, and B.  Ch_is_Mh indicates C's
// hyperlist equals M's hyperlist (semantics defined in GB_add_template.c).
// Returns GrB_NO_VALUE when this op/type combination is disabled.
GrB_Info GB (_AaddB__bset_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B, where C
// is sparse or hypersparse.  ewise_method selects the variant inside the
// meta-template; the mask may be structural (Mask_struct) and/or
// complemented (Mask_comp).  TaskList/C_ntasks/C_nthreads give the task
// decomposition, and C_to_M/C_to_A/C_to_B map C's vectors to M, A, and B.
// Returns GrB_NO_VALUE when this op/type combination is disabled.
GrB_Info GB (_AemultB_08__bset_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#M> = A.*B where A is sparse/hyper and B is
// bitmap/full.  flipxy tells this kernel the caller swapped the operands;
// because GB_BINOP_FLIP is 1 for BSET (the op is not commutative and has
// no pre-flipped variant), the template is instantiated twice, once with
// GB_FLIPPED 1 (fmult(y,x)) and once with GB_FLIPPED 0 (fmult(x,y)).
// Cp_kfirst and the A_ek_slicing decomposition drive the parallel scan.
// Returns GrB_NO_VALUE when this op/type combination is disabled.
GrB_Info GB (_AemultB_02__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// C<M> = A.*B (eWiseMult, method 04) for the BSET operator on uint16_t, when
// M is sparse/hypersparse and both A and B are bitmap/full.  Generated kernel.
GrB_Info GB (_AemultB_04__bset_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,                     // mask, sparse or hypersparse
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C = A.*B (eWiseMult) for the BSET operator on uint16_t, where C is bitmap.
// Generated kernel; body is in the included bitmap template.
GrB_Info GB (_AemultB_bitmap__bset_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitset (x, Bx [p]): apply the binary operator with the first
// argument bound to the scalar x, over all bnz entries of B.
GrB_Info GB (_bind1st__bset_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; NULL means all entries present
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    // index declared outside the loop for portability with older OpenMP
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitset (Ax [p], y): apply the binary operator with the second
// argument bound to the scalar y, over all anz entries of A.
GrB_Info GB (_bind2nd__bset_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; NULL means all entries present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // index declared outside the loop for portability with older OpenMP
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; it must not contain
// comments (backslash continuations).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_BITSET (x, aij, uint16_t, 16) ;\
}

// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__bset_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; it must not contain
// comments (backslash continuations).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_BITSET (aij, y, uint16_t, 16) ;\
}

// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__bset_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
conv_kernel.c | /*
* Copyright (C) 2015-2020 ETH Zurich and University of Bologna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include "pulp.h"
/*
Inputs, weights and outputs are represented in fixed-point Q1.7 unsigned format:
this means that each integer in [0-255] represents a real value in the range [0.0-1.0)
The relationship between the integer I and real R representations is given by
R = I * 2^-FRACTIONARY_BITS
*/
#define FRACTIONARY_BITS 7
#define ROUNDBIT (1 << (FRACTIONARY_BITS -1))
#define SATURATION 255

/*
 * Naive KxK convolution in Q1.7 fixed point.
 *
 * In_Img  : input image, row stride R
 * Out_Img : output image, row stride R (border pixels are left untouched)
 * R, C    : row stride and number of columns
 * lb, ub  : half-open range of rows [lb, ub) this call processes
 * Kernel  : KxK coefficients, row-major
 * K       : kernel size (assumed odd)
 *
 * Fixes vs. previous version:
 *  - per-pixel temporaries are declared inside the parallel loop body, so
 *    they are thread-private (previously they were shared across OpenMP
 *    threads: a data race under "#pragma omp parallel for");
 *  - column bounds and kernel indexing use K (previously hard-coded for
 *    K = 3: "c = 3/2" and "w = (i+1)*K + (j+1)"); behavior for K = 3 is
 *    unchanged.
 */
void __attribute__ ((noinline)) ConvKxK_Naive (uint8_t * In_Img, uint8_t * Out_Img, int R, int lb, int ub, int C, uint8_t * Kernel, int K)
{
    const int hk = K / 2;   // kernel half-width
    //image border is black (left untouched)
    #pragma omp parallel for
    for (int r = lb; r < ub; r++) {
        for (int c = hk; c < C - hk; c++) {
            int S = 0;
            /* Coordinate window for K = 3:
               (-1;-1) (-1;0) (-1;+1)
               ( 0;-1) ( 0;0) ( 0;+1)
               (+1;-1) (+1;0) (+1;+1)
            */
            for (int i = -hk; i <= hk; i++) {
                for (int j = -hk; j <= hk; j++) {
                    uint8_t data  = In_Img[(r+i)*R + (c+j)];
                    uint8_t coeff = Kernel[(i+hk)*K + (j+hk)];
                    S = S + (int)(coeff*data);
                }
            }
            // Normalization back to Q1.7 (truncating shift; ROUNDBIT is
            // available should rounding be wanted — kept truncating to
            // preserve existing numerical behavior)
            S = S >> FRACTIONARY_BITS;
            // Saturation to [0, SATURATION]
            S = S > SATURATION ? SATURATION : S;
            S = S < 0 ? 0 : S;
            Out_Img[r*R + c] = (uint8_t)(S);
        }
    }
}
|
nvptx_target_printf_codegen.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// expected-no-diagnostics
extern int printf(const char *, ...);
// Check a simple call to printf end-to-end.
int CheckSimple() { // NOTE: only trailing comments here — inserting lines would shift the _l13 label in the CHECKs
#pragma omp target
  {
    // printf in master-only basic block.
    const char* fmt = "%d %lld %f";
    printf(fmt, 1, 2ll, 3.0); // lowered to __llvm_omp_vprintf per the CHECK lines below
  }
  return 0;
}
void CheckNoArgs() { // NOTE: only trailing comments here — inserting lines would shift the _l25 label in the CHECKs
#pragma omp target
  {
    // printf in master-only basic block.
    printf("hello, world!"); // zero varargs: expects a null args pointer and size 0 in the CHECKs
  }
}
// Check that printf's alloca happens in the entry block, not inside the if
// statement.
int foo;
void CheckAllocaIsInEntryBlock() { // NOTE: only trailing comments — inserting lines would shift the _l36 label in the CHECKs
#pragma omp target
  {
    if (foo) { // conditional call: the CHECKs verify the printf_args alloca still lands in the entry block
      printf("%d", 42);
    }
  }
}
//
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckSimple_l13
// CHECK-64-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[FMT:%.*]] = alloca i8*, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS:%.*]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i64 0, i64 0), i8** [[FMT]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load i8*, i8** [[FMT]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 0
// CHECK-64-NEXT: store i32 1, i32* [[TMP2]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 1
// CHECK-64-NEXT: store i64 2, i64* [[TMP3]], align 8
// CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 2
// CHECK-64-NEXT: store double 3.000000e+00, double* [[TMP4]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = bitcast %printf_args* [[TMP]] to i8*
// CHECK-64-NEXT: [[TMP6:%.*]] = call i32 @__llvm_omp_vprintf(i8* [[TMP1]], i8* [[TMP5]], i32 24)
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckNoArgs_l25
// CHECK-64-SAME: () #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str1, i64 0, i64 0), i8* null, i32 0)
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckAllocaIsInEntryBlock_l36
// CHECK-64-SAME: (i64 noundef [[FOO:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[FOO_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS_0:%.*]], align 8
// CHECK-64-NEXT: store i64 [[FOO]], i64* [[FOO_ADDR]], align 8
// CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[FOO_ADDR]] to i32*
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK-64-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-64-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK-64: if.then:
// CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS_0]], %printf_args.0* [[TMP]], i32 0, i32 0
// CHECK-64-NEXT: store i32 42, i32* [[TMP2]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = bitcast %printf_args.0* [[TMP]] to i8*
// CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str2, i64 0, i64 0), i8* [[TMP3]], i32 4)
// CHECK-64-NEXT: br label [[IF_END]]
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
// CHECK-64: if.end:
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
//
//
//
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckSimple_l13
// CHECK-32-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[FMT:%.*]] = alloca i8*, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS:%.*]], align 8
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8** [[FMT]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i8*, i8** [[FMT]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 0
// CHECK-32-NEXT: store i32 1, i32* [[TMP2]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 1
// CHECK-32-NEXT: store i64 2, i64* [[TMP3]], align 8
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 2
// CHECK-32-NEXT: store double 3.000000e+00, double* [[TMP4]], align 8
// CHECK-32-NEXT: [[TMP5:%.*]] = bitcast %printf_args* [[TMP]] to i8*
// CHECK-32-NEXT: [[TMP6:%.*]] = call i32 @__llvm_omp_vprintf(i8* [[TMP1]], i8* [[TMP5]], i32 24)
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckNoArgs_l25
// CHECK-32-SAME: () #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str1, i32 0, i32 0), i8* null, i32 0)
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckAllocaIsInEntryBlock_l36
// CHECK-32-SAME: (i32 noundef [[FOO:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[FOO_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS_0:%.*]], align 8
// CHECK-32-NEXT: store i32 [[FOO]], i32* [[FOO_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[FOO_ADDR]], align 4
// CHECK-32-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-32-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK-32: if.then:
// CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS_0]], %printf_args.0* [[TMP]], i32 0, i32 0
// CHECK-32-NEXT: store i32 42, i32* [[TMP2]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = bitcast %printf_args.0* [[TMP]] to i8*
// CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str2, i32 0, i32 0), i8* [[TMP3]], i32 4)
// CHECK-32-NEXT: br label [[IF_END]]
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
// CHECK-32: if.end:
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
//
|
magnitude.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include "parmt_config.h"
#include "compearth.h"
#ifdef PARMT_USE_INTEL
#include <mkl.h>
#include <mkl_lapacke.h>
#include <mkl_cblas.h>
#include <ipps.h>
#else
#include <lapacke.h>
#include <cblas.h>
#endif
#include "iscl/array/array.h"
#include "iscl/linalg/linalg.h"
#include "iscl/memory/memory.h"
#include "iscl/statistics/statistics.h"
#include "iscl/sorting/sorting.h"
static double median_sorted_array(int n, const double *__restrict x);
static int setG64f(const int npts,
const double *__restrict__ Gxx,
const double *__restrict__ Gyy,
const double *__restrict__ Gzz,
const double *__restrict__ Gxy,
const double *__restrict__ Gxz,
const double *__restrict__ Gyz,
double *__restrict__ G);
#define LDG 8
/*!
 * Computes magnitudes for a set of candidate moment tensors by solving
 * [G*m]*M0 = d for the scalar moment M0 in the L1 norm via iteratively
 * reweighted least squares (p = 1).
 *
 * Fixes vs. previous version:
 *  - the moment-tensor copy loop previously wrote m6[0] on every iteration
 *    (leaving m6[1..5] uninitialized stack garbage); it now copies all six
 *    components;
 *  - signalPtr is no longer dereferenced before nobs is validated.
 *
 * Returns 0 on success, -1 if there are no observations.
 */
int parmt_computeL1Magnitude64f(const int ldm,
                                const int nobs,
                                const int nmtSolve,
                                const int maxit, const double eps,
                                const double tol,
                                const int *__restrict__ signalPtr,
                                const int *__restrict__ lags,
                                const int *__restrict__ mtPtr,
                                const double *__restrict__ Gxx,
                                const double *__restrict__ Gyy,
                                const double *__restrict__ Gzz,
                                const double *__restrict__ Gxy,
                                const double *__restrict__ Gxz,
                                const double *__restrict__ Gyz,
                                const double *__restrict__ mts,
                                const double *__restrict__ d,
                                double *__restrict__ mags)
{
    const char *fcnm = "parmt_computeL1Magnitude64f\0";
    double *G, *est, *obs, *wts, xmag;
    double m6[8] __attribute__((aligned(64)));
    int i, i1, i2, ierr, imt, jmt, k, maxlag, nptsAll, nptsPad;
    bool luseLag;
    const double p = 1.0;       // L1 norm for the IRLS solver
    const double one = 1.0;
    const double zero = 0.0;
    // Validate before touching signalPtr
    if (nobs < 1)
    {
        printf("%s: Error no observations\n", fcnm);
        return -1;
    }
    nptsAll = signalPtr[nobs];
    // Get the max number of lags
    maxlag = 0;
    luseLag = false;
    if (lags != NULL)
    {
        luseLag = true;
        maxlag = lags[array_absArgmax32i(nobs, lags, &ierr)];
    }
    if (maxlag == 0){luseLag = false;}
    nptsPad = nptsAll + nobs*maxlag;
    G   = memory_calloc64f(nptsPad*LDG);
    obs = memory_calloc64f(nptsPad);
    est = memory_calloc64f(nptsPad);
    wts = memory_calloc64f(nptsPad);
    // Insert the Green's functions and data into the moment tensor matrix
    if (!luseLag)
    {
        for (k=0; k<nobs; k++)
        {
            i1 = signalPtr[k];
            i2 = signalPtr[k+1];
            for (i=i1; i<i2; i++)
            {
                G[i*LDG+0] = Gxx[i];
                G[i*LDG+1] = Gyy[i];
                G[i*LDG+2] = Gzz[i];
                G[i*LDG+3] = Gxy[i];
                G[i*LDG+4] = Gxz[i];
                G[i*LDG+5] = Gyz[i];
                obs[i] = d[i];
            }
        }
    }
    else
    {
        // TODO(review): lag-aware assembly was never implemented; when
        // luseLag is true the system below is built from zeros.
    }
    for (imt=0; imt<nmtSolve; imt++)
    {
        // Extract all six moment tensor components
        jmt = mtPtr[imt];
        for (i=0; i<6; i++)
        {
            m6[i] = mts[ldm*jmt+i];
        }
        // Remove the scalar moment
        compearth_CMT2m0(1, 1, m6, &xmag);
        cblas_dscal(6, 1.0/xmag, m6, 1);
        // Create the linear model [G*m]*M0 = [est]*{M0} = {obs}
        cblas_dgemv(CblasRowMajor, CblasNoTrans,
                    nptsAll, 6, one, G, LDG, m6, 1, zero, est, 1);
        // Solve for magnitude in the L1 norm
        ierr = linalg_irls64f_work(nptsAll, 1,
                                   maxit, p,
                                   eps, tol,
                                   est, obs, &xmag, wts);
        mags[imt] = xmag;
        double xmagMw;
        compearth_m02mw(1, CE_KANAMORI_1978, &xmag, &xmagMw);
        // NOTE(review): diagnostic print retained for output compatibility
        printf("%f\n", xmagMw);
    }
    memory_free64f(&wts);
    memory_free64f(&G);
    memory_free64f(&obs);
    memory_free64f(&est);
    return 0;
}
/*!
 * Computes per-station magnitudes for nmt candidate moment tensors by
 * forming the residual R = G*M - D in blocks of nblock tensors and taking
 * the median absolute residual per tensor.  The sort permutation from the
 * previous column is reused as a first guess to skip re-sorting when the
 * residual ordering is unchanged.
 *
 * Returns 0 on success, -1 if G cannot be assembled.
 */
int parmt_computeL1StationMagnitude64f(const int ldm, const int nmt,
                                       const int npts, const int nblock,
                                       const double *__restrict__ Gxx,
                                       const double *__restrict__ Gyy,
                                       const double *__restrict__ Gzz,
                                       const double *__restrict__ Gxy,
                                       const double *__restrict__ Gxz,
                                       const double *__restrict__ Gyz,
                                       const double *__restrict__ mts,
                                       const double *__restrict__ d,
                                       double *__restrict__ mags)
{
    const char *fcnm = "parmt_computeL1StationMagnitude64f\0";
    double *G, *M, *R, *Dmat, *res, *resSort, xmag, xopt;
    const double one = 1.0;
    const double zero = 0.0;
    const double negOne =-one;
    int *perm;
    bool lsorted;
    int i, ic, idx, ierr, imt, jdx, jmt, mblock, Mrows, Ncols;
    const int Kcols = 6; // Number of columns of G
    Mrows = npts;
    mblock = nblock;// + computePadding6f(npts);
    // workspace: G is npts x 6 (padded to LDG), R/Dmat are npts x mblock
    G = memory_calloc64f(LDG*npts);
    R = memory_calloc64f(mblock*npts);
    M = memory_calloc64f(mblock*8);
    Dmat = memory_calloc64f(mblock*npts);
    res = memory_calloc64f(npts);
    resSort = memory_calloc64f(npts);
    perm = memory_calloc32i(npts);
    for (i=0; i<npts; i++){perm[i] = i;}
    // Set the observations: replicate d into every column of Dmat
    for (i=0; i<npts; i++)
    {
        for (ic=0; ic<nblock; ic++)
        {
            Dmat[mblock*i+ic] = d[i];
        }
    }
#ifdef __INTEL_COMPILER
    __assume_aligned(G, 64);
#endif
    ierr = setG64f(npts, Gxx, Gyy, Gzz, Gxy, Gxz, Gyz, G);
    if (ierr != 0)
    {
        printf("%s: Failed to set G\n", fcnm);
        return -1;
    }
    int nsorted = 0;    // diagnostic: how often the cached permutation sufficed
    for (jmt=0; jmt<nmt; jmt=jmt+nblock)
    {
        Ncols = MIN(nblock, nmt - jmt); // Number of columns of M
        // Set the observations
        cblas_dcopy(npts*mblock, Dmat, 1, R, 1);
        // Pack the next Ncols moment tensors into M (6 x mblock, row-major)
        for (i=0; i<6; i++)
        {
            for (ic=0; ic<Ncols; ic++)
            {
                imt = jmt + ic;
                idx = ldm*imt + i;
                jdx = mblock*i;
                M[jdx+ic] = mts[idx];///xmag;
            }
        }
        // Compute R = GM - D = GM - R
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    Mrows, Ncols, Kcols, one, G, LDG,
                    M, mblock, negOne, R, mblock);
        //lsorted = false;
        //for (i=0; i<npts; i++){perm[i] = i;}
        // Sort the columns of the residual matrix
        for (ic=0; ic<Ncols; ic++)
        {
            imt = jmt + ic;
            // Compute the absolute value of the residual
            for (i=0; i<npts; i++)
            {
                res[i] = fabs(R[ic+i*mblock]);
            }
            //cblas_dcopy(npts, &R[ic], mblock, res, 1);
            // Apply the old permutation; if it still sorts res, skip argsort
            sorting_applyPermutation64f_work(npts, perm, res, resSort);
            lsorted = sorting_issorted64f(npts, resSort, SORT_ASCENDING, &ierr);
            if (!lsorted)
            {
                sorting_argsort64f_work(npts, res, SORT_ASCENDING, perm);
                sorting_applyPermutation64f_work(npts, perm, res, resSort);
            }
            else
            {
                nsorted = nsorted + 1;
            }
            // Compute the median of |residual|, scale by the scalar moment,
            // and convert to moment magnitude
            xopt = median_sorted_array(npts, resSort);
            //compearth_CMT2mw(1, 1, &mts[8*imt], &xmag);
            compearth_CMT2m0(1, 1, &mts[8*imt], &xmag);
            xopt = xopt*xmag;
            compearth_m02mw(1, CE_KANAMORI_1978, &xopt, &mags[imt]);
        }
    }
    printf("%d %d\n", nsorted, nmt);
    memory_free32i(&perm);
    memory_free64f(&G);
    memory_free64f(&R);
    memory_free64f(&M);
    memory_free64f(&Dmat);
    memory_free64f(&res);
    memory_free64f(&resSort);
    return 0;
}
/*!
 * Returns the median of an array x that is already sorted ascending.
 * For even n this is the mean of the two middle elements; for odd n it is
 * the middle element.
 *
 * Fix: parity was tested with fmod(n, 2), which relies on <math.h> (not
 * visibly included here) and a double round-trip; the integer remainder
 * operator is exact and needs no header.
 */
static double median_sorted_array(int n, const double *__restrict x)
{
    const int n2 = n/2;
    // Even -> average middle two elements
    if (n % 2 == 0)
    {
        return 0.5*(x[n2-1] + x[n2]);
    }
    // Odd -> middle element
    return x[n2];
}
/*!
 * Interleaves the six Green's function components into the row-major
 * matrix G (npts rows, leading dimension LDG = 8; columns 6 and 7 unused).
 * Uses the aligned SIMD path when every array is 64-byte aligned.
 *
 * Fixes vs. previous version: the NULL check omitted Gyz (its error message
 * existed but the condition never tested it, so a NULL Gyz was dereferenced
 * below); "ERror" typo corrected.
 *
 * Returns 0 on success, -1 on invalid input.
 */
static int setG64f(const int npts,
                   const double *__restrict__ Gxx,
                   const double *__restrict__ Gyy,
                   const double *__restrict__ Gzz,
                   const double *__restrict__ Gxy,
                   const double *__restrict__ Gxz,
                   const double *__restrict__ Gyz,
                   double *__restrict__ G)
{
    const char *fcnm = "setG64f\0";
    int i;
    bool lalign;
    if (Gxx == NULL || Gyy == NULL || Gzz == NULL || Gxy == NULL ||
        Gxz == NULL || Gyz == NULL || G == NULL || npts < 1)
    {
        if (Gxx == NULL){printf("%s: Error Gxx is NULL\n", fcnm);}
        if (Gyy == NULL){printf("%s: Error Gyy is NULL\n", fcnm);}
        if (Gzz == NULL){printf("%s: Error Gzz is NULL\n", fcnm);}
        if (Gxy == NULL){printf("%s: Error Gxy is NULL\n", fcnm);}
        if (Gxz == NULL){printf("%s: Error Gxz is NULL\n", fcnm);}
        if (Gyz == NULL){printf("%s: Error Gyz is NULL\n", fcnm);}
        if (npts < 1)
        {
            printf("%s: No points in Green's functions\n", fcnm);
        }
        return -1;
    }
    // Pick the SIMD path: aligned only if every array is 64-byte aligned
    lalign = true;
    if (memory_isAligned(Gxx, 64) != 1 || memory_isAligned(Gyy, 64) != 1 ||
        memory_isAligned(Gzz, 64) != 1 || memory_isAligned(Gxy, 64) != 1 ||
        memory_isAligned(Gxz, 64) != 1 || memory_isAligned(Gyz, 64) != 1 ||
        memory_isAligned(G, 64) != 1)
    {
        lalign = false;
    }
    if (lalign)
    {
        #pragma omp simd aligned(G, Gxx, Gyy, Gzz, Gxy, Gxz, Gyz: 64)
        for (i=0; i<npts; i++)
        {
            G[LDG*i+0] = Gxx[i];
            G[LDG*i+1] = Gyy[i];
            G[LDG*i+2] = Gzz[i];
            G[LDG*i+3] = Gxy[i];
            G[LDG*i+4] = Gxz[i];
            G[LDG*i+5] = Gyz[i];
        }
    }
    else
    {
        #pragma omp simd
        for (i=0; i<npts; i++)
        {
            G[LDG*i+0] = Gxx[i];
            G[LDG*i+1] = Gyy[i];
            G[LDG*i+2] = Gzz[i];
            G[LDG*i+3] = Gxy[i];
            G[LDG*i+4] = Gxz[i];
            G[LDG*i+5] = Gyz[i];
        }
    }
    return 0;
}
|
relu1_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: bzhang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
// ReLU1 reference kernel: copy input to output with every element clamped
// to [-1, 1].  Channels are processed in parallel.  Always returns 0.
int ref_relu1_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    // NOTE(review): width comes from the input dims and height from the
    // output dims; the shapes match after reshape(), but confirm intent.
    int w = input_tensor->dims[3];
    int h = output_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int plane = h * w; // elements per channel
    float* in = input_tensor->data;
    float* out = output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* s = in + plane * q;
        float* d = out + plane * q;
        for (int i = 0; i < plane; i++)
        {
            float v = s[i];
            if (v > 1)
                v = 1;
            else if (v < -1)
                v = -1;
            d[i] = v;
        }
    }
    return 0;
}
// No per-node setup is needed for the reference ReLU1 op; always succeeds.
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
// Nothing was allocated in init_node, so there is nothing to release.
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
// Executes the node: clamps the input tensor into the output tensor.
// Fix: the kernel's status is now propagated instead of being discarded
// (it is currently always 0, so callers see no behavioral change).
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    return ref_relu1_fp32(input_tensor, output_tensor, exec_graph->num_thread);
}
// ReLU1 is element-wise, so the output shape simply mirrors the input shape.
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* graph = ir_node->graph;
    struct tensor* in = get_ir_graph_tensor(graph, ir_node->input_tensors[0]);
    struct tensor* out = get_ir_graph_tensor(graph, ir_node->output_tensors[0]);
    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
// Advertise that this reference implementation can handle the node.
// NOTE(review): OPS_SCORE_CANDO is presumably the baseline score so that
// optimized device kernels outrank it — confirm against the scoring enum.
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}
// Node-ops vtable for the reference ReLU1 kernel; prerun/postrun are unused.
static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
// Registers the reference ReLU1 implementation for the OP_RELU1 operator.
int register_relu1_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}
// Removes the reference ReLU1 implementation for the OP_RELU1 operator.
int unregister_relu1_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}
|
diagmm_x_dia_n_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// Computes y := alpha * D * x + beta * y for a DIA-format matrix, where x
// and y are dense row-major matrices with `columns` columns and leading
// dimensions ldx/ldy.  Only the main diagonal (distance 0) contributes —
// presumably because this is the diagonal-matrix ("diagmm") kernel; other
// stored diagonals are deliberately skipped.  TODO(review): confirm.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();
    // Scale y by beta (rows parallelized)
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < mat->rows; r++)
        for(ALPHA_INT c = 0; c < columns; c++)
            alpha_mul(y[index2(r,c,ldy)],y[index2(r,c,ldy)],beta);
    // Each thread owns a contiguous block of columns [bcl, bch) of x/y,
    // so no two threads write the same y element.
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid,num_threads,columns);
        ALPHA_INT bch = cross_block_high(tid,num_threads,columns);
        for(ALPHA_INT di = 0; di < mat->ndiag;++di){
            ALPHA_INT d = mat->distance[di];
            if(d == 0){
                // start row/column and length of this diagonal
                ALPHA_INT ars = alpha_max(0,-d);
                ALPHA_INT acs = alpha_max(0,d);
                ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
                for(ALPHA_INT i = 0; i < an; ++i){
                    ALPHA_INT ar = ars + i;
                    ALPHA_INT ac = acs + i;
                    ALPHA_Number val;
                    alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
                    for(ALPHA_INT bc = bcl;bc < bch;++bc){
                        alpha_madde(y[index2(ar,bc,ldy)],x[index2(ac,bc,ldx)],val);
                    }
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
dlange.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlange.c, normal z -> d, Fri Sep 28 17:38:07 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup plasma_lange
*
* Returns the norm of a general matrix as
*
* dlange = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
* (
* ( norm1(A), NORM = PlasmaOneNorm
* (
* ( normI(A), NORM = PlasmaInfNorm
* (
* ( normF(A), NORM = PlasmaFrobeniusNorm
*
* where norm1 denotes the one norm of a matrix (maximum column sum),
* normI denotes the infinity norm of a matrix (maximum row sum) and
* normF denotes the Frobenius norm of a matrix (square root of sum
* of squares). Note that max(abs(A(i,j))) is not a consistent matrix
* norm.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: max norm
* - PlasmaOneNorm: one norm
* - PlasmaInfNorm: infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] m
* The number of rows of the matrix A. m >= 0. When m = 0,
* the returned value is set to zero.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0. When n = 0,
* the returned value is set to zero.
*
* @param[in] pA
* The m-by-n matrix A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
*******************************************************************************
*
* @retval double
* The specified norm of the general matrix A.
*
*******************************************************************************
*
* @sa plasma_omp_dlange
* @sa plasma_clange
* @sa plasma_dlange
* @sa plasma_slange
*
******************************************************************************/
// Computes the requested norm (max, one, infinity, or Frobenius) of the
// m-by-n matrix pA with leading dimension lda.  Negative error codes are
// returned (as doubles) for invalid arguments; the documented contract is
// in the block comment above.
//
// Fixes vs. previous version: removed a leftover debug printf of lda on the
// error path; the return statuses of plasma_sequence_init() and
// plasma_request_init() are now checked; `value` is initialized so an early
// async failure cannot yield an indeterminate read.
double plasma_dlange(plasma_enum_t norm,
                     int m, int n,
                     double *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return 0.0;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lange(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace sized for the reduction each norm performs.
    double *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (double*)malloc((size_t)A.mt*A.nt*sizeof(double));
        break;
    case PlasmaOneNorm:
        work = (double*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(double));
        break;
    case PlasmaInfNorm:
        work = (double*)malloc(((size_t)A.nt*A.m+A.m)*sizeof(double));
        break;
    case PlasmaFrobeniusNorm:
        // two values (scale, sumsq) per tile
        work = (double*)malloc((size_t)2*A.mt*A.nt*sizeof(double));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    double value = 0.0;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_dlange(norm, A, work, &value, &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return the norm.
    return value;
}
/***************************************************************************//**
*
* @ingroup plasma_lange
*
* Calculates the max, one, infinity or Frobenius norm of a general matrix.
* Non-blocking equivalent of plasma_dlange(). May return before the
* computation is finished. Operates on matrices stored by tiles. All matrices
* are passed through descriptors. All dimensions are taken from the
* descriptors. Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: Max norm
* - PlasmaOneNorm: One norm
* - PlasmaInfNorm: Infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] A
* The descriptor of matrix A.
*
* @param[out] work
* Workspace of size:
* - PlasmaMaxNorm: A.mt*A.nt
* - PlasmaOneNorm: A.mt*A.n + A.n
* - PlasmaInfNorm: A.nt*A.m + A.m
* - PlasmaFrobeniusNorm: 2*A.mt*A.nt
*
* @param[out] value
* The calculated value of the norm requested.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dlange
* @sa plasma_omp_clange
* @sa plasma_omp_dlange
* @sa plasma_omp_slange
*
******************************************************************************/
// Non-blocking tile-layout front end: computes the requested norm of A into
// *value.  Errors are reported through sequence->status / request->status.
void plasma_omp_dlange(plasma_enum_t norm, plasma_desc_t A,
                       double *work, double *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // fix: a NULL sequence or request cannot carry a status back, and the
    // original code passed them to plasma_request_fail() (NULL dereference);
    // validate them before any other check.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        // fix: report the matching error code (was PlasmaErrorIllegalValue)
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pdlange(norm, A, work, value, sequence, request);
}
|
GB_binop__minus_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint16)
// A*D function (colscale): GB (_AxD__minus_uint16)
// D*A function (rowscale): GB (_DxB__minus_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint16)
// C=scalar+B GB (_bind1st__minus_uint16)
// C=scalar+B' GB (_bind1st_tran__minus_uint16)
// C=A+scalar GB (_bind2nd__minus_uint16)
// C=A'+scalar GB (_bind2nd_tran__minus_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
// Type and operator macros consumed by the included templates below:
// z = x - y over uint16_t (unsigned wraparound on underflow).
#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// fix: removed a stray trailing backslash after the 0, which spliced the
// following comment line into this macro's replacement list
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// fix: same stray-backslash removal as GB_A_IS_PATTERN
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_UINT16 || GxB_NO_MINUS_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, applied entrywise with z = x - y over uint16_t; all three
// matrices are dense.  The loop itself lives in the included template.
void GB (_Cdense_ewise3_accum__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// C = A+B (no accumulation), all three matrices dense.
void GB (_Cdense_ewise3_noaccum__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
GrB_Info GB (_Cdense_accumB__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// C += b: accumulate a single scalar b into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__minus_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; kept as-is
    // because this file is auto-generated from a common template.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with z = x - y over uint16_t.
// For eWiseUnion, the alpha/beta scalars substitute for entries that are
// present in only one of A or B.
GrB_Info GB (_AaddB__minus_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // only eWiseUnion reads the scalar inputs
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (masked variants included) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__minus_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// eWiseMult for the case A sparse/hyper, B bitmap/full.  GB_BINOP_FLIP is 0
// for this operator, so only the non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_04__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// eWiseMult where the result C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__minus_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p]: apply the operator with the scalar bound to the
// first argument, over all bnz entries in parallel.
GrB_Info GB (_bind1st__minus_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present according to the bitmap (see GBB)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = Ax [p] - y: apply the operator with the scalar bound to the
// second argument, over all anz entries in parallel.
GrB_Info GB (_bind2nd__minus_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x - aij) ; \
}

// C = op (x, A'): transpose A and compute Cx [pC] = x - aij for each entry,
// via the shared transpose template.
GrB_Info GB (_bind1st_tran__minus_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij - y) ; \
}

// C = op (A', y): transpose A and compute Cx [pC] = aij - y for each entry.
GrB_Info GB (_bind2nd_tran__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Triangle.c | #define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <omp.h>
/*
 * Return the size of the file `filename` in bytes.
 * Exits the program if the file cannot be stat'ed.
 */
unsigned long long TheFileSize(char const *filename)
{
    struct stat statbuf;

    if (stat(filename, &statbuf) == -1)
    {
        // fix: report why we are exiting instead of dying silently
        perror("stat");
        exit(-1);
    }
    return (unsigned long long)statbuf.st_size;
}
/*
 * Count triangles in an edge list stored as pairs of 32-bit endpoints
 * (8 bytes per edge).  File name is argv[1] (or argv[2] if two arguments
 * are given).
 */
int main(int argc, char const *argv[])
{
    // fix: the original tested argc < 1 (never true) and could read the
    // uninitialized pointer `name` when argc == 1 or argc > 3
    char const *name;
    if (argc < 2 || argc > 3)
    {
        fprintf(stderr, "usage: %s [ignored] <edge-file>\n", argv[0]);
        exit(1);
    }
    else if (argc == 2)
        name = argv[1];
    else
        name = argv[2];

    unsigned long long outputsize = TheFileSize(name);
    // fix: %llu for unsigned long long (was %lld)
    printf("The file is %llu Bytes.\n", outputsize);

    time_t t0 = time(NULL);

    int openfile = open(name, O_RDONLY);
    if (openfile == -1)
    {
        perror("open");
        exit(1);
    }

    // Map the whole file read-only.
    unsigned int *reflect =
        mmap(NULL, outputsize, PROT_READ, MAP_SHARED, openfile, 0);
    if (reflect == MAP_FAILED)   // fix: mmap signals failure with MAP_FAILED
    {
        perror("mmap");
        close(openfile);
        exit(-2);
    }

    unsigned long long numedge = outputsize / 8;
    printf("There are %llu edges in the file.\n", numedge);

    long long i, j;
    // Normalized edge list: front[i] <= behind[i] for every edge.
    unsigned int *front = (unsigned int *)malloc(sizeof(unsigned int) * numedge);
    unsigned int *behind = (unsigned int *)malloc(sizeof(unsigned int) * numedge);
    if (front == NULL || behind == NULL)   // fix: check allocations
    {
        fprintf(stderr, "out of memory\n");
        munmap(reflect, outputsize);
        close(openfile);
        exit(1);
    }

    #pragma omp parallel for
    for (i = 0; i < (long long)numedge; i++)
    {
        if (reflect[2*i] < reflect[2*i+1])
        {
            front[i] = reflect[2*i];
            behind[i] = reflect[2*i+1];
        }
        else
        {
            front[i] = reflect[2*i+1];
            behind[i] = reflect[2*i];
        }
    }
    munmap(reflect, outputsize);
    close(openfile);

    unsigned long long count = 0;

    // For each pair of consecutive edges sharing the same smaller endpoint,
    // look for the edge that closes the triangle.
    // fixes: "front[i]=front[i+1]" assigned instead of comparing; the loop
    // read front[i+1] one past the end on its last iteration; count was
    // updated by many threads without synchronization (now a reduction);
    // removed the debug printf("processing") from the inner loop.
    #pragma omp parallel for private(j) reduction(+:count)
    for (i = 0; i < (long long)numedge - 1; i++)
    {
        if (front[i] == front[i+1])
        {
            for (j = 0; j < (long long)numedge; j++)
            {
                if (front[j] == behind[i] && behind[j] == behind[i+1])
                    count++;
            }
        }
    }

    free(front);
    free(behind);

    // fix: "count&2 == 0" parsed as count & (2 == 0) == 0, which is always
    // false; the intent was an even/odd test on count
    if (count % 2 == 0)
        printf("The triangles are %llu.\n", count / 2);
    else
        printf("The triangles are %llu.\n", 1 + count / 2);

    printf("Time costs %ld sec.\n", time(NULL) - t0);
    return 0;
}
|
GB_unaryop__ainv_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_int8
// op(A') function: GB_tran__ainv_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
// input type of the unary op (A) and output type (C)
#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
    z = -x ;

// casting: the int8_t input is first cast to uint8_t; negation of the
// unsigned value then wraps modulo 256
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -((uint8_t) Ax [p]) for all anz entries, in parallel.
GrB_Info GB_unop__ainv_uint8_int8
(
    uint8_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// C = -((uint8_t) A'), via the shared transpose template.
GrB_Info GB_tran__ainv_uint8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
tls.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// p gets one copy per OpenMP thread (threadprivate); s is ordinarily shared.
int p;
#pragma omp threadprivate(p)
int s;

// Demonstrates thread-local storage: each thread prints its own address for
// the threadprivate p, but every thread prints the same address for s.
int main(int argc, char **argv)
{
    #pragma omp parallel
    {
        printf("%d: %p %p\n", omp_get_thread_num(), &p, &s );
    }
    return 0;
}
|
jitced_template.c | # define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION
# include <Python.h>
# include <numpy/arrayobject.h>
# include <math.h>
# include <structmember.h>
# include <assert.h>
# include <stdbool.h>
# define TYPE_INDEX NPY_DOUBLE
{% if not numpy_rng %}
# include "random_numbers.c"
{% endif %}
// One pre-drawn noise segment: over a time span of length h, the Wiener
// increments DW and auxiliary integrals DZ for each of the {{n}} components.
typedef struct noise_item
{
    double h;
    double DW[{{n}}];
    double DZ[{{n}}];
    struct noise_item * next;   // singly-linked list, ordered in time
} noise_item;

// State of the SDE integrator, exposed to Python as an extension type.
typedef struct
{
    PyObject_HEAD
    noise_item * noises;          // head of the stored noise chain
    noise_item * current_noise;   // cursor used while consuming noise
    noise_item * last_noise;      // tail, for O(1) appends
    double state[{{n}}];          // current state y(t)
    double t;
    double new_state[{{n}}];      // candidate state of the trial step
    double new_t;
    double error[{{n}}];          // per-component error estimate of last step
    {% for control_par in control_pars %}
    double parameter_{{control_par}};
    {% endfor %}
    {% if numpy_rng %}
    PyObject * RNG;               // Python-side RNG object
    PyObject * noise_function;    // callable drawing normal samples
    PyObject * noise_size;        // shape object passed to noise_function
    {% else %}
    rk_state * RNG;               // internal RNG state (random_numbers.c)
    {% endif %}
} sde_integrator;
/* void print_noises(sde_integrator * const self)
{
int noise_index = 0;
bool found = false;
if (self->noises)
for (noise_item * cn=self->noises; cn; cn=cn->next)
{
printf("%e\t%e\t%e\n", cn->h, cn->DW[0], cn->DZ[0]);
found |= (cn==self->current_noise);
if (!found)
noise_index++;
}
else
printf("no noise\n");
printf("%i\n", noise_index);
printf("========\n");
} */
// malloc wrapper that sets a Python MemoryError on failure (returns NULL).
static inline void * safe_malloc(size_t size)
{
    void * pointer = malloc(size);
    if (pointer == NULL)
        PyErr_SetString(PyExc_MemoryError,"Could not allocate memory.");
    return pointer;
}

// Allocate a single, uninitialised noise_item.
static inline noise_item * malloc_noise(void)
{
    return safe_malloc(sizeof(noise_item));
}

// Insert a new noise_item directly after the current cursor position (or
// start the list if it is empty); updates last_noise when appending at the end.
noise_item * insert_noise_after_current(sde_integrator * const self)
{
    noise_item * new_noise = malloc_noise();
    assert(new_noise!=NULL);
    if (self->current_noise)
    {
        assert(self->noises);
        new_noise->next = self->current_noise->next;
        self->current_noise->next = new_noise;
    }
    else
    {
        assert(!self->noises);
        self->current_noise = self->noises = new_noise;
        new_noise->next = NULL;
    }
    if (new_noise->next==NULL)
        self->last_noise = new_noise;
    return new_noise;
}

// Append a new noise_item at the tail of the chain (does not move the cursor).
noise_item * append_noise_item(sde_integrator * const self)
{
    noise_item * new_noise = malloc_noise();
    assert(new_noise!=NULL);
    new_noise->next = NULL;
    if (self->noises)
        self->last_noise->next = new_noise;
    else
        self->noises = new_noise;
    self->last_noise = new_noise;
    return new_noise;
}
{% if numpy_rng %}
// Draw {{n}} pairs of normal increments (DW, DZ), standard deviation `scale`,
// using the user-supplied noise function (called as f(0.0, scale, size)).
void get_gauss(
    sde_integrator * const self,
    double const scale,
    double DW[{{n}}],
    double DZ[{{n}}]
)
{
    // NOTE(review): the call result is not checked for NULL; a raising
    // noise_function would make PyArray_GETPTR2 crash — confirm callers
    // always supply a valid callable.
    PyArrayObject * noise = (PyArrayObject *) PyObject_CallFunction(
        self->noise_function,
        "ddO",
        0.0,
        scale,
        self->noise_size
    );
    #pragma omp parallel for schedule(dynamic, {{chunk_size}})
    for (int i=0; i<{{n}}; i++)
    {
        DW[i] = * (double *) PyArray_GETPTR2(noise,i,0);
        DZ[i] = * (double *) PyArray_GETPTR2(noise,i,1);
    }
    Py_DECREF(noise);
}
{% else %}
// Draw {{n}} pairs of normal increments using the built-in RNG.
void get_gauss(
    sde_integrator * const self,
    double const scale,
    double DW[{{n}}],
    double DZ[{{n}}]
)
{
    for (int i=0; i<{{n}}; i++)
        rk_gauss( self->RNG, &DW[i], &DZ[i], scale );
}
{% endif %}
// Drop the oldest stored noise segment (after it has been fully consumed).
void remove_first_noise(sde_integrator * const self)
{
    if (self->noises)
    {
        noise_item * old_first_noise = self->noises;
        self->noises = old_first_noise->next;
        free(old_first_noise);
        if (self->noises==NULL)
            self->last_noise = NULL;
    }
}

// Draw a fresh noise segment of length h_need, append it to the chain, and
// leave the cursor on it.
void append_noise(sde_integrator * const self, double const h_need)
{
    noise_item * new_noise = append_noise_item(self);
    self->current_noise = new_noise;
    new_noise->h = h_need;
    // Wiener increments scale with the square root of the time span
    get_gauss( self, sqrt(h_need), new_noise->DW, new_noise->DZ );
}

// Split the current noise segment (length h) at h_need with a Brownian
// bridge: the two parts sum to the original increments, so noise already
// sampled stays consistent when the integrator retries with a smaller step.
void Brownian_bridge(sde_integrator * const self, double const h_need)
{
    noise_item * noise_1 = self->current_noise;
    noise_item * noise_2 = insert_noise_after_current(self);
    double h = noise_1->h;
    double h_exc = h - h_need;   // excess length moved to the new segment
    double const factor = h_exc/h;
    noise_2->h = h_exc;
    get_gauss( self, sqrt(factor*h_need), noise_2->DW, noise_2->DZ );
    #pragma omp parallel for schedule(dynamic, {{chunk_size}})
    for (int i=0; i<{{n}}; i++)
    {
        noise_2->DW[i] += noise_1->DW[i]*factor;
        noise_2->DZ[i] += noise_1->DZ[i]*factor;
    }
    noise_1->h = h_need;
    // the first part is the original increment minus the second part
    #pragma omp parallel for schedule(dynamic, {{chunk_size}})
    for (int i=0; i<{{n}}; i++)
    {
        noise_1->DW[i] -= noise_2->DW[i];
        noise_1->DZ[i] -= noise_2->DZ[i];
    }
}
// Accumulate Wiener increments over a span of length h_need into
// DW_acc/DZ_acc, consuming stored noise segments in order.  Segments no
// longer than the remaining span are summed whole; a segment that overshoots
// is first split by a Brownian bridge; if the chain runs out, fresh noise of
// exactly the remaining length is appended.
void get_noise(
    sde_integrator * const self,
    double h_need,
    double DW_acc[{{n}}],
    double DZ_acc[{{n}}]
)
{
    bool initialised = false;
    self->current_noise = self->noises;
    // NOTE(review): exact floating-point loop condition; termination relies
    // on h_need reaching 0.0 exactly via subtraction of segment lengths that
    // were produced by splitting at h_need — confirm this invariant holds.
    while (h_need)
    {
        if (self->current_noise)
        {
            if (self->current_noise->h <= h_need)
            {
                if (initialised)
                    #pragma omp parallel for schedule(dynamic, {{chunk_size}})
                    for (int i=0; i<{{n}}; i++)
                    {
                        DW_acc[i] += self->current_noise->DW[i];
                        DZ_acc[i] += self->current_noise->DZ[i];
                    }
                else
                {
                    // first consumed segment initialises the accumulators
                    initialised = true;
                    #pragma omp parallel for schedule(dynamic, {{chunk_size}})
                    for (int i=0; i<{{n}}; i++)
                    {
                        DW_acc[i] = self->current_noise->DW[i];
                        DZ_acc[i] = self->current_noise->DZ[i];
                    }
                }
                h_need -= self->current_noise->h;
                self->current_noise = self->current_noise->next;
            }
            else
                // segment longer than needed: split it at h_need
                Brownian_bridge(self, h_need);
        }
        else
            // ran out of stored noise: draw a fresh segment of length h_need
            append_noise(self, h_need);
    }
    if (!initialised)
        #pragma omp parallel for schedule(dynamic, {{chunk_size}})
        for (int i=0; i<{{n}}; i++)
            DW_acc[i] = DZ_acc[i] = 0;
}
// Python method: pre-generate `number` noise segments of length `step` each,
// pinning the noise realisation before integrating.
static PyObject * pin_noise(sde_integrator * self, PyObject * args)
{
    unsigned int number;
    double step;
    if (!PyArg_ParseTuple(args,"Id",&number,&step))
    {
        PyErr_SetString(PyExc_ValueError,"Wrong input.");
        return NULL;
    }
    for (unsigned int i=0; i<number; i++)
        append_noise(self, step);
    Py_RETURN_NONE;
}

// Compute the stochastic integrals needed by the stepper for a step of
// length h from the accumulated increments DW and DZ (the I_11/I_111 terms
// are only needed for non-additive noise).
void get_I(
    sde_integrator * const self,
    double const h,
    double DW[{{n}}],
    {% if not additive %}
    double I_11dbsqh[{{n}}],
    double I_111[{{n}}],
    {% endif %}
    double I_10[{{n}}]
)
{
    double DZ[{{n}}];
    get_noise(self, h, DW, DZ);
    #pragma omp parallel for schedule(dynamic, {{chunk_size}})
    for (int i=0; i<{{n}}; i++)
    {
        {% if not additive %}
        I_11dbsqh[i] = ( DW[i]*DW[i] - h ) * 0.5/sqrt(h);
        I_111[i] = ( DW[i]*DW[i]*DW[i] - 3*h*DW[i] ) * (1./6.) ;
        {% endif %}
        I_10 [i] = ( DW[i] + DZ[i]*(1./sqrt(3)) ) * (h /2.) ;
    }
}
{% if control_pars|length %}
// Python method: set all control parameters at once from positional doubles.
static PyObject * set_parameters(sde_integrator * const self, PyObject * args)
{
    if (!PyArg_ParseTuple(
        args,
        "{{'d'*control_pars|length}}"
        {% for control_par in control_pars %}
        , &(self->parameter_{{control_par}})
        {% endfor %}
    ))
    {
        PyErr_SetString(PyExc_ValueError,"Wrong input.");
        return NULL;
    }
    Py_RETURN_NONE;
}
{% endif %}

// shape of per-component arrays handed to Python callbacks
npy_intp dim[1] = { {{n}} };

{% if callbacks|length %}
// Wrap raw state data in a read-only 1-D NumPy array (no copy).
PyObject * n_dim_read_only_array_from_data(void * data) {
    PyObject * result = PyArray_SimpleNewFromData( 1, dim, TYPE_INDEX, data );
    PyArray_CLEARFLAGS( (PyArrayObject *) result, NPY_ARRAY_WRITEABLE );
    return result;
}

// Call a Python function with the given argument tuple (steals arglist)
// and return its result as a double.
static inline double callback(PyObject * Python_function, PyObject * arglist)
{
    PyObject * py_result = PyObject_CallObject(Python_function,arglist);
    Py_DECREF(arglist);
    double result = PyFloat_AsDouble(py_result);
    Py_DECREF(py_result);
    return result;
}
{% endif %}

// One macro per user callback: invoked from generated f/g code with the
// current state Y plus any extra scalar arguments.
{% for function,nargs in callbacks %}
static PyObject * callback_{{function}};
# define {{function}}(...) callback(\
    callback_{{function}}, \
    Py_BuildValue( \
        {% if nargs -%}
        "(O{{'d'*nargs}})", n_dim_read_only_array_from_data(Y) , __VA_ARGS__ \
        {% else -%}
        "(O)", n_dim_read_only_array_from_data(Y) \
        {% endif -%}
    ))
{% endfor %}

// Accessor macros used by the generated f/g code; note set_drift already
// multiplies by the step size h.
# define set_drift(i, value) (drift[i] = (value)*h)
# define set_diffusion(i, value) (diffusion[i] = value)
# define y(i) (Y[i])
# define get_f_helper(i) ((f_helper[i]))
# define set_f_helper(i,value) (f_helper[i] = value)
# define get_g_helper(i) ((g_helper[i]))
# define set_g_helper(i,value) (g_helper[i] = value)

{% if has_any_helpers: %}
# include "helpers_definitions.c"
{% endif %}
# include "f_definitions.c"

// Evaluate the drift term into drift[]; set_drift multiplies each value by
// the step size h, so drift[] holds f(t, Y) * h.
void eval_drift(
    sde_integrator * const self,
    double const t,
    double Y[{{n}}],
    double const h,
    double drift[{{n}}])
{
    {% if number_of_f_helpers>0: %}
    double f_helper[{{number_of_f_helpers}}];
    # include "f_helpers.c"
    {% endif %}
    # include "f.c"
}

# include "g_definitions.c"

// Evaluate the diffusion term into diffusion[]; for additive noise g does
// not depend on the state, so Y is not a parameter in that case.
void eval_diffusion(
    sde_integrator * const self,
    double const t,
    {% if not additive %}
    double Y[{{n}}],
    {% endif %}
    double diffusion[{{n}}])
{
    {% if number_of_g_helpers>0: %}
    double g_helper[{{number_of_g_helpers}}];
    # include "g_helpers.c"
    {% endif %}
    # include "g.c"
}
/* Compute one tentative step of size h from (t, state) into new_state,
 * along with a per-component error estimate in self->error.
 * This appears to implement an adaptive stochastic Runge-Kutta scheme with
 * embedded error estimation (Roessler-type; separate variants for
 * multiplicative and additive noise) -- NOTE(review): confirm the exact
 * scheme and coefficients against the jitcsde documentation.
 * The step is only committed later by accept_step(); h is assumed > 0 and
 * is not validated here. */
static PyObject * get_next_step(sde_integrator * const self, PyObject * args)
{
double h;
if (!PyArg_ParseTuple(args, "d", &h))
{
PyErr_SetString(PyExc_ValueError,"Wrong input.");
return NULL;
}
/* Stochastic integrals for this step, provided by get_I from the noise
 * memory (I_1 ~ DeltaW, I_10 ~ time-integrated noise, etc.). */
double I_1[{{n}}];
double I_10[{{n}}];
{% if not additive %}
/* --- multiplicative-noise path: four diffusion stages g_1..g_4 --- */
double I_11dbsqh[{{n}}];
double I_111[{{n}}];
get_I(self,h,I_1,I_11dbsqh,I_111,I_10);
double argument[{{n}}];
double fh_1[{{n}}];
eval_drift(self,self->t,self->state,h,fh_1);
double g_1[{{n}}];
eval_diffusion(self,self->t,self->state,g_1);
double fh_2[{{n}}];
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
argument[i] = self->state[i] + 0.75*fh_1[i] + 1.5*g_1[i]*I_10[i]/h;
eval_drift(self,self->t+0.75*h,argument,h,fh_2);
double g_2[{{n}}];
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
argument[i] = self->state[i] + 0.25*fh_1[i] + 0.5*g_1[i]*sqrt(h);
eval_diffusion(self,self->t+0.25*h,argument,g_2);
double g_3[{{n}}];
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
argument[i] = self->state[i] + fh_1[i] - g_1[i]*sqrt(h);
eval_diffusion(self,self->t+h,argument,g_3);
double g_4[{{n}}];
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
argument[i] = self->state[i] + 0.25*fh_1[i] + sqrt(h)*(-5*g_1[i]+3*g_2[i]+0.5*g_3[i]);
eval_diffusion(self,self->t+0.25*h,argument,g_4);
/* Combine stages; E_N (noise) and E_D (drift) form the error estimate. */
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
{
double E_N = (1./h/3.) * (
+ ( 6*I_10[i] - 6*I_111[i] ) * g_1[i]
+ ( -4*I_10[i] + 5*I_111[i] ) * g_2[i]
+ ( -2*I_10[i] - 2*I_111[i] ) * g_3[i]
+ ( 3*I_111[i] ) * g_4[i]
);
self->new_state[i] = self->state[i] + E_N + (1./3.)*(
fh_1[i] + 2*fh_2[i]
+ (-3*I_1[i] - 3*I_11dbsqh[i] ) * g_1[i]
+ ( 4*I_1[i] + 4*I_11dbsqh[i] ) * g_2[i]
+ ( 2*I_1[i] - I_11dbsqh[i] ) * g_3[i]
);
double E_D = (fh_2[i]-fh_1[i])/6;
self->error[i] = fabs(E_D) + fabs(E_N);
}
{% else %}
/* --- additive-noise path: g is state-independent, two evaluations --- */
get_I(self,h,I_1,I_10);
double argument[{{n}}];
double fh_1[{{n}}];
eval_drift(self,self->t,self->state,h,fh_1);
double g_1[{{n}}];
eval_diffusion(self,self->t+h,g_1);
double fh_2[{{n}}];
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
argument[i] = self->state[i] + 0.75*fh_1[i] + 0.5*g_1[i]*I_10[i]/h;
eval_drift(self,self->t+0.75*h,argument,h,fh_2);
double g_2[{{n}}];
eval_diffusion(self,self->t,g_2);
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
{
double E_N = I_10[i] /h * (g_2[i]-g_1[i]);
self->new_state[i] = self->state[i] + E_N + (1./3.)*(fh_1[i] + 2*fh_2[i]) + I_1[i]*g_1[i];
double E_D = (fh_2[i]-fh_1[i])/6;
self->error[i] = fabs(E_D) + fabs(E_N);
}
{% endif %}
self->new_t = self->t + h;
Py_RETURN_NONE;
}
/*
 * Return the largest ratio error_i / (atol + rtol*|new_state_i|) over all
 * components -- the scalar used for step-size control.  Components where
 * both the error and the tolerance are zero are skipped (0/0 guard).
 */
static PyObject * get_p(sde_integrator const * const self, PyObject * args)
{
double atol;
double rtol;
if (!PyArg_ParseTuple(args, "dd", &atol, &rtol))
{
PyErr_SetString(PyExc_ValueError,"Wrong input.");
return NULL;
}
double worst = 0.0;
for (int i=0; i<{{n}}; i++)
{
double err = self->error[i];
double tol = atol + rtol*fabs(self->new_state[i]);
if (err==0.0 && tol==0.0)
continue;
double ratio = err/tol;
if (ratio>worst)
worst = ratio;
}
return PyFloat_FromDouble(worst);
}
/*
 * Commit the tentative step computed by get_next_step: copy new_state over
 * state, advance t, and discard pinned noise samples that precede the one
 * the accepted step consumed.
 */
static PyObject * accept_step(sde_integrator * const self)
{
memcpy(self->state, self->new_state, sizeof(self->state));
self->t = self->new_t;
/* Pop noise records from the front of the list up to current_noise. */
while (self->noises != NULL)
{
if (self->noises == self->current_noise)
break;
remove_first_noise(self);
}
self->current_noise = self->noises;
Py_RETURN_NONE;
}
/* Return the current state as a fresh one-dimensional NumPy array.
 * NOTE(review): `dim` and TYPE_INDEX are defined earlier in this template
 * (outside this view) -- presumably {{n}} and a float64 type code; confirm. */
static PyObject * get_state(sde_integrator * const self)
{
PyArrayObject * array = (PyArrayObject*) PyArray_SimpleNewFromData(1, dim, TYPE_INDEX, self->state);
// Copy is necessary because self->state may be overwritten after this.
PyObject * result = PyArray_NewCopy(array,NPY_ANYORDER);
Py_DECREF(array);
return result;
}
/* Add a jump vector (a NumPy array) component-wise to the current state.
 * NOTE(review): neither the length nor the dtype of `change` is validated
 * before PyArray_GETPTR1 is dereferenced as double -- assumes callers pass
 * a float64 array of length {{n}}; confirm on the Python side. */
static PyObject * apply_jump(sde_integrator * const self, PyObject * args)
{
PyArrayObject * change;
if (!PyArg_ParseTuple(args,"O!",&PyArray_Type,&change))
{
PyErr_SetString(PyExc_ValueError,"Wrong input. Note that the function returning jump amplitudes must return a NumPy array.");
return NULL;
}
#pragma omp parallel for schedule(dynamic, {{chunk_size}})
for (int i=0; i<{{n}}; i++)
self->state[i] += * (double *) PyArray_GETPTR1(change,i);
Py_RETURN_NONE;
}
/* Destructor: drain the pinned-noise list, release the RNG resources
 * (NumPy objects when numpy_rng is set, otherwise the raw rk_state buffer),
 * then free the object itself. */
static void sde_integrator_dealloc(sde_integrator * const self)
{
while (self->noises)
remove_first_noise(self);
{% if numpy_rng %}
Py_DECREF(self->RNG);
Py_DECREF(self->noise_function);
Py_DECREF(self->noise_size);
{% else %}
free(self->RNG);
{% endif %}
Py_TYPE(self)->tp_free((PyObject *)self);
}
/*
 * tp_init for sde_integrator: parse (t, initial state Y, seed, callbacks...),
 * copy the initial state, and set up the noise source (NumPy RandomState when
 * numpy_rng is set, otherwise the bundled rk_state generator).
 * Returns 0 on success and -1 on error, per the CPython tp_init convention.
 */
static int sde_integrator_init(sde_integrator * self, PyObject * args)
{
PyArrayObject * Y;
PyObject * seed;
if (!PyArg_ParseTuple(
args,
"dO!O{{'O'*callbacks|length}}",
&(self->t),
&PyArray_Type, &Y,
&seed
{% for function,nargs in callbacks %}
, &callback_{{function}}
{% endfor %}
))
{
PyErr_SetString(PyExc_ValueError,"Wrong input.");
/* BUGFIX: tp_init must return -1 on failure.  The previous `return 1`
 * made CPython treat the failed init as success while an exception was
 * pending. */
return -1;
}
{% for function,nargs in callbacks %}
if (!PyCallable_Check(callback_{{function}}))
{
PyErr_SetString(PyExc_TypeError,"Callback must be callable.");
return -1;
}
{% endfor %}
/* NOTE(review): callback_* keep borrowed references from the argument
 * tuple; they are assumed to outlive the integrator -- confirm the Python
 * side keeps the callables referenced. */
self->noises = NULL;
self->current_noise = NULL;
self->last_noise = NULL;
/* NOTE(review): Y's length and dtype are not validated here; assumed to be
 * a float64 array of length {{n}}. */
for (int i=0; i<{{n}}; i++)
self->state[i] = * (double *) PyArray_GETPTR1(Y,i);
{% if numpy_rng %}
/* NOTE(review): the module reference and the RandomState attribute are
 * leaked (one per instance) and the results are not NULL-checked. */
PyObject * nprandom = PyImport_ImportModule("numpy.random");
self->RNG = PyObject_CallFunctionObjArgs(
PyObject_GetAttrString(nprandom,"RandomState"),
seed, NULL
);
self->noise_function = PyObject_GetAttrString(self->RNG,"normal");
self->noise_size = PyTuple_Pack(2,PyLong_FromLong({{n}}),PyLong_FromLong(2));
{% else %}
self->RNG = safe_malloc(sizeof(rk_state));
rk_seed( PyLong_AsUnsignedLong(seed), self->RNG );
{% endif %}
return 0;
}
// ======================================================
// CPython plumbing for the sde_integrator type.
// Exposed attribute: the current time t (read/write double).
static PyMemberDef sde_integrator_members[] = {
{"t", T_DOUBLE, offsetof(sde_integrator,t), 0, "t"},
{NULL} /* Sentinel */
};
// Method table; set_parameters is only generated when the system declares
// control parameters.
static PyMethodDef sde_integrator_methods[] = {
{% if control_pars|length %}
{"set_parameters", (PyCFunction) set_parameters, METH_VARARGS, NULL},
{% endif %}
{"pin_noise" , (PyCFunction) pin_noise , METH_VARARGS, NULL},
{"get_next_step" , (PyCFunction) get_next_step , METH_VARARGS, NULL},
{"get_p" , (PyCFunction) get_p , METH_VARARGS, NULL},
{"accept_step" , (PyCFunction) accept_step , METH_NOARGS , NULL},
{"apply_jump" , (PyCFunction) apply_jump , METH_VARARGS, NULL},
{"get_state" , (PyCFunction) get_state , METH_NOARGS , NULL},
{ NULL , NULL , 0 , NULL}
};
/* Type object, filled positionally (C89 style).
 * NOTE(review): positional initialization is fragile across CPython
 * versions -- the zero runs below must line up with PyTypeObject's slot
 * order; designated initializers (.tp_dealloc = ...) would be safer. */
static PyTypeObject sde_integrator_type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_jitced.sde_integrator",
sizeof(sde_integrator),
0, // tp_itemsize
(destructor) sde_integrator_dealloc,
0, // tp_print
0,0,0,0,0,0,0,0,0,0,0,0, // ...
0, // tp_as_buffer
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
0, // tp_doc
0,0,0,0,0, // ...
0, // tp_iternext
sde_integrator_methods,
sde_integrator_members,
0, // tp_getset
0,0,0,0, // ...
0, // tp_dictoffset
(initproc) sde_integrator_init,
0, // tp_alloc
0 // tp_new
};
/* The module exports no free functions; all functionality lives on the
 * sde_integrator type. */
static PyMethodDef {{module_name}}_methods[] = {
{NULL, NULL, 0, NULL}
};
static struct PyModuleDef moduledef =
{
PyModuleDef_HEAD_INIT,
"{{module_name}}",
NULL,
-1, /* m_size: no per-interpreter state */
{{module_name}}_methods,
NULL,
NULL,
NULL,
NULL
};
/* Module initialization: ready the integrator type, create the module,
 * register the type, and import the NumPy C API. */
PyMODINIT_FUNC PyInit_{{module_name}}(void)
{
sde_integrator_type.tp_new = PyType_GenericNew;
if (PyType_Ready(&sde_integrator_type) < 0)
return NULL;
PyObject * module = PyModule_Create(&moduledef);
if (module == NULL)
return NULL;
Py_INCREF(&sde_integrator_type);
/* NOTE(review): PyModule_AddObject's return value is unchecked (it steals
 * the reference only on success), and import_array() runs after the type
 * is registered -- confirm no NumPy C API is used before this point. */
PyModule_AddObject(module, "sde_integrator", (PyObject *)&sde_integrator_type);
import_array();
return module;
}
|
Example_affinity.4.c | /*
* @@name: affinity.4c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
void work();
/* OpenMP 4.0 affinity example: spawn a team of 16 threads whose placement
 * is bound "close" to the parent thread's place partition; each thread
 * calls work(). */
void foo()
{
#pragma omp parallel num_threads(16) proc_bind(close)
{
work();
}
}
|
gemm.c | /* Copyright 1990-2016, Jsoftware Inc. All rights reserved. */
/* Licensed use only. Any other use is in violation of copyright. */
/* */
/* gemm macro kernel */
#include <stddef.h>
#include <stdint.h>
#include "j.h"
#include "gemm.h"
#ifdef MC
#undef MC
#endif
#ifdef ZRE
#undef ZRE
#endif
#ifdef ZIM
#undef ZIM
#endif
#define ZRE(x,y) ((TYMES((x).real,(y).real))-TYMES((x).imag,(y).imag))
#define ZIM(x,y) ((TYMES((x).real,(y).imag))+TYMES((x).imag,(y).real))
#include "blis.h"
#define MC BLIS_DEFAULT_MC_D
#define KC BLIS_DEFAULT_KC_D
#define NC BLIS_DEFAULT_NC_D
#define MR BLIS_DEFAULT_MR_D
#define NR BLIS_DEFAULT_NR_D
//
// Copy one full MR-row panel of A (k columns) into contiguous storage.
// A is addressed with row stride rs_a and column stride cs_a.
//
static void
pack_MRxk(dim_t k, const double *A, inc_t rs_a, inc_t cs_a, double *buffer)
{
    dim_t col;
    for (col=0; col<k; ++col, A+=cs_a, buffer+=MR) {
        dim_t row;
        for (row=0; row<MR; ++row)
            buffer[row] = A[row*rs_a];
    }
}
//
// Pack an mc x kc block of A into row panels of height MR,
// zero-padding the final partial panel when mc is not a multiple of MR.
//
static void
pack_A(dim_t mc, dim_t kc, const double *A, inc_t rs_a, inc_t cs_a, double *buffer)
{
    dim_t full_panels = mc / MR;
    dim_t leftover    = mc % MR;
    dim_t p;
    for (p=0; p<full_panels; ++p) {
        pack_MRxk(kc, A, rs_a, cs_a, buffer);
        buffer += kc*MR;
        A      += MR*rs_a;
    }
    if (leftover) {
        dim_t col;
        for (col=0; col<kc; ++col) {
            dim_t row = 0;
            for (; row<leftover; ++row)
                buffer[row] = A[row*rs_a];
            for (; row<MR; ++row)
                buffer[row] = 0.0;        // pad to a full MR panel
            buffer += MR;
            A      += cs_a;
        }
    }
}
//
// Copy k rows of one full NR-wide panel of B into contiguous storage.
// B is addressed with row stride rs_b and column stride cs_b.
//
static void
pack_kxNR(dim_t k, const double *B, inc_t rs_b, inc_t cs_b, double *buffer)
{
    dim_t row;
    for (row=0; row<k; ++row, B+=rs_b, buffer+=NR) {
        dim_t col;
        for (col=0; col<NR; ++col)
            buffer[col] = B[col*cs_b];
    }
}
//
// Pack a kc x nc block of B into column panels of width NR,
// zero-padding the final partial panel when nc is not a multiple of NR.
//
static void
pack_B(dim_t kc, dim_t nc, const double *B, inc_t rs_b, inc_t cs_b, double *buffer)
{
    dim_t full_panels = nc / NR;
    dim_t leftover    = nc % NR;
    dim_t p;
    for (p=0; p<full_panels; ++p) {
        pack_kxNR(kc, B, rs_b, cs_b, buffer);
        buffer += kc*NR;
        B      += NR*cs_b;
    }
    if (leftover) {
        dim_t row;
        for (row=0; row<kc; ++row) {
            dim_t col = 0;
            for (; col<leftover; ++col)
                buffer[col] = B[col*cs_b];
            for (; col<NR; ++col)
                buffer[col] = 0.0;        // pad to a full NR panel
            buffer += NR;
            B      += rs_b;
        }
    }
}
//
// Compute Y += alpha*X (general strides).  The alpha==1 case skips the
// multiplication entirely.
//
static void
dgeaxpy(dim_t m,
        dim_t n,
        double alpha,
        const double *X,
        inc_t rs_x,
        inc_t cs_x,
        double *Y,
        inc_t rs_y,
        inc_t cs_y)
{
    dim_t i, j;
    if (alpha==1.0) {
        for (j=0; j<n; ++j)
            for (i=0; i<m; ++i)
                Y[i*rs_y+j*cs_y] += X[i*rs_x+j*cs_x];
        return;
    }
    for (j=0; j<n; ++j)
        for (i=0; i<m; ++i)
            Y[i*rs_y+j*cs_y] += alpha*X[i*rs_x+j*cs_x];
}
//
// Compute X *= beta.  beta==0 writes zeros explicitly so that a
// zero-initialized C is never contaminated by 0*inf or 0*NaN
// (J always passes beta=0 with C zero-initialized).
//
static void
dgescal(dim_t m,
        dim_t n,
        double beta,
        double *X,
        inc_t rs_x,
        inc_t cs_x)
{
    dim_t i, j;
    if (beta==0.0) {
        for (j=0; j<n; ++j)
            for (i=0; i<m; ++i)
                X[i*rs_x+j*cs_x] = 0.0;
        return;
    }
    for (j=0; j<n; ++j)
        for (i=0; i<m; ++i)
            X[i*rs_x+j*cs_x] *= beta;
}
//
// Macro kernel: multiply an mc x kc block of packed A by a kc x nc block of
// packed B into C, tiling with MR x NR micro-kernel calls.  Full tiles are
// written directly; partial edge tiles go through a local MR x NR buffer _C
// and are merged with dgescal/dgeaxpy.  nextA/nextB are prefetch hints for
// the micro kernel (BLIS auxinfo).
//
static void
dgemm_macro_kernel(dim_t mc,
dim_t nc,
dim_t kc,
double alpha,
double beta,
double *_A,
double *_B,
double *C,
inc_t rs_c,
inc_t cs_c)
{
auxinfo_t auxdata;
dim_t mp = (mc+MR-1) / MR;
dim_t np = (nc+NR-1) / NR;
dim_t _mr = mc % MR;
dim_t _nr = nc % NR;
const double *nextA;
const double *nextB;
// NOTE(review): aligned_malloc's result is not NULL-checked -- presumably
// the allocator aborts on failure; confirm.
double * _C = aligned_malloc(MR*NR*SZD, alignv);
memset((void*)_C,0,MR*NR*SZD); // must initialize memory
dim_t j;
for (j=0; j<np; ++j) {
dim_t nr;
nr = (j!=np-1 || _nr==0) ? NR : _nr;
nextB = &_B[j*kc*NR];
dim_t i;
for (i=0; i<mp; ++i) {
dim_t mr;
mr = (i!=mp-1 || _mr==0) ? MR : _mr;
nextA = &_A[(i+1)*kc*MR];
// On the last tile of a panel, wrap the prefetch pointers around.
if (i==mp-1) {
nextA = _A;
nextB = &_B[(j+1)*kc*NR];
if (j==np-1) {
nextB = _B;
}
}
bli_auxinfo_set_next_a(&auxdata, nextA)
bli_auxinfo_set_next_b(&auxdata, nextB)
if (mr==MR && nr==NR) {
// Full tile: micro kernel writes straight into C (FMA variant if available).
((hwfma)?dgemm2_micro_kernel:dgemm_micro_kernel)(kc, &alpha, &_A[i*kc*MR], &_B[j*kc*NR],
&beta,
&C[i*MR*rs_c+j*NR*cs_c],
rs_c, cs_c,
&auxdata, 0);
} else {
// Edge tile: compute into _C, then scale C by beta and add the result.
((hwfma)?dgemm2_micro_kernel:dgemm_micro_kernel)(kc, &alpha, &_A[i*kc*MR], &_B[j*kc*NR],
(double*)&dzero,
_C, 1, MR,
&auxdata, 0);
dgescal(mr, nr, beta,
&C[i*MR*rs_c+j*NR*cs_c], rs_c, cs_c);
dgeaxpy(mr, nr, 1.0, _C, 1, MR,
&C[i*MR*rs_c+j*NR*cs_c], rs_c, cs_c);
}
}
}
aligned_free( _C );
}
//
// Compute C <- beta*C + alpha*A*B (double precision, general strides).
//
// Blocked GotoBLAS/BLIS scheme: N is split into NC slabs (loop 5), K into
// KC slabs (loop 4; beta is applied only on the first K slab), and M into
// MC slabs (loop 3, OpenMP-parallel; each thread packs its own A block).
//
void
dgemm_nn (I m,
I n,
I k,
double alpha,
double *A,
I rs_a,
I cs_a,
double *B,
I rs_b,
I cs_b,
double beta,
double *C,
I rs_c,
I cs_c)
{
I mb = (m+MC-1) / MC;
I nb = (n+NC-1) / NC;
I kb = (k+KC-1) / KC;
I _mc = m % MC;
I _nc = n % NC;
I _kc = k % KC;
double _beta;
// Degenerate cases reduce to a pure scaling of C.
if (alpha==0.0 || k==0) {
dgescal(m, n, beta, C, rs_c, cs_c);
return;
}
// loop 5
I j;
for (j=0; j<nb; ++j) {
I nc;
nc = (j!=nb-1 || _nc==0) ? NC : _nc;
// loop 4
I l;
for (l=0; l<kb; ++l) {
I kc;
kc = (l!=kb-1 || _kc==0) ? KC : _kc;
// beta is applied once per C block; later K slabs accumulate.
_beta = (l==0) ? beta : 1.0;
// NOTE(review): aligned_malloc results here and below are unchecked.
double * _B = aligned_malloc((KC+1)*NC*SZD, alignv); /* extra bytes for pre-read */
pack_B(kc, nc,
&B[l*KC*rs_b+j*NC*cs_b], rs_b, cs_b,
_B);
// loop 3
I i=0;
#pragma omp parallel for default(none),private(i),shared(j,l,A,C,mb,nc,kc,alpha,_beta,_mc,_B,rs_a,cs_a,rs_c,cs_c)
for (i=0; i<mb; ++i) {
I mc;
mc = (i!=mb-1 || _mc==0) ? MC : _mc;
// Each thread packs its own MC x KC block of A.
double * _A = aligned_malloc(MC*(KC+1)*SZD, alignv);
pack_A(mc, kc,
&A[i*MC*rs_a+l*KC*cs_a], rs_a, cs_a,
_A);
dgemm_macro_kernel(mc, nc, kc, alpha, _beta, _A, _B,
&C[i*MC*rs_c+j*NC*cs_c],
rs_c, cs_c);
aligned_free( _A );
}
aligned_free( _B );
}
}
}
// -----------------------------------------------------------------
// INT matrix
/*
#if defined(_WIN64)||defined(__LP64__)
typedef long long I;
#else
typedef long I;
#endif
*/
//
// Copy one full MR-row panel of integer A (k columns) into contiguous
// storage, converting each element to double on the fly.
//
static void
ipack_MRxk(dim_t k, const I *A, inc_t rs_a, inc_t cs_a, double *buffer)
{
    dim_t col;
    for (col=0; col<k; ++col, A+=cs_a, buffer+=MR) {
        dim_t row;
        for (row=0; row<MR; ++row)
            buffer[row] = (double)A[row*rs_a];
    }
}
//
// Pack an mc x kc block of integer A into row panels of height MR,
// converting to double and zero-padding the final partial panel.
//
static void
ipack_A(dim_t mc, dim_t kc, const I *A, inc_t rs_a, inc_t cs_a, double *buffer)
{
    dim_t full_panels = mc / MR;
    dim_t leftover    = mc % MR;
    dim_t p;
    for (p=0; p<full_panels; ++p) {
        ipack_MRxk(kc, A, rs_a, cs_a, buffer);
        buffer += kc*MR;
        A      += MR*rs_a;
    }
    if (leftover) {
        dim_t col;
        for (col=0; col<kc; ++col) {
            dim_t row = 0;
            for (; row<leftover; ++row)
                buffer[row] = (double)A[row*rs_a];
            for (; row<MR; ++row)
                buffer[row] = 0.0;        // pad to a full MR panel
            buffer += MR;
            A      += cs_a;
        }
    }
}
//
// Copy k rows of one full NR-wide panel of integer B into contiguous
// storage, converting each element to double on the fly.
//
static void
ipack_kxNR(dim_t k, const I *B, inc_t rs_b, inc_t cs_b, double *buffer)
{
    dim_t row;
    for (row=0; row<k; ++row, B+=rs_b, buffer+=NR) {
        dim_t col;
        for (col=0; col<NR; ++col)
            buffer[col] = (double)B[col*cs_b];
    }
}
//
// Pack a kc x nc block of integer B into column panels of width NR,
// converting to double and zero-padding the final partial panel.
//
static void
ipack_B(dim_t kc, dim_t nc, const I *B, inc_t rs_b, inc_t cs_b, double *buffer)
{
    dim_t full_panels = nc / NR;
    dim_t leftover    = nc % NR;
    dim_t p;
    for (p=0; p<full_panels; ++p) {
        ipack_kxNR(kc, B, rs_b, cs_b, buffer);
        buffer += kc*NR;
        B      += NR*cs_b;
    }
    if (leftover) {
        dim_t row;
        for (row=0; row<kc; ++row) {
            dim_t col = 0;
            for (; col<leftover; ++col)
                buffer[col] = (double)B[col*cs_b];
            for (; col<NR; ++col)
                buffer[col] = 0.0;        // pad to a full NR panel
            buffer += NR;
            B      += rs_b;
        }
    }
}
//
// Compute C <- beta*C + alpha*A*B where A and B hold integers; elements are
// converted to double while packing and the product accumulates into the
// double matrix C.  Same blocked scheme as dgemm_nn, reusing its macro
// kernel.
//
void
igemm_nn (I m,
I n,
I k,
I alpha,
I *A,
I rs_a,
I cs_a,
I *B,
I rs_b,
I cs_b,
I beta,
double *C,
I rs_c,
I cs_c)
{
I mb = (m+MC-1) / MC;
I nb = (n+NC-1) / NC;
I kb = (k+KC-1) / KC;
I _mc = m % MC;
I _nc = n % NC;
I _kc = k % KC;
double _beta;
// Degenerate cases reduce to a pure scaling of C.
if (alpha==0 || k==0) {
dgescal(m, n, (double)beta, C, rs_c, cs_c);
return;
}
// loop 5
I j;
for (j=0; j<nb; ++j) {
I nc;
nc = (j!=nb-1 || _nc==0) ? NC : _nc;
// loop 4
I l;
for (l=0; l<kb; ++l) {
I kc;
kc = (l!=kb-1 || _kc==0) ? KC : _kc;
// beta is applied once per C block; later K slabs accumulate.
_beta = (l==0) ? (double)beta : 1.0;
double * _B = aligned_malloc((1+KC)*NC*SZD, alignv);
ipack_B(kc, nc,
&B[l*KC*rs_b+j*NC*cs_b], rs_b, cs_b,
_B);
// loop 3
I i=0;
#pragma omp parallel for default(none),private(i),shared(j,l,A,C,mb,nc,kc,alpha,_beta,_mc,_B,rs_a,cs_a,rs_c,cs_c)
for (i=0; i<mb; ++i) {
I mc;
mc = (i!=mb-1 || _mc==0) ? MC : _mc;
// Each thread packs its own MC x KC block of A.
double * _A = aligned_malloc(MC*(1+KC)*SZD, alignv);
ipack_A(mc, kc,
&A[i*MC*rs_a+l*KC*cs_a], rs_a, cs_a,
_A);
dgemm_macro_kernel(mc, nc, kc, (double)alpha, _beta, _A, _B,
&C[i*MC*rs_c+j*NC*cs_c],
rs_c, cs_c);
aligned_free( _A );
}
aligned_free( _B );
}
}
}
// -----------------------------------------------------------------
// COMPLEX matrix
#undef MC
#undef KC
#undef NC
#undef MR
#undef NR
#define MC BLIS_DEFAULT_MC_Z
#define KC BLIS_DEFAULT_KC_Z
#define NC BLIS_DEFAULT_NC_Z
#define MR BLIS_DEFAULT_MR_Z
#define NR BLIS_DEFAULT_NR_Z
//
// Copy one full MR-row panel of complex A (k columns) into contiguous
// storage.  A is addressed with row stride rs_a and column stride cs_a.
//
static void
zpack_MRxk(dim_t k, const dcomplex *A, inc_t rs_a, inc_t cs_a, dcomplex *buffer)
{
    dim_t col;
    for (col=0; col<k; ++col, A+=cs_a, buffer+=MR) {
        dim_t row;
        for (row=0; row<MR; ++row)
            buffer[row] = A[row*rs_a];
    }
}
//
// Pack an mc x kc block of complex A into row panels of height MR,
// padding the final partial panel with complex zero.
//
static void
zpack_A(dim_t mc, dim_t kc, const dcomplex *A, inc_t rs_a, inc_t cs_a, dcomplex *buffer)
{
    dim_t full_panels = mc / MR;
    dim_t leftover    = mc % MR;
    dim_t p;
    for (p=0; p<full_panels; ++p) {
        zpack_MRxk(kc, A, rs_a, cs_a, buffer);
        buffer += kc*MR;
        A      += MR*rs_a;
    }
    if (leftover) {
        dim_t col;
        for (col=0; col<kc; ++col) {
            dim_t row = 0;
            for (; row<leftover; ++row)
                buffer[row] = A[row*rs_a];
            for (; row<MR; ++row)
                buffer[row] = zzero;      // pad to a full MR panel
            buffer += MR;
            A      += cs_a;
        }
    }
}
//
// Copy k rows of one full NR-wide panel of complex B into contiguous
// storage.  B is addressed with row stride rs_b and column stride cs_b.
//
static void
zpack_kxNR(dim_t k, const dcomplex *B, inc_t rs_b, inc_t cs_b, dcomplex *buffer)
{
    dim_t row;
    for (row=0; row<k; ++row, B+=rs_b, buffer+=NR) {
        dim_t col;
        for (col=0; col<NR; ++col)
            buffer[col] = B[col*cs_b];
    }
}
//
// Pack a kc x nc block of complex B into column panels of width NR,
// padding the final partial panel with complex zero.
//
static void
zpack_B(dim_t kc, dim_t nc, const dcomplex *B, inc_t rs_b, inc_t cs_b, dcomplex *buffer)
{
    dim_t full_panels = nc / NR;
    dim_t leftover    = nc % NR;
    dim_t p;
    for (p=0; p<full_panels; ++p) {
        zpack_kxNR(kc, B, rs_b, cs_b, buffer);
        buffer += kc*NR;
        B      += NR*cs_b;
    }
    if (leftover) {
        dim_t row;
        for (row=0; row<kc; ++row) {
            dim_t col = 0;
            for (; col<leftover; ++col)
                buffer[col] = B[col*cs_b];
            for (; col<NR; ++col)
                buffer[col] = zzero;      // pad to a full NR panel
            buffer += NR;
            B      += rs_b;
        }
    }
}
//
// Compute Y += alpha*X (complex, general strides).  ZRE/ZIM expand the
// complex product through the .real/.imag fields; the alpha==1 case is a
// plain component-wise accumulation.
//
static void
zgeaxpy(dim_t m,
dim_t n,
dcomplex alpha,
const dcomplex *X,
inc_t rs_x,
inc_t cs_x,
dcomplex *Y,
inc_t rs_y,
inc_t cs_y)
{
if (alpha.real!=1.0||alpha.imag!=0.0) {
dim_t j;
for (j=0; j<n; ++j) {
dim_t i;
for (i=0; i<m; ++i) {
// Y[i*rs_y+j*cs_y] += alpha*X[i*rs_x+j*cs_x];
Y[i*rs_y+j*cs_y].real += ZRE(alpha,X[i*rs_x+j*cs_x]);
// BUGFIX: the imaginary contribution was previously accumulated
// into .real, corrupting both components of Y.
Y[i*rs_y+j*cs_y].imag += ZIM(alpha,X[i*rs_x+j*cs_x]);
}
}
} else {
dim_t j;
for (j=0; j<n; ++j) {
dim_t i;
for (i=0; i<m; ++i) {
// Y[i*rs_y+j*cs_y] += X[i*rs_x+j*cs_x];
Y[i*rs_y+j*cs_y].real += X[i*rs_x+j*cs_x].real;
Y[i*rs_y+j*cs_y].imag += X[i*rs_x+j*cs_x].imag;
}
}
}
}
//
// Compute X *= beta (complex).  beta==0 writes complex zeros explicitly so
// that a zero-initialized C is never contaminated by 0*inf or 0*NaN
// (J always passes beta=0 with C zero-initialized).
//
static void
zgescal(dim_t m,
dim_t n,
dcomplex beta,
dcomplex *X,
inc_t rs_x,
inc_t cs_x)
{
if (beta.real!=0.0||beta.imag!=0.0) {
dim_t j;
for (j=0; j<n; ++j) {
dim_t i;
for (i=0; i<m; ++i) {
// BUGFIX: read the operand into a temporary before writing .real;
// the old code computed ZIM from the already-updated real part,
// yielding a wrong imaginary component.
dcomplex x = X[i*rs_x+j*cs_x];
X[i*rs_x+j*cs_x].real = ZRE(x, beta);
X[i*rs_x+j*cs_x].imag = ZIM(x, beta);
}
}
} else {
dim_t j;
for (j=0; j<n; ++j) {
dim_t i;
for (i=0; i<m; ++i) {
X[i*rs_x+j*cs_x] = zzero;
}
}
}
}
//
// Complex macro kernel: multiply an mc x kc block of packed A by a kc x nc
// block of packed B into C, tiling with MR x NR micro-kernel calls.  Full
// tiles are written directly; partial edge tiles go through the local
// buffer _C and are merged with zgescal/zgeaxpy.  nextA/nextB are prefetch
// hints for the micro kernel (BLIS auxinfo).
//
static void
zgemm_macro_kernel(dim_t mc,
dim_t nc,
dim_t kc,
dcomplex alpha,
dcomplex beta,
dcomplex *_A,
dcomplex *_B,
dcomplex *C,
inc_t rs_c,
inc_t cs_c)
{
auxinfo_t auxdata;
dim_t mp = (mc+MR-1) / MR;
dim_t np = (nc+NR-1) / NR;
dim_t _mr = mc % MR;
dim_t _nr = nc % NR;
const dcomplex *nextA;
const dcomplex *nextB;
// Factor 2: a dcomplex occupies two doubles of SZD each.
// NOTE(review): aligned_malloc's result is not NULL-checked.
dcomplex * _C = aligned_malloc(2*MR*NR*SZD, alignv);
memset((void*)_C,0,2*MR*NR*SZD); // must initialize memory
dim_t j;
for (j=0; j<np; ++j) {
dim_t nr;
nr = (j!=np-1 || _nr==0) ? NR : _nr;
nextB = &_B[j*kc*NR];
dim_t i;
for (i=0; i<mp; ++i) {
dim_t mr;
mr = (i!=mp-1 || _mr==0) ? MR : _mr;
nextA = &_A[(i+1)*kc*MR];
// On the last tile of a panel, wrap the prefetch pointers around.
if (i==mp-1) {
nextA = _A;
nextB = &_B[(j+1)*kc*NR];
if (j==np-1) {
nextB = _B;
}
}
bli_auxinfo_set_next_a(&auxdata, nextA)
bli_auxinfo_set_next_b(&auxdata, nextB)
if (mr==MR && nr==NR) {
// Full tile: micro kernel writes straight into C (FMA variant if available).
((hwfma)?zgemm2_micro_kernel:zgemm_micro_kernel)(kc, &alpha, &_A[i*kc*MR], &_B[j*kc*NR],
&beta,
&C[i*MR*rs_c+j*NR*cs_c],
rs_c, cs_c,
&auxdata, 0);
} else {
// Edge tile: compute into _C, then scale C by beta and add the result.
((hwfma)?zgemm2_micro_kernel:zgemm_micro_kernel)(kc, &alpha, &_A[i*kc*MR], &_B[j*kc*NR],
(dcomplex*)&zzero,
_C, 1, MR,
&auxdata, 0);
zgescal(mr, nr, beta,
&C[i*MR*rs_c+j*NR*cs_c], rs_c, cs_c);
zgeaxpy(mr, nr, zone, _C, 1, MR,
&C[i*MR*rs_c+j*NR*cs_c], rs_c, cs_c);
}
}
}
aligned_free( _C );
}
//
// Compute C <- beta*C + alpha*A*B (double complex, general strides).
// Same blocked GotoBLAS/BLIS scheme as dgemm_nn: NC slabs of N (loop 5),
// KC slabs of K (loop 4; beta applied only on the first slab), MC slabs of
// M (loop 3, OpenMP-parallel with per-thread A packing).
//
void
zgemm_nn (I m,
I n,
I k,
dcomplex alpha,
dcomplex *A,
I rs_a,
I cs_a,
dcomplex *B,
I rs_b,
I cs_b,
dcomplex beta,
dcomplex *C,
I rs_c,
I cs_c)
{
I mb = (m+MC-1) / MC;
I nb = (n+NC-1) / NC;
I kb = (k+KC-1) / KC;
I _mc = m % MC;
I _nc = n % NC;
I _kc = k % KC;
dcomplex _beta;
// Degenerate cases reduce to a pure scaling of C.
if ((alpha.real==0.0 && alpha.imag==0.0) || k==0) {
zgescal(m, n, beta, C, rs_c, cs_c);
return;
}
// loop 5
I j;
for (j=0; j<nb; ++j) {
I nc;
nc = (j!=nb-1 || _nc==0) ? NC : _nc;
// loop 4
I l;
for (l=0; l<kb; ++l) {
I kc;
kc = (l!=kb-1 || _kc==0) ? KC : _kc;
// beta is applied once per C block; later K slabs accumulate (zone == 1).
_beta = (l==0) ? beta : zone;
// Factor 2: dcomplex buffers; sizes in units of SZD doubles.
dcomplex * _B = aligned_malloc(2*(1+KC)*NC*SZD, alignv);
zpack_B(kc, nc,
&B[l*KC*rs_b+j*NC*cs_b], rs_b, cs_b,
_B);
// loop 3
I i=0;
#pragma omp parallel for default(none),private(i),shared(j,l,A,C,mb,nc,kc,alpha,_beta,_mc,_B,rs_a,cs_a,rs_c,cs_c)
for (i=0; i<mb; ++i) {
I mc;
mc = (i!=mb-1 || _mc==0) ? MC : _mc;
// Each thread packs its own MC x KC block of A.
dcomplex * _A = aligned_malloc(2*MC*(1+KC)*SZD, alignv);
zpack_A(mc, kc,
&A[i*MC*rs_a+l*KC*cs_a], rs_a, cs_a,
_A);
zgemm_macro_kernel(mc, nc, kc, alpha, _beta, _A, _B,
&C[i*MC*rs_c+j*NC*cs_c],
rs_c, cs_c);
aligned_free( _A );
}
aligned_free( _B );
}
}
}
|
mat_mul_simd_1000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
/* 1000x1000 integer matrix-product benchmark kernel (SIMD variant).
 * NOTE(review): b is indexed as b[j*1000+k], i.e. B is traversed as if
 * transposed, and the innermost t-loop repeats each accumulation 100 times
 * (scaling every c entry by 100) -- presumably deliberate work
 * amplification by the benchmark generator; confirm intent.
 * NOTE(review): `omp simd` on the outer i-loop with private(j, t, k) is an
 * unusual vectorization target; verify the pragma matches the intent. */
void mat_mul(int *a, int *b, int *c)
{
int i, j, k, t;
#pragma omp simd private(j, t, k)
for(i = 0; i <= 999; i += 1)
for(j = 0; j <= 999; j += 1) {
c[i*1000+j] = 0;
for(k = 0; k <= 999; k += 1)
for(t = 0; t <= 99; t += 1)
c[i*1000+j] += a[i*1000+k]*b[j*1000+k];
}
return;
}
|
resize-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file resize-inl.h
* \brief image resize operator using opencv and only support bilinear resize
* \author Jake Lee
*/
#ifndef MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#define MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#include <mxnet/base.h>
#include <vector>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "image_utils.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
template <typename DType, typename T, typename Acctype>
void ResizeImplCUDA(Stream<gpu>* s, const T input, const T output);
#endif // MXNET_USE_CUDA
struct ResizeParam : public dmlc::Parameter<ResizeParam> {
mxnet::Tuple<int> size;
bool keep_ratio;
int interp;
DMLC_DECLARE_PARAMETER(ResizeParam) {
DMLC_DECLARE_FIELD(size)
.set_default(mxnet::Tuple<int>())
.describe("Size of new image. Could be (width, height) or (size)");
DMLC_DECLARE_FIELD(keep_ratio)
.describe(
"Whether to resize the short edge or both edges to `size`, "
"if size is give as an integer.")
.set_default(false);
DMLC_DECLARE_FIELD(interp).set_default(1).describe(
"Interpolation method for resizing. By default uses bilinear interpolation"
"Options are INTER_NEAREST - a nearest-neighbor interpolation"
"INTER_LINEAR - a bilinear interpolation"
"INTER_AREA - resampling using pixel area relation"
"INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood"
"INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood"
"Note that the GPU version only support bilinear interpolation(1)");
}
};
// Resolve the target (height, width) from the `size` parameter.
// A 2-element size is (width, height) taken verbatim; a 1-element size
// either resizes both edges to it or, with keep_ratio, resizes the short
// edge and scales the long edge proportionally.
inline SizeParam GetHeightAndWidth(int data_h, int data_w, const ResizeParam& param) {
const int ndim = param.size.ndim();
CHECK((ndim == 1) || (ndim == 2))
<< "Input size dimension must be 1 or 2, but got " << ndim;
if (ndim == 2) {
CHECK_GT(param.size[0], 0) << "Input width should be greater than 0, but got " << param.size[0];
CHECK_GT(param.size[1], 0) << "Input height should be greater than 0, but got "
<< param.size[1];
return SizeParam(param.size[1], param.size[0]);
}
const int s = param.size[0];
CHECK_GT(s, 0) << "Input size should be greater than 0, but got " << s;
if (!param.keep_ratio) {
return SizeParam(s, s);
}
// Short edge becomes s; the other edge is scaled by the same ratio.
if (data_h > data_w) {
return SizeParam(static_cast<int>(data_h * s / data_w), s);
}
return SizeParam(s, static_cast<int>(data_w * s / data_h));
}
// Shared shape inference: the output keeps the input layout -- (H, W, C) or
// (N, H, W, C) -- with H and W replaced by the resolved target size.
inline bool ResizeShapeImpl(const ResizeParam& param,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
// input attrs should only be (h, w, c) or (n, h, w, c)
CHECK((in_attrs->at(0).ndim() == 3U) || (in_attrs->at(0).ndim() == 4U))
<< "Input image dimension should be 3 or 4 but got " << in_attrs->at(0).ndim();
const auto& ishape = (*in_attrs)[0];
SizeParam size;
if (ishape.ndim() == 3) {
size = GetHeightAndWidth(ishape[H], ishape[W], param);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({size.height, size.width, ishape[C]}));
} else {
// Batched input: H/W/C are addressed via the k-prefixed axis constants.
size = GetHeightAndWidth(ishape[kH], ishape[kW], param);
SHAPE_ASSIGN_CHECK(
*out_attrs, 0, mxnet::TShape({ishape[N], size.height, size.width, ishape[kC]}));
}
return true;
}
// nnvm shape-inference entry: unpack the parsed ResizeParam and delegate to
// the shared implementation.
inline bool ResizeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
return ResizeShapeImpl(nnvm::get<ResizeParam>(attrs.parsed), in_attrs, out_attrs);
}
// Resize inputs[0] into outputs[0] using OpenCV (CPU path only).
// `input_index`/`output_index` are flat element offsets used to address one
// image inside a batched (N, H, W, C) tensor; they default to 0 for the
// un-batched (H, W, C) case.
inline void ResizeImpl(const std::vector<TBlob>& inputs,
const std::vector<TBlob>& outputs,
const int height,
const int width,
const int interp,
const int input_index = 0,
const int output_index = 0) {
#if MXNET_USE_OPENCV
CHECK_NE(inputs[0].type_flag_, mshadow::kFloat16) << "opencv image mat doesn't support fp16";
// BUGFIX: the original check used `||`, which is true for every type and
// therefore never fired; `&&` correctly rejects int32 and int64 inputs.
CHECK((inputs[0].type_flag_ != mshadow::kInt32) && (inputs[0].type_flag_ != mshadow::kInt64))
<< "opencv resize doesn't support int32, int64";
// mapping to opencv matrix element type according to channel
const int DTYPE[] = {CV_32F, CV_64F, -1, CV_8U, CV_32S};
if (inputs[0].ndim() == 3) {
const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[C]);
// cv::Mat wrappers share memory with the blobs; resize writes in place.
cv::Mat buf(inputs[0].shape_[H], inputs[0].shape_[W], cv_type, inputs[0].dptr_);
cv::Mat dst(outputs[0].shape_[H], outputs[0].shape_[W], cv_type, outputs[0].dptr_);
cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
CHECK(!dst.empty());
// Verify OpenCV did not reallocate: output must still be the blob's memory.
CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr_);
} else {
const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[kC]);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
cv::Mat buf(inputs[0].shape_[kH],
inputs[0].shape_[kW],
cv_type,
inputs[0].dptr<DType>() + input_index);
cv::Mat dst(outputs[0].shape_[kH],
outputs[0].shape_[kW],
cv_type,
outputs[0].dptr<DType>() + output_index);
cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
CHECK(!dst.empty());
CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr<DType>() + output_index);
});
}
#else
LOG(FATAL) << "Build with USE_OPENCV=1 for image resize operator.";
#endif  // MXNET_USE_OPENCV
}
// Dispatch the resize: on GPU use the CUDA bilinear kernel (interp must be
// 1); on CPU resolve the target size and call the OpenCV implementation,
// looping over the batch (with OpenMP) for 4-D input using per-image flat
// element offsets.
template <typename xpu>
inline void ResizeImplWrapper(const ResizeParam& param,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<TBlob>& outputs) {
SizeParam size;
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
CHECK(param.interp == 1) << "interp should be 1 for using Resize on GPU.";
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
if (inputs[0].ndim() == 3) {
Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
Tensor<gpu, 3, DType> output = outputs[0].get<gpu, 3, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 3, DType>, float>(s, input, output);
} else {
Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
Tensor<gpu, 4, DType> output = outputs[0].get<gpu, 4, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 4, DType>, float>(s, input, output);
}
});
// NOTE(review): with xpu == gpu but MXNET_USE_CUDA off this branch
// silently does nothing -- confirm that configuration is unreachable.
#endif  // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
size = GetHeightAndWidth(inputs[0].shape_[H], inputs[0].shape_[W], param);
ResizeImpl(inputs, outputs, size.height, size.width, param.interp);
} else {
size = GetHeightAndWidth(inputs[0].shape_[kH], inputs[0].shape_[kW], param);
const auto batch_size = inputs[0].shape_[N];
// Flat element strides of one image in the input and output tensors.
const auto input_step = inputs[0].shape_[kH] * inputs[0].shape_[kW] * inputs[0].shape_[kC];
const auto output_step = size.height * size.width * inputs[0].shape_[kC];
#pragma omp parallel for
for (auto i = 0; i < batch_size; ++i) {
ResizeImpl(
inputs, outputs, size.height, size.width, param.interp, i * input_step, i * output_step);
}
}
}
// Forward compute entry for the image Resize operator.
// `req` is accepted for operator-interface compatibility but is not used in
// this body (the single output is always written in place).
template <typename xpu>
inline void Resize(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(outputs.size(), 1U);
const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed);
ResizeImplWrapper<xpu>(param, ctx, inputs, outputs);
}
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
|
ams.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
#include "_hypre_utilities.hpp"
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax
*
* Relaxation on the ParCSR matrix A with right-hand side f and
* initial guess u. Possible values for relax_type are:
*
* 1 = l1-scaled (or weighted) Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
* 3 = Kaczmarz
* 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
* x = BoomerAMG relaxation with relax_type = |x|
* (16 = Cheby)
*
* The default value of relax_type is 2.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA)
/* Thrust binary functor used on the CUDA path when assembling l1 norms:
 * returns y when x <= (4/3)*y, otherwise x.
 * NOTE(review): presumably implements the truncation from Remark 6.2 of the
 * smoothers paper referenced in the header comment -- confirm. */
struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex>
{
__host__ __device__
HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const
{
return x <= 4.0/3.0 * y ? y : x;
}
};
#endif
HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2)) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;

   /* raw local data of the approximation, rhs and scratch vectors */
   HYPRE_Complex *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Complex *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Complex *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
         /* temporarily turn off per-call stream syncs; one explicit sync
            is performed at the end of this branch */
         HYPRE_Int sync_stream = hypre_HandleCudaComputeStreamSync(hypre_handle());
         hypre_HandleCudaComputeStreamSync(hypre_handle()) = 0;
#endif

         /* v = relax_weight * (f - A u) */
         hypre_ParVectorCopy(f, v);
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);

#if defined(HYPRE_USING_CUDA)
         /* device kernel: u[i] += v[i] / l1_norms[i] */
         hypreDevice_IVAXPY(num_rows, l1_norms, v_data, u_data);
#else /* #if defined(HYPRE_USING_CUDA) */
         HYPRE_Int i;
         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms)
#endif
         for (i = 0; i < num_rows; i++)
         {
            u_data[i] += v_data[i] / l1_norms[i];
         }
#endif /* #if defined(HYPRE_USING_CUDA) */

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
         /* restore the sync policy and synchronize once */
         hypre_HandleCudaComputeStreamSync(hypre_handle()) = sync_stream;
         hypre_SyncCudaComputeStream(hypre_handle());
#endif
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         /* host copy of the off-processor entries of u needed by this rank */
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

            /* pack the locally-owned entries requested by neighbor ranks */
            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
            }
            /* blocking exchange: create the handle, then destroy (wait) */
            comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, u_buf_data, u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass: u_i += (f - A u)_i / l1_norms_i */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass (same update, reverse row order) */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR: residual update scaled by omega */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR: also corrects with the already-updated part (dif) */
         {
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* rows below i have already been updated this pass */
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* rows above i have already been updated this pass */
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }

         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, u_buf_data, u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         /* Forward local pass: project u onto each row's hyperplane
            (update spread over the row's local nonzeros) */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A,
                                    f,
                                    max_eig_est,
                                    min_eig_est,
                                    cheby_fraction, cheby_order, 1,
                                    0, u, v, z);
         }
         else
         {
            /* |relax_type| selects the BoomerAMG relaxation scheme */
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInRangeOf
*
* Return a vector that belongs to the range of a given matrix.
*--------------------------------------------------------------------------*/
/* Create and initialize a parallel vector compatible with the range (row
 * space) of A.  The vector owns its data but borrows A's row partitioning. */
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *range_vec =
      hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                            hypre_ParCSRMatrixGlobalNumRows(A),
                            hypre_ParCSRMatrixRowStarts(A));

   hypre_ParVectorInitialize(range_vec);
   hypre_ParVectorOwnsData(range_vec) = 1;
   hypre_ParVectorOwnsPartitioning(range_vec) = 0;

   return range_vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInDomainOf
*
* Return a vector that belongs to the domain of a given matrix.
*--------------------------------------------------------------------------*/
/* Create and initialize a parallel vector compatible with the domain (column
 * space) of A.  The vector owns its data but borrows A's column partitioning. */
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *domain_vec =
      hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                            hypre_ParCSRMatrixGlobalNumCols(A),
                            hypre_ParCSRMatrixColStarts(A));

   hypre_ParVectorInitialize(domain_vec);
   hypre_ParVectorOwnsData(domain_vec) = 1;
   hypre_ParVectorOwnsPartitioning(domain_vec) = 0;

   return domain_vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockSplit
*
* Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
* block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
/* Extract the dim sub-vectors x_0,...,x_{dim-1} from the interleaved block
 * vector x, i.e. x_d[i] = x[dim*i + d].  dim is at most 3. */
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Real *block_data, *sub_data[3];
   HYPRE_Int d, i, local_size;

   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
   block_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      sub_data[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* de-interleave one component at a time */
   for (d = 0; d < dim; d++)
      for (i = 0; i < local_size; i++)
         sub_data[d][i] = block_data[dim * i + d];

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockGather
*
* Compose a parallel block vector x from dim given sub-vectors
* x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
/* Compose the interleaved block vector x from the dim sub-vectors
 * x_0,...,x_{dim-1}, i.e. x[dim*i + d] = x_d[i].  dim is at most 3. */
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Real *block_data, *sub_data[3];
   HYPRE_Int d, i, local_size;

   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
   block_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      sub_data[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* interleave one component at a time */
   for (d = 0; d < dim; d++)
      for (i = 0; i < local_size; i++)
         block_data[dim * i + d] = sub_data[d][i];

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBlockSolve
*
* Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
* Here B is a given BoomerAMG solver for A, while x and b are "block"
* parallel vectors.
*--------------------------------------------------------------------------*/
/* Apply the block-diagonal solver diag(B) to diag(A) x = b, where B is a
 * BoomerAMG solver for A and x, b are interleaved block vectors.  The number
 * of blocks is deduced from the size of x relative to A. */
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   hypre_ParVector *rhs_part[3];
   hypre_ParVector *sol_part[3];
   HYPRE_Int k, nblocks;

   /* number of interleaved scalar fields in the block vector */
   nblocks = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   if (nblocks == 1)
   {
      /* not a block system: solve directly */
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   for (k = 0; k < nblocks; k++)
   {
      rhs_part[k] = hypre_ParVectorInRangeOf(A);
      sol_part[k] = hypre_ParVectorInRangeOf(A);
   }

   /* de-interleave, solve each scalar system, then re-interleave x */
   hypre_ParVectorBlockSplit(b, rhs_part, nblocks);
   hypre_ParVectorBlockSplit(x, sol_part, nblocks);

   for (k = 0; k < nblocks; k++)
      hypre_BoomerAMGSolve(B, A, rhs_part[k], sol_part[k]);

   hypre_ParVectorBlockGather(x, sol_part, nblocks);

   for (k = 0; k < nblocks; k++)
   {
      hypre_ParVectorDestroy(rhs_part[k]);
      hypre_ParVectorDestroy(sol_part[k]);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFixZeroRows
*
* For every zero row in the matrix: set the diagonal element to 1.
*--------------------------------------------------------------------------*/
/* For every (numerically) zero row of A, set the diagonal entry to 1 and
 * zero out the remaining local and off-processor entries of the row. */
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int  *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int  *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Real *diag_a = hypre_CSRMatrixData(diag);
   HYPRE_Int  *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Real *offd_a = hypre_CSRMatrixData(offd);
   HYPRE_Int   nrows = hypre_ParCSRMatrixNumRows(A);
   HYPRE_Int   has_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int   row, k;
   HYPRE_Real  row_sum;
   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real  eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (row = 0; row < nrows; row++)
   {
      row_sum = 0.0;
      for (k = diag_i[row]; k < diag_i[row + 1]; k++)
         row_sum += fabs(diag_a[k]);
      if (has_offd)
         for (k = offd_i[row]; k < offd_i[row + 1]; k++)
            row_sum += fabs(offd_a[k]);

      if (row_sum <= eps)
      {
         /* replace the row by the corresponding row of the identity */
         for (k = diag_i[row]; k < diag_i[row + 1]; k++)
            diag_a[k] = (diag_j[k] == row) ? 1.0 : 0.0;
         if (has_offd)
            for (k = offd_i[row]; k < offd_i[row + 1]; k++)
               offd_a[k] = 0.0;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* the result lives in the same memory space as A */
   HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 );

   /* multi-threaded host runs are delegated to the threaded variant */
   if (exec == HYPRE_EXEC_HOST)
   {
      HYPRE_Int num_threads = hypre_NumThreads();
      if (num_threads > 1)
      {
         return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr);
      }
   }

   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1);
   /* scratch buffers follow the execution policy, not A's location */
   HYPRE_MemoryLocation memory_location_tmp = exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
   HYPRE_Real *diag_tmp = NULL;

   HYPRE_Int *cf_marker_offd = NULL, *cf_marker_dev = NULL;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      /* NOTE(review): unlike hypre_ParCSRRelax, this path assumes the
         matvec comm package already exists — confirm callers guarantee it */
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
      {
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
      {
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
      }
      index = 0;
      /* pack the locally-owned marker entries requested by neighbors */
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }
      /* blocking exchange; the receive buffer may live on the device */
      comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, HYPRE_MEMORY_HOST, int_buf_data,
                                                    memory_location_tmp, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

      if (exec == HYPRE_EXEC_DEVICE)
      {
         /* mirror the local marker on the device for the row-sum kernels */
         cf_marker_dev = hypre_TAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE);
         hypre_TMemcpy(cf_marker_dev, cf_marker, HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      }
      else
      {
         cf_marker_dev = cf_marker;
      }
   }

   if (option == 1)
   {
      /* Set the l1 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, cf_marker_dev, cf_marker_dev, l1_norm, 1, 1.0, "set");
      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 2)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 3)
   {
      /* Set the CF l2 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set");
      /* Add the CF l2 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add");
      }
   }
   else if (option == 4)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      /* keep a copy of abs(diag) for the truncation test below */
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
      hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1);
      /* Add the scaled l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 0.5, "add");
      }
      /* Truncate according to Remark 6.2: fall back to abs(diag) when
         the accumulated sum is at most (4/3)*abs(diag) */
#if defined(HYPRE_USING_CUDA)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] <= 4.0/3.0 * diag_tmp[i])
            {
               l1_norm[i] = diag_tmp[i];
            }
         }
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      /* Set the diag element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0);
#if defined(HYPRE_USING_CUDA)
      if ( exec == HYPRE_EXEC_DEVICE)
      {
         /* replace exact zeros by 1.0 to keep the division well-defined */
         thrust::identity<HYPRE_Complex> identity;
         HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] == 0.0)
            {
               l1_norm[i] = 1.0;
            }
         }
      }
      /* NOTE(review): this early return skips the cf_marker_offd /
         cf_marker_dev cleanup below — possible leak when cf_marker is
         given with option 5; confirm callers never combine the two */
      *l1_norm_ptr = l1_norm;

      return hypre_error_flag;
   }

   /* Handle negative definite matrices */
   if (!diag_tmp)
   {
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
   }

   /* Set the diag element */
   hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0);

#if defined(HYPRE_USING_CUDA)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* flip the sign of rows with negative diagonal, then fail if any
         resulting norm is zero */
      HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, thrust::negate<HYPRE_Real>(),
                         is_negative<HYPRE_Real>() );
      //bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) );
      bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0, thrust::minimum<HYPRE_Real>() );
      if ( any_zero )
      {
         hypre_error_in_arg(1);
      }
   }
   else
#endif
   {
      for (i = 0; i < num_rows; i++)
      {
         if (diag_tmp[i] < 0.0)
         {
            l1_norm[i] = -l1_norm[i];
         }
      }
      for (i = 0; i < num_rows; i++)
      {
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
      }
   }

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE);
   }
   hypre_TFree(cf_marker_offd, memory_location_tmp);
   hypre_TFree(diag_tmp, memory_location_tmp);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDiagRows
*
* For every row containing only a diagonal element: set it to d.
*--------------------------------------------------------------------------*/
/* For every row whose only entry is the local diagonal element (and whose
 * off-processor part is empty), set that diagonal entry to d. */
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int  *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int  *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Real *diag_a = hypre_CSRMatrixData(diag);
   HYPRE_Int  *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int   has_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int   nrows = hypre_ParCSRMatrixNumRows(A);
   HYPRE_Int   row, first;

   for (row = 0; row < nrows; row++)
   {
      first = diag_i[row];
      /* exactly one diag entry, which is the diagonal, and empty offd row */
      if ((diag_i[row + 1] == first + 1) && (diag_j[first] == row) &&
          (!has_offd || (offd_i[row + 1] == offd_i[row])))
      {
         diag_a[first] = d;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSCreate
*
* Allocate the AMS solver structure.
*--------------------------------------------------------------------------*/
/* Allocate and default-initialize the AMS solver structure.  All problem
 * data (A, G, coordinates, etc.) is attached later via the Set functions. */
void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */

   ams_data -> dim = 3;              /* 3D problem */
   ams_data -> maxit = 20;           /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;           /* convergence tolerance */
   ams_data -> print_level = 1;      /* print residual norm at each step */
   ams_data -> cycle_type = 1;       /* a 3-level multiplicative solver */
   ams_data -> A_relax_type = 2;     /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;    /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0; /* damping parameter */
   ams_data -> A_omega = 1.0;        /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;    /* Cheby: order (orders 1-4 are valid) */
   ams_data -> A_cheby_fraction = .3; /* Cheby: fraction of spectrum to smooth */

   ams_data -> B_G_coarsen_type = 10;  /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;     /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;     /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;       /* strength threshold */
   ams_data -> B_G_interp_type = 0;    /* interpolation type */
   ams_data -> B_G_Pmax = 0;           /* max nonzero elements in interp. rows */
   ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;      /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;   /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;          /* max nonzero elements in interp. rows */

   ams_data -> beta_is_zero = 0;       /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;
   ams_data -> x    = NULL;
   ams_data -> y    = NULL;
   ams_data -> z    = NULL;
   ams_data -> Gx   = NULL;
   ams_data -> Gy   = NULL;
   ams_data -> Gz   = NULL;

   /* work vectors allocated during setup */
   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;
   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;
   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   /* zero-conductivity region handling */
   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* ownership flags control what hypre_AMSDestroy frees */
   ams_data -> owns_Pi   = 1;
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}
/*--------------------------------------------------------------------------
* hypre_AMSDestroy
*
* Deallocate the AMS solver structure. Note that the input data (given
* through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/
/* Deallocate the AMS solver structure.  Only objects created internally (or
 * whose ownership was transferred via the owns_* flags) are destroyed; the
 * user-supplied input data is left alone.
 *
 * Fix: removed the redundant trailing `if (ams_data)` guard — the early
 * return above guarantees ams_data is non-NULL at that point. */
HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);

   /* NOTE(review): when G0 was set, ams_data->A presumably holds a modified
      copy created during setup (so destroying it here would be intentional);
      confirm against hypre_AMSSetup before changing this. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   hypre_SeqVectorDestroy(ams_data -> A_l1_norms);

   /* G, x, y, z, Gx, Gy and Gz are not destroyed */

   /* ams_data is guaranteed non-NULL here (early return above) */
   hypre_TFree(ams_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDimension
*
* Set problem dimension (2 or 3). By default we assume dim = 3.
*--------------------------------------------------------------------------*/
/* Set the problem dimension (2 or 3; default is 3).
 *
 * Fix: the original flagged an argument error for invalid dim values but
 * still stored them; now the invalid value is rejected and the previously
 * set (or default) dimension is preserved. */
HYPRE_Int hypre_AMSSetDimension(void *solver,
                                HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (dim != 2 && dim != 3)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   ams_data -> dim = dim;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDiscreteGradient
*
* Set the discrete gradient matrix G.
* This function should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
/* Attach the discrete gradient matrix G (caller retains ownership).
 * Must be called before hypre_AMSSetup(). */
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   ((hypre_AMSData *) solver) -> G = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCoordinateVectors
*
* Set the x, y and z coordinates of the vertices in the mesh.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
/* Attach the x/y/z vertex coordinate vectors (caller retains ownership).
 * Either this or SetEdgeConstantVectors must precede hypre_AMSSetup(). */
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> x = x;
   data -> y = y;
   data -> z = z;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetEdgeConstantVectors
*
* Set the vectors Gx, Gy and Gz which give the representations of
* the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
* edge element basis.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
/* Attach the edge-basis representations Gx/Gy/Gz of the constant fields
 * (1,0,0), (0,1,0), (0,0,1) — caller retains ownership.  Either this or
 * SetCoordinateVectors must precede hypre_AMSSetup(). */
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> Gx = Gx;
   data -> Gy = Gy;
   data -> Gz = Gz;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInterpolations
*
* Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
*
* This function is generally intended to be used only for high-order Nedelec
* discretizations (in the lowest order case, Pi is constructed internally in
* AMS from the discrete gradient matrix and the coordinates of the vertices),
* though it can also be used in the lowest-order case or for other types of
* discretizations (e.g. ones based on the second family of Nedelec elements).
*
* By definition, Pi is the matrix representation of the linear operator that
* interpolates (high-order) vector nodal finite elements into the (high-order)
* Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
* and similarly for Piy and Piz. Note that all these operators depend on the
* choice of the basis and degrees of freedom in the high-order spaces.
*
* The column numbering of Pi should be node-based, i.e. the x/y/z components of
* the first node (vertex or high-order dof) should be listed first, followed by
* the x/y/z components of the second node and so on (see the documentation of
* HYPRE_BoomerAMGSetDofFunc).
*
* If used, this function should be called before hypre_AMSSetup() and there is
* no need to provide the vertex coordinates. Furthermore, only one of the sets
* {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
* both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
* cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
* monolithic Pi (cycle_type < 10) require that Pi is not NULL.
*--------------------------------------------------------------------------*/
/* Attach the user-supplied Nedelec interpolation Pi and/or its components
 * Pix/Piy/Piz.  Since they come from the caller, AMS gives up ownership
 * and will not destroy them. */
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> Pi  = Pi;
   data -> Pix = Pix;
   data -> Piy = Piy;
   data -> Piz = Piz;
   data -> owns_Pi = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* alpha (the curl-curl term coefficient in the Maxwell problem).
*
* If this function is called, the coarse space solver on the range
* of Pi^T is a block-diagonal version of A_Pi. If this function is not
* called, the coarse space solver on the range of Pi^T is constructed
* as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/
/* Attach the alpha-Poisson matrix A_Pi used for the coarse solve on the
 * range of Pi^T; diagonal-only rows are penalized so the corresponding
 * (eliminated) degrees of freedom stay fixed. */
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* beta (the mass term coefficient in the Maxwell problem).
*
* This function call is optional - if not given, the Poisson matrix will
* be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
* that beta is 0 and use two-level (instead of three-level) methods.
*--------------------------------------------------------------------------*/
/* Attach the beta-Poisson matrix A_G.  A NULL argument declares beta == 0,
 * switching AMS to a two-level method; otherwise diagonal-only rows are
 * penalized so the eliminated degrees of freedom stay fixed. */
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> A_G = A_G;
   if (A_G)
   {
      /* Penalize the eliminated degrees of freedom */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);
      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }
   else
   {
      data -> beta_is_zero = 1;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetInteriorNodes
 *
 * Provide the indicator vector of nodes interior to the zero-conductivity
 * region: node i is interior exactly when interior_nodes[i] == 1.0.
 *
 * Must be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
                                    hypre_ParVector *interior_nodes)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->interior_nodes = interior_nodes;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetProjectionFrequency
 *
 * Choose how often, during iteration, the r.h.s. is projected onto the
 * compatible sub-space Ker(G0^T).
 *
 * The default is to project every 5th iteration.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
                                          HYPRE_Int projection_frequency)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->projection_frequency = projection_frequency;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetMaxIter
 *
 * Limit the number of iterations of the three-level method (default: 20).
 * To use AMS purely as a preconditioner, combine maxit = 1 with tol = 0.0
 * and print_level = 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetMaxIter(void *solver,
                              HYPRE_Int maxit)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->maxit = maxit;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetTol
 *
 * Choose the convergence tolerance applied when the method is used as a
 * solver (default: 1e-6).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetTol(void *solver,
                          HYPRE_Real tol)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->tol = tol;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetCycleType
 *
 * Select the three-level solver variant. Supported values:
 *
 *   1 = 3-level multipl. solver (01210)    <-- small solution time
 *   2 = 3-level additive solver (0+1+2)
 *   3 = 3-level multipl. solver (02120)
 *   4 = 3-level additive solver (010+2)
 *   5 = 3-level multipl. solver (0102010)  <-- small solution time
 *   6 = 3-level additive solver (1+020)
 *   7 = 3-level multipl. solver (0201020)  <-- small number of iterations
 *   8 = 3-level additive solver (0(1+2)0)  <-- small solution time
 *   9 = 3-level multipl. solver (01210) with discrete divergence
 *  11 = 5-level multipl. solver (013454310) <-- small solution time, memory
 *  12 = 5-level additive solver (0+1+3+4+5)
 *  13 = 5-level multipl. solver (034515430) <-- small solution time, memory
 *  14 = 5-level additive solver (01(3+4+5)10)
 *  20 = 2-level multipl. solver (0[12]0)
 *
 *   0 = a Hiptmair-like smoother (010)
 *
 * The default is 1.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetCycleType(void *solver,
                                HYPRE_Int cycle_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->cycle_type = cycle_type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetPrintLevel
 *
 * Choose the amount of output produced during the solution iterations.
 * The default is 1 (print the residual norm at each step).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
                                 HYPRE_Int print_level)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->print_level = print_level;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetSmoothingOptions
 *
 * Choose the relaxation parameters used for A.
 * Defaults: type 2, 1 sweep, weight 1.0, omega 1.0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
                                       HYPRE_Int A_relax_type,
                                       HYPRE_Int A_relax_times,
                                       HYPRE_Real A_relax_weight,
                                       HYPRE_Real A_omega)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->A_relax_type   = A_relax_type;
   ams->A_relax_times  = A_relax_times;
   ams->A_relax_weight = A_relax_weight;
   ams->A_omega        = A_omega;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetChebySmoothingOptions
 * AB: note: this could be added to the above,
 * but I didn't want to change parameter list)
 * Set parameters for chebyshev smoother for A. Default values: 2,.3.
 *
 * NOTE(review): the documented default for A_cheby_fraction is .3 (a
 * fraction), yet the parameter is declared HYPRE_Int, so a fractional
 * value is truncated at the call boundary. Confirm whether this should
 * be HYPRE_Real -- changing it would alter the public signature, so it
 * is only flagged here.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
                                            HYPRE_Int A_cheby_order,
                                            HYPRE_Int A_cheby_fraction)
{
   /* Plain setter: only stores the values in the AMS data structure. */
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   ams_data -> A_cheby_order = A_cheby_order;
   ams_data -> A_cheby_fraction = A_cheby_fraction;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGOptions
 *
 * Choose the BoomerAMG parameters used for B_Pi.
 * Defaults: coarsening 10, agg levels 1, relax 3, theta 0.25,
 * interpolation 0, Pmax 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_Pi_coarsen_type = B_Pi_coarsen_type;
   ams->B_Pi_agg_levels   = B_Pi_agg_levels;
   ams->B_Pi_relax_type   = B_Pi_relax_type;
   ams->B_Pi_theta        = B_Pi_theta;
   ams->B_Pi_interp_type  = B_Pi_interp_type;
   ams->B_Pi_Pmax         = B_Pi_Pmax;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGCoarseRelaxType
 *
 * Choose the AMG coarsest-level relaxation for B_Pi (default: 8).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGOptions
 *
 * Choose the BoomerAMG parameters used for B_G.
 * Defaults: coarsening 10, agg levels 1, relax 3, theta 0.25,
 * interpolation 0, Pmax 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_G_coarsen_type = B_G_coarsen_type;
   ams->B_G_agg_levels   = B_G_agg_levels;
   ams->B_G_relax_type   = B_G_relax_type;
   ams->B_G_theta        = B_G_theta;
   ams->B_G_interp_type  = B_G_interp_type;
   ams->B_G_Pmax         = B_G_Pmax;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGCoarseRelaxType
 *
 * Choose the AMG coarsest-level relaxation for B_G (default: 8).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams->B_G_coarse_relax_type = B_G_coarse_relax_type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputePi
 *
 * Construct the Pi interpolation matrix, which maps the space of vector
 * linear finite elements to the space of edge finite elements.
 *
 * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
 * where each block has the same sparsity structure as G, and the entries
 * can be computed from the vectors Gx, Gy, Gz.
 *
 * Pi has the row partition of G and dim copies of its column space: node
 * column j of G expands into the dim interleaved columns dim*j+0 .. +dim-1.
 * The parameter A is unused here; it is kept for interface uniformity with
 * the related Compute* routines.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      /* each column of G becomes dim columns of Pi */
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* size of the column partition array depends on the partitioning mode */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      /* fresh column partition: dim times the partition of G */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      /* row_starts is borrowed from G; col_starts was allocated above,
         so Pi owns (and will free) it */
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
      hypre_ParCSRMatrixInitialize(Pi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      /* Gz_data is read only when dim == 3 */
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

         /* each row of Pi holds dim entries per entry of G */
         for (i = 0; i < G_diag_nrows+1; i++)
            Pi_diag_I[i] = dim * G_diag_I[i];

         /* interleave the column indices: dim*j+d for direction d */
         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* entries: 0.5*|G_ij| times the per-edge values Gx/Gy/Gz,
            written x, y (,z) in order via the bumped data pointer */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

         /* skip the row-pointer copy when there are no off-processor columns */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               Pi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* expand the off-processor column map the same way as the columns */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *
 * Piz is created and returned only when dim == 3; for dim == 2 the output
 * pointer Piz_ptr is left untouched. The parameter A is unused here; it is
 * kept for interface uniformity with the related Compute* routines.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

   /* Compute Pix, Piy, Piz */
   {
      HYPRE_Int i, j;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      /* all components share the exact dimensions and partitions of G */
      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      /* partitions are borrowed from G, hence not owned */
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
      hypre_ParCSRMatrixInitialize(Pix);

      Piy = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Piy) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
      hypre_ParCSRMatrixInitialize(Piy);

      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      /* Gz_data is read only when dim == 3 */
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

         /* each component copies G's sparsity pattern verbatim */
         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
            Piz_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
            Piz_diag_J[i] = G_diag_J[i];
         }

         /* entries: 0.5*|G_ij| times the per-edge values Gx/Gy/Gz */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }
      else
      {
         /* 2D variant: identical to the dim == 3 branch minus the z component */
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
         }

         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

         /* skip the row-pointer copy when there are no off-processor columns */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
               Piz_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
            Piz_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* the off-processor column maps are identical to G's */
         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
            }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
   }

   *Pix_ptr = Pix;
   *Piy_ptr = Piy;
   if (dim == 3)
      *Piz_ptr = Piz;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
 *
 * Construct the matrix [G,Pi] which can be considered an interpolation
 * matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite elements space.
 *
 * Per nonzero of G the entries are written in the order G, Pi_x, Pi_y
 * (and Pi_z when the input dim is 3), and node column j of G expands into
 * the interleaved columns dim*j+0 .. +dim-1 (dim here includes the extra
 * copy for G itself). The parameter A is unused; it is kept for interface
 * uniformity with the related Compute* routines.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *G,
                              hypre_ParVector *Gx,
                              hypre_ParVector *Gy,
                              hypre_ParVector *Gz,
                              HYPRE_Int dim,
                              hypre_ParCSRMatrix **GPi_ptr)
{
   hypre_ParCSRMatrix *GPi;

   /* Take into account G: from here on dim counts G plus the Pi components */
   dim++;

   /* Compute GPi = [G, Pi_x, Pi_y, Pi_z] (G entry written first per nonzero) */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      /* each column of G becomes dim columns of GPi */
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* size of the column partition array depends on the partitioning mode */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      /* fresh column partition: dim times the partition of G */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];

      GPi = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(GPi) = 1;
      /* row_starts is borrowed from G; col_starts was allocated above,
         so GPi owns (and will free) it */
      hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;
      hypre_ParCSRMatrixInitialize(GPi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      /* Gz_data is read only in the 3D case (dim was already incremented) */
      if (dim == 4)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
         HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
         HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
         HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);

         /* each row of GPi holds dim entries per entry of G */
         for (i = 0; i < G_diag_nrows+1; i++)
            GPi_diag_I[i] = dim * G_diag_I[i];

         /* interleave the column indices: dim*j+d */
         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* per nonzero: the raw G entry first, then 0.5*|G_ij| times
            the per-edge values Gx/Gy (,Gz) */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *GPi_diag_data++ = G_diag_data[j];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 4)
                  *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
         HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
         HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
         HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);

         /* skip the row-pointer copy when there are no off-processor columns */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               GPi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *GPi_offd_data++ = G_offd_data[j];
               *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 4)
                  *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* expand the off-processor column map the same way as the columns */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
      }
   }

   *GPi_ptr = GPi;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
* - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int input_info = 0;
ams_data -> A = A;
/* Modifications for problems with zero-conductivity regions */
if (ams_data -> interior_nodes)
{
hypre_ParCSRMatrix *G0t, *Aorig = A;
/* Make sure that multiple Setup()+Solve() give identical results */
ams_data -> solve_counter = 0;
/* Construct the discrete gradient matrix for the zero-conductivity region
by eliminating the zero-conductivity nodes from G^t. The range of G0
represents the kernel of A, i.e. the gradients of nodal basis functions
supported in zero-conductivity regions. */
hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
{
HYPRE_Int i, j;
HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
HYPRE_Real *interior_nodes_data=hypre_VectorData(
hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
for (i = 0; i < nv; i++)
{
if (interior_nodes_data[i] != 1)
{
for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
G0tdA[j] = 0.0;
if (G0toI)
for (j = G0toI[i]; j < G0toI[i+1]; j++)
G0toA[j] = 0.0;
}
}
}
hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);
/* Construct the subspace matrix A_G0 = G0^T G0 */
ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);
/* Create AMG solver for A_G0 */
HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
HYPRE_BoomerAMGSetup(ams_data -> B_G0,
(HYPRE_ParCSRMatrix)ams_data -> A_G0,
0, 0);
/* Construct the preconditioner for ams_data->A = A + G0 G0^T.
NOTE: this can be optimized significantly by taking into account that
the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */
{
hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
hypre_ParCSRMatrix *B = Aorig;
hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
hypre_ParCSRMatrix *C;
HYPRE_Real factor, lfactor;
/* scale (penalize) G0 G0^T before adding it to the matrix */
{
HYPRE_Int i;
HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B));
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B));
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B));
lfactor = -1;
for (i = 0; i < B_diag_i[B_num_rows]; i++)
if (fabs(B_diag_data[i]) > lfactor)
lfactor = fabs(B_diag_data[i]);
for (i = 0; i < B_offd_i[B_num_rows]; i++)
if (fabs(B_offd_data[i]) > lfactor)
lfactor = fabs(B_offd_data[i]);
lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
}
hypre_ParcsrAdd(factor, A, 1.0, B, &C);
/*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);*/
/* scale (penalize) G0 G0^T before adding it to the matrix */
/*{
HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
HYPRE_Real *data = hypre_CSRMatrixData(A_local);
HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
HYPRE_Real factor, lfactor;
lfactor = -1;
for (i = 0; i < nnzB; i++)
if (fabs(dataB[i]) > lfactor)
lfactor = fabs(dataB[i]);
lfactor *= 1e-10;
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
for (i = 0; i < nnz; i++)
data[i] *= factor;
}
C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local);
C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0);
if (C_local)
hypre_CSRMatrixDestroy(C_tmp);
else
C_local = C_tmp;
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 1;
hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*/
hypre_ParCSRMatrixDestroy(A);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(G0t);
}
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */
/* Compute the l1 norm of the rows of A */
if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
{
HYPRE_Real *l1_norm_data = NULL;
hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data);
ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A));
hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data;
hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms, hypre_ParCSRMatrixMemoryLocation(ams_data -> A));
}
/* Chebyshev? */
if (ams_data -> A_relax_type == 16)
{
hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
&ams_data->A_max_eig_est,
&ams_data->A_min_eig_est);
}
/* If not given, compute Gx, Gy and Gz */
{
if (ams_data -> x != NULL && ams_data -> y != NULL &&
(ams_data -> dim == 2 || ams_data -> z != NULL))
input_info = 1;
if (ams_data -> Gx != NULL && ams_data -> Gy != NULL &&
(ams_data -> dim == 2 || ams_data -> Gz != NULL))
input_info = 2;
if (input_info == 1)
{
ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
if (ams_data -> dim == 3)
{
ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
}
}
}
if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
{
if (ams_data -> cycle_type == 20)
/* Construct the combined interpolation matrix [G,Pi] */
hypre_AMSComputeGPi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
else if (ams_data -> cycle_type > 10)
/* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
hypre_AMSComputePixyz(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pix,
&ams_data -> Piy,
&ams_data -> Piz);
else
/* Construct the Pi interpolation matrix */
hypre_AMSComputePi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
}
/* Keep Gx, Gy and Gz only if use the method with discrete divergence
stabilization (where we use them to compute the local mesh size). */
if (input_info == 1 && ams_data -> cycle_type != 9)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
/* Create the AMG solver on the range of G^T */
if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
{
HYPRE_BoomerAMGCreate(&ams_data -> B_G);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);
/* If not given, construct the coarse space matrix by RAP */
if (!ams_data -> A_G)
{
HYPRE_Int G_owned_col_starts;
if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
hypre_MatvecCommPkgCreate(ams_data -> G);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
ams_data -> A,
ams_data -> G,
&ams_data -> A_G);
/* Make sure that A_G has no zero rows (this can happen
if beta is zero in part of the domain). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;
ams_data -> owns_A_G = 1;
}
HYPRE_BoomerAMGSetup(ams_data -> B_G,
(HYPRE_ParCSRMatrix)ams_data -> A_G,
0, 0);
}
if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
/* Create the AMG solvers on the range of Pi{x,y,z}^T */
{
HYPRE_Int P_owned_col_starts;
HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);
/* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
{
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
}
/* Construct the coarse space matrices by RAP */
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
hypre_MatvecCommPkgCreate(ams_data -> Pix);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
ams_data -> A,
ams_data -> Pix,
&ams_data -> A_Pix);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
}
/* Make sure that A_Pix has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);
HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
(HYPRE_ParCSRMatrix)ams_data -> A_Pix,
0, 0);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
hypre_MatvecCommPkgCreate(ams_data -> Piy);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
ams_data -> A,
ams_data -> Piy,
&ams_data -> A_Piy);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
}
/* Make sure that A_Piy has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);
HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
(HYPRE_ParCSRMatrix)ams_data -> A_Piy,
0, 0);
if (ams_data -> Piz)
{
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
hypre_MatvecCommPkgCreate(ams_data -> Piz);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
ams_data -> A,
ams_data -> Piz,
&ams_data -> A_Piz);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
}
/* Make sure that A_Piz has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);
HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
(HYPRE_ParCSRMatrix)ams_data -> A_Piz,
0, 0);
}
}
else
/* Create the AMG solver on the range of Pi^T */
{
HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);
/* If not given, construct the coarse space matrix by RAP and
notify BoomerAMG that this is a dim x dim block system. */
if (!ams_data -> A_Pi)
{
HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
hypre_MatvecCommPkgCreate(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
if (ams_data -> cycle_type == 9)
{
/* Add a discrete divergence term to A before computing Pi^t A Pi */
{
hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;
/* scale GGt by h^2 */
{
HYPRE_Real h2;
HYPRE_Int i, j, k, ne;
hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);
hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);
HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));
for (i = 0; i < Gt_num_rows; i++)
{
/* determine the characteristic mesh size for vertex i */
h2 = 0.0;
ne = 0;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
{
k = Gt_diag_J[j];
h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
ne++;
}
if (ne != 0)
{
h2 /= ne;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
Gt_diag_data[j] *= h2;
for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
Gt_offd_data[j] *= h2;
}
}
}
/* we only needed Gx, Gy and Gz to compute the local mesh size */
if (input_info == 1)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
GGt = hypre_ParMatmul(ams_data -> G, Gt);
hypre_ParCSRMatrixDestroy(Gt);
/* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
hypre_ParcsrAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt);
/*{
hypre_ParCSRMatrix *A = GGt;
hypre_ParCSRMatrix *B = ams_data -> A;
hypre_ParCSRMatrix **C_ptr = &ApGGt;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
C_local = hypre_CSRMatrixBigAdd(A_local, B_local);
hypre_CSRMatrixBigJtoJ(C_local);
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*C_ptr = C;
}*/
hypre_ParCSRMatrixDestroy(GGt);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ApGGt,
ams_data -> Pi,
&ams_data -> A_Pi);
}
}
else
{
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ams_data -> A,
ams_data -> Pi,
&ams_data -> A_Pi);
}
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
}
ams_data -> owns_A_Pi = 1;
if (ams_data -> cycle_type != 20)
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
else
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
/* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
}
/* Make sure that A_Pi has no zero rows (this can happen for
some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
(HYPRE_ParCSRMatrix)ams_data -> A_Pi,
0, 0);
}
/* Allocate temporary vectors */
ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
if (ams_data -> A_G)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
}
if (ams_data -> r1 == NULL && ams_data -> A_Pix)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
}
if (ams_data -> Pi)
{
ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, my_id = -1;
   /* relative_resid starts at 0 so that a single-iteration run (maxit == 1,
      where no norms are computed) passes the tolerance test below */
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
   char cycle[30];
   /* Subspace data, indexed by cycle digit - 1:
      0 -> G (gradient space), 1 -> Pi (nodal vector space),
      2/3/4 -> Pix/Piy/Piz (scalar components of Pi) */
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   hypre_ParVector *z = NULL;

   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;

   /* The Pi system is a dim x dim block system, hence the block solver */
   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;

   /* r1/g1 are shared by the G and Pix/Piy/Piz spaces; r2/g2 belong to Pi */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;

   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
   }

   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);

   /* Compatible subspace projection for problems with zero-conductivity regions.
      Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }

   /* Translate the cycle type into the multiplicative/additive cycle string
      interpreted by hypre_ParCSRSubspacePrec. Digits name the spaces
      ('0' = fine-grid smoothing), '(' saves the residual, '+' makes the
      next correction additive. */
   if (ams_data -> beta_is_zero)
   {
      /* beta == 0: the gradient space (digit '1') is skipped entirely */
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1:
         case 3:
         case 5:
         case 7:
         default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2:
         case 4:
         case 6:
         case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11:
         case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1:
         default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }

   /* Main iteration: apply the subspace preconditioner until the relative
      residual drops below tol or maxit cycles have been performed.
      Residual norms are tracked only when maxit > 1 (iterative use);
      with maxit == 1 the solver acts as a single preconditioner application. */
   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf("                                            relative\n");
            hypre_printf("               residual        factor       residual\n");
            hypre_printf("               --------        ------       --------\n");
            hypre_printf("    Initial    %e                 %e\n",
                         r_norm, relative_resid);
         }
      }

      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle,
                               z);

      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf("    Cycle %2d   %e    %f     %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }

      if (relative_resid < ams_data -> tol)
      {
         /* increment i so that num_iterations counts the completed cycle */
         i++;
         break;
      }
   }

   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));

   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;

   /* tol > 0 and no convergence within maxit cycles is reported as an error */
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);

   if (z)
      hypre_ParVectorDestroy(z);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
* to additive, based on residual computed at '('.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;

   /* Interpret the cycle string one character at a time:
      '0'   - fine-grid smoothing,
      '1'.. - correction in subspace (digit - 1),
      '('   - save the current residual in r0,
      '+'   - make the next correction additive (use the saved residual),
      ')'   - no-op (closes a '(' group). */
   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
         continue;

      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }

      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }

      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }

      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         HYPRE_Int i = *op - '1';
         if (i < 0)
         {
            /* An unrecognized cycle character would yield a negative index;
               record the bad argument and return instead of falling through
               to an out-of-bounds access of A[i]/P[i].
               NOTE(review): characters above the last valid digit are not
               bounds-checked here since the array sizes are not passed in;
               callers must supply only valid digits. */
            hypre_error_in_arg(16);
            return hypre_error_flag;
         }

         /* skip empty subspaces */
         if (!A[i]) continue;

         /* compute the residual? */
         if (use_saved_residual)
         {
            /* additive mode: restrict the residual saved at '(' */
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            /* multiplicative mode: restrict the current residual x - A0 y */
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         /* solve (approximately) in the subspace and prolongate back */
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   /* Report the iteration count recorded by the last hypre_AMSSolve call. */
   *num_iterations = ((hypre_AMSData *) solver) -> num_iterations;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   /* Report the relative residual norm recorded by the last hypre_AMSSolve call. */
   *rel_resid_norm = ((hypre_AMSData *) solver) -> rel_resid_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
* compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* Without the interior-gradient AMG solver there is nothing to project. */
   if (!(ams_data -> B_G0))
   {
      return hypre_error_flag;
   }

   /* r1 = G0^t x */
   hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
   /* g1 ~= (G0^t G0)^{-1} r1, starting from a zero initial guess */
   hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
   hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
   /* x -= G0 g1, i.e. remove the component in the range of G0 */
   hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
   hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
 * - a matrix on the edges (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
 * and that the orientation of all edges is consistent. More specifically:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_BigInt *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;

   /* Each local row of A corresponds to one local edge */
   nedges = hypre_ParCSRMatrixNumRows(A);

   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      HYPRE_Int i, *II = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
      HYPRE_Int part_size;
      HYPRE_BigInt *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);

      /* Every edge row has exactly two entries (its two vertices) */
      for (i = 0; i <= nedges; i++)
         II[i] = 2*i;

      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i]   = -1.0;
            data[i+1] =  1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i]   = -1.0;
               data[i+1] =  1.0;
            }
            else
            {
               data[i]   =  1.0;
               data[i+1] = -1.0;
            }
         }
      }
      else
      {
         /* only edge_orientation values 1 and 2 are supported */
         hypre_error_in_arg(4);
      }

      /* Hand II, edge_vertex and data over to the local CSR matrix.
         NOTE(review): OwnsData = 1 means edge_vertex is adopted and freed
         when 'local' is destroyed below — the caller must not reuse it. */
      hypre_CSRMatrixI(local) = II;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;

      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;

      /* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      part_size = 2;
#else
      hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
      part_size++;
#endif
      row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < part_size; i++)
      {
         /* rows follow the edge partitioning of A, columns the vertex
            partitioning of x_coord */
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }

      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts, col_starts, 0, 0, 0);
      /* G owns the partitioning arrays allocated above */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      /* convert big (global) column indices to local ones before splitting
         into diag and offd parts */
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));

      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }

      /* Free the local matrix */
      hypre_CSRMatrixDestroy(local);
   }

   *G_ptr = G;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_BigInt *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_BigInt *edge_vertex)
{
   /* NOTE: b and x are not referenced in this routine; they are kept in the
      signature for interface compatibility with the FEI caller. */
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt *vert_part, num_global_vert;
   HYPRE_BigInt vert_start, vert_end;
   HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;

   /* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed-partition mode: each rank only stores its own [begin, end). */
   vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - big_local_vert;
   hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
#else
   /* Global-partition mode: gather all ranks' counts and prefix-sum them
      into a full (num_procs+1)-entry partition array. */
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);
   vert_part = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&big_local_vert, 1, HYPRE_MPI_BIG_INT, &vert_part[1], 1, HYPRE_MPI_BIG_INT, comm);
   vert_part[0] = 0;
   for (i = 0; i < num_procs; i++)
      vert_part[i+1] += vert_part[i];
   num_global_vert = vert_part[num_procs];
#endif

   /* Construct hypre parallel vectors for the vertex coordinates.
      All three share vert_part, so only one of them may own it (see the
      OwnsPartitioning flags below: ownership is transferred to G instead). */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices (vert_coord is packed as
      interleaved x,y,z triples, hence the 3*i strides). */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = (HYPRE_Int)(vert_number[i] - vert_start);
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }

   /* Change vertex numbers from local to global (edge_vertex is modified
      in place; two vertices per edge). */
   for (i = 0; i < 2*num_edges; i++)
      edge_vertex[i] = vert_number[edge_vertex[i]];

   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */

      /* Every edge row has exactly two entries: +1 at the first vertex,
         -1 at the second (gradient of linear nodal basis along the edge). */
      HYPRE_Int *II = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);

      for (i = 0; i <= num_edges; i++)
         II[i] = 2*i;

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i]   = 1.0;
         data[i+1] = -1.0;
      }

      hypre_CSRMatrixI(local) = II;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;

      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* G borrows A's row starts but takes ownership of vert_part. */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;

      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G, vert_start, vert_end);

      //hypre_CSRMatrixJ(local) = NULL;
      /* NOTE(review): local owns its arrays, so this destroy frees II, data
         AND the (in-place converted) edge_vertex array — the caller must not
         free edge_vertex afterwards; confirm against FEI callers. */
      hypre_CSRMatrixDestroy(local);
   }

   ams_data -> G = G;

   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (ams_data -> G)
hypre_ParCSRMatrixDestroy(ams_data -> G);
if (ams_data -> x)
hypre_ParVectorDestroy(ams_data -> x);
if (ams_data -> y)
hypre_ParVectorDestroy(ams_data -> y);
if (ams_data -> z)
hypre_ParVectorDestroy(ams_data -> z);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
/* Compute the l1 norms of the rows of A, split across num_threads thread
 * blocks; see the option table in the comment above. When cf_marker is
 * non-NULL the sums are restricted to entries whose column has the same
 * CF marker as the row (CF-relaxation variant).
 *
 * Fix: in option 4, `diag` was only assigned when the row's stored diagonal
 * entry (ii == i) was encountered. For a row with no stored diagonal — or
 * whose diagonal is filtered out in the CF branch — the truncation test
 * `l1_norm[i] <= 4.0/3.0*diag` read an uninitialized (or stale, previous-row)
 * value, which is undefined behavior. `diag` is now reset to 0.0 for every
 * row, making the truncation a no-op for such rows (l1 norms are >= 0).
 */
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int ii, ns, ne, rest, size;

   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      /* NOTE(review): assumes the matvec comm package has already been
         created for A; confirm callers guarantee this. */
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      /* Split the rows into num_threads contiguous chunks; the first `rest`
         chunks get one extra row. Thread k owns rows [ns, ne). */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         /* Squared l2 norm of the ith row. */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            diag = 0.0; /* FIX: reset per row; previously read uninitialized
                           when the row had no (unfiltered) diagonal entry */
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }

            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }
      else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
      {
         /* Set the diag element (assumes the stored diagonal is the first
            entry of each diag row, as hypre guarantees). */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = A_diag_data[A_diag_I[i]];
            if (l1_norm[i] == 0) l1_norm[i] = 1.0;
         }
      }

      if (option < 5)
      {
         /* Handle negative definite matrices */
         for (i = ns; i < ne; i++)
            if (A_diag_data[A_diag_I[i]] < 0)
               l1_norm[i] = -l1_norm[i];

         for (i = ns; i < ne; i++)
            /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
            if (fabs(l1_norm[i]) == 0.0)
            {
               hypre_error_in_arg(1);
               break;
            }
      }
   }

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelaxThreads
* 1 = l1-scaled Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
*--------------------------------------------------------------------------*/
/* Threaded l1-smoothers: relax_type 1 = l1-scaled Jacobi,
 * relax_type 2 = thread-blocked l1-scaled symmetric Gauss-Seidel (forward
 * then backward sweep within each thread's row block; off-block couplings
 * use the pre-sweep values, Jacobi-style).
 *
 * NOTE(review): relax_times and z are not referenced in this routine, and
 * my_id is computed but unused — presumably kept for signature compatibility
 * with other hypre relaxation routines; confirm. */
HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A,
                                   hypre_ParVector *f,
                                   HYPRE_Int relax_type,
                                   HYPRE_Int relax_times,
                                   HYPRE_Real *l1_norms,
                                   HYPRE_Real relax_weight,
                                   HYPRE_Real omega,
                                   hypre_ParVector *u,
                                   hypre_ParVector *Vtemp,
                                   hypre_ParVector *z)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data;     /* off-processor u values (valid only when num_procs > 1) */
   HYPRE_Real *v_buf_data;    /* send buffer for the halo exchange */
   HYPRE_Real *tmp_data;      /* snapshot of u used for off-block couplings in GS */
   HYPRE_Int i, j;
   HYPRE_Int ii, jj;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, num_threads, my_id;
   HYPRE_Real zero = 0.0;
   HYPRE_Real res, res2;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /* only allow jacobi and GS */
   if (relax_type > 2)
      relax_type = 2;

   /*-----------------------------------------------------------------
    * Exchange boundary values of u with neighboring processors, so
    * Vext_data holds the off-processor entries referenced by A_offd.
    * (When num_procs == 1, A_offd has no columns and Vext_data is
    * never dereferenced.)
    *-----------------------------------------------------------------*/
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data,
                                                 Vext_data);

      /*-----------------------------------------------------------------
       * Wait for the halo exchange of u to complete before relaxing.
       *-----------------------------------------------------------------*/
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   if (relax_type == 1) /* Jacobi */
   {
      /* Snapshot u into Vtemp so the update below reads only old values. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         Vtemp_data[i] = u_data[i];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if (A_diag_data[A_diag_i[i]] != zero)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight*res)/l1_norms[i];
         }
      }
   }
   else if (relax_type == 2) /* GS */
   {
      if (relax_weight == 1 && omega == 1)
      {
         /* Unweighted symmetric GS: tmp_data freezes the pre-sweep u so
          * couplings to rows outside this thread's block stay Jacobi-like. */
         tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            /* Thread j owns rows [ns, ne); first `rest` threads get one extra. */
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        /* in-block coupling: use the freshly updated value */
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* Weighted SSOR variant: res accumulates the residual against the
          * current iterate, res2 the correction term scaled by c2. */
         HYPRE_Real c1 = omega*relax_weight;
         HYPRE_Real c2 = omega*(1.0-relax_weight);
         tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
         {
            tmp_data[i] = u_data[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            /* Same thread-block partition of [0, n) as above. */
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  /* Vtemp keeps the pre-update value of u_data[i], used by
                   * the res2 correction below. */
                  Vtemp_data[i] = u_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii < i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii > i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
      }
   } /* end of Jacobi or G.S. */

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   return(relax_error);
}
|
GB_binop__div_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int16)
// A*D function (colscale): GB (_AxD__div_int16)
// D*A function (rowscale): GB (_DxB__div_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int16)
// C=scalar+B GB (_bind1st__div_int16)
// C=scalar+B' GB (_bind1st_tran__div_int16)
// C=A+scalar GB (_bind2nd__div_int16)
// C=A'+scalar GB (_bind2nd_tran__div_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT16 || GxB_NO_DIV_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C += A+B where all three matrices are dense; the template expands the
    // GB_BINOP macro above (signed int16 division with zero-divide handling).
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where all three matrices are dense, no accumulator.
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* flags; caller falls back to generic
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B: accumulate the sparse matrix B into the dense matrix C,
    // using the precomputed ek_slicing task decomposition of B.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a scalar into every entry of the dense matrix C.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; emitted by the code generator and kept as-is
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: scale the columns of A by the diagonal matrix D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    // C = D*B: scale the rows of B by the diagonal matrix D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B (set union of patterns),
    // with the "+" operator being signed int16 division here.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix ek_slicing workspaces, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__div_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult (method 02): C<#> = A.*B when A is sparse/hypersparse and
    // B is bitmap/full. GB_BINOP_FLIP is 0 for this operator, so the
    // non-flipped branch below is the one compiled in.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult (method 04): C<M> = A.*B with M sparse/hypersparse and
    // both A and B bitmap/full; tasks are sliced over M.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = div (x, Bx [p]) for all bnz entries, scalar bound as the
    // first ("x") argument.  Bb is the bitmap of B (NULL if B is full).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (x, bij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = div (Ax [p], y) for all anz entries, scalar bound as the
    // second ("y") argument.  Ab is the bitmap of A (NULL if A is full).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (aij, y, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 16) ; \
}
GrB_Info GB (_bind1st_tran__div_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A while applying div (x, aij) via the
    // GB_CAST_OP macro defined just above this function.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 16) ; \
}
GrB_Info GB (_bind2nd_tran__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): transpose A while applying div (aij, y) via the
    // GB_CAST_OP macro defined just above this function.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
namespace mxnet {
namespace kvstore {
// maintain same order in frontend.
// Control commands sent by the frontend via SimpleApp; the enumerator order
// is part of the wire protocol and must match the frontend exactly.
enum class CommandType {
  kController, kSetMultiPrecision, kStopServer, kSyncMode,
  kSetGradientCompression, kSetProfilerParams
};

// Kind of a data (push/pull) request; paired with a dtype via the Cantor
// pairing in GetCommandType to form the single int `cmd` field of ps-lite.
enum class RequestType {
  kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};

// Decoded form of a data-request command: what kind of request it is and
// the mshadow dtype of the transported values.
struct DataHandleType {
  RequestType requestType;
  int dtype;
};
/*!
* Uses Cantor pairing function to generate a unique number given two numbers.
* This number can also be inverted to find the unique pair whose Cantor value is this number.
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
* \param requestType RequestType
* \param dtype integer
* \return Cantor value of arguments
*/
// Encode (requestType, dtype) into one integer with the Cantor pairing
// function; DepairDataHandleType is the inverse.
static int GetCommandType(RequestType requestType, int d) {
  const int m = static_cast<int>(requestType);
  const int s = m + d;
  return s * (s + 1) / 2 + d;
}
/*!
* Unpairs Cantor value and finds the two integers used to pair.
* Then returns DataHandleType object with those numbers.
* \param cmd DataHandleCommand generated by GetCommandType function
* \return DataHandleType
*/
/*!
 * Unpairs a Cantor value and finds the two integers used to pair.
 * Then returns a DataHandleType object with those numbers.
 *
 * Implemented with exact integer arithmetic: the previous version used
 * std::floor/std::sqrt, which (a) depended on <cmath> being pulled in
 * transitively -- it is not in this header's include list -- and (b) is
 * fragile to floating-point rounding near triangular-number boundaries.
 * \param cmd DataHandleCommand generated by the GetCommandType function
 * \return DataHandleType
 */
static DataHandleType DepairDataHandleType(int cmd) {
  // Find the largest w with w*(w+1)/2 <= cmd. cmd is small (requestType has
  // 3 values, dtype is an mshadow type id), so a linear scan is cheap.
  int w = 0;
  while ((w + 1) * (w + 2) / 2 <= cmd) {
    ++w;
  }
  int t = ((w * w) + w) / 2;  // triangular number below cmd
  int y = cmd - t;            // second paired value (dtype)
  int x = w - y;              // first paired value (request type)
  CHECK_GE(x, 0);
  CHECK_GE(y, 0);
  DataHandleType type;
  type.requestType = static_cast<RequestType>(x);
  type.dtype = y;
  return type;
}
/**
* \brief executor runs a function using the thread called \ref Start
*/
class Executor {
 public:
  /**
   * \brief Run the executor's event loop on the calling thread.
   *
   * Blocks, draining the queue and invoking each queued function on this
   * thread, until a sentinel Block with an empty function (pushed by
   * \ref Stop) is dequeued.  The mutex is released while a function runs,
   * so other threads can keep enqueueing via \ref Exec.
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this]{return !queue_.empty();});
      Block blk = std::move(queue_.front());
      queue_.pop();
      // run the work item without holding the lock
      lk.unlock();
      if (blk.f) {
        blk.f();
        blk.p->set_value();  // wake the Exec() caller waiting on the future
      } else {
        // empty function is the stop sentinel: release the waiter and exit
        blk.p->set_value(); break;
      }
      lk.lock();
    }
  }

  /**
   * \brief function
   */
  typedef std::function<void()> Func;

  /**
   * \brief let the thread called \ref Start to exec a function. threadsafe
   *
   * Synchronous: does not return until the loop thread has finished
   * running \a func (it waits on the Block's promise/future pair).
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    fut.wait();
  }

  /**
   * \brief stop the thread, threadsafe
   */
  void Stop() {
    // an empty Func is the sentinel that makes Start() return
    Exec(Func());
  }

 private:
  // one queued unit of work plus the promise used to signal its completion
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};
class KVStoreDistServer {
public:
  // Sets up the ps-lite server: command messages (SimpleApp) go to
  // CommandHandle, data (push/pull) messages go to DataHandleEx.
  // Values travel as raw chars and are reinterpreted per-request dtype.
  KVStoreDistServer() {
    using namespace std::placeholders;
    ps_server_ = new ps::KVServer<char>(0);
    static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
        std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
    ps_server_->set_request_handle(
        std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
    sync_mode_ = false;
    gradient_compression_ = std::make_shared<GradientCompression>();
    log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  }

  ~KVStoreDistServer() {
    // turn the profiler off before tearing down the server
    profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
    delete ps_server_;
  }

  // Registers the callback that executes frontend kController commands.
  void set_controller(const KVStore::Controller& controller) {
    CHECK(controller);
    controller_ = controller;
  }

  // Registers the optimizer callback used to apply merged gradients.
  void set_updater(const KVStore::Updater& updater) {
    CHECK(updater);
    updater_ = updater;
  }

  /**
   * \brief blocked until received the command \a kSyncMode
   */
  void Run() {
    // runs the executor loop on this thread; returns after kStopServer
    exec_.Start();
  }
private:
  // Per-key aggregation state for pushes.
  struct UpdateBuf {
    // push requests not yet responded to; in sync mode the update is
    // applied once one request per worker has arrived
    std::vector<ps::KVMeta> request;
    // gradients merged across workers (sync mode)
    NDArray merged;
    // temp_array is used to cast received values as float32 for computation if required
    NDArray temp_array;
  };

  // Dispatches a frontend control message; recved.head is a CommandType.
  // Always responds to the sender when handling is done.
  void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
    CommandType recved_type = static_cast<CommandType>(recved.head);
    switch (recved_type) {
      case CommandType::kStopServer:
        exec_.Stop();
        break;
      case CommandType::kSyncMode:
        sync_mode_ = true;
        break;
      case CommandType::kSetGradientCompression:
        gradient_compression_->DecodeParams(recved.body);
        break;
      case CommandType::kSetProfilerParams:
        // last char is the type of profiler command
        ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand>
                                          (recved.body.back() - '0'),
                                      recved.body);
        break;
      case CommandType::kSetMultiPrecision:
        // uses value 1 for message id from frontend
        if (!multi_precision_) {
          multi_precision_ = true;
          CreateMultiPrecisionCopies();
        }
        break;
      case CommandType::kController:
        // this uses value 0 for message id from frontend
        // let the main thread to execute ctrl, which is necessary for python
        exec_.Exec([this, recved]() {
            CHECK(controller_);
            controller_(recved.head, recved.body);
          });
        break;
    }
    app->Response(recved);
  }
/*
* For keys already initialized, if necessary create stored_realt.
* This will only be used if by some wrong usage of kvstore,
* some keys are initialized before optimizer is set.
*/
  /*
   * For keys already initialized, if necessary create stored_realt.
   * This will only be used if by some wrong usage of kvstore,
   * some keys are initialized before optimizer is set.
   */
  void CreateMultiPrecisionCopies() {
    for (auto const &stored_entry : store_) {
      const int key = stored_entry.first;
      const NDArray &stored = stored_entry.second;
      // only non-fp32 keys need a float32 shadow copy
      if (stored.dtype() != mshadow::kFloat32) {
        auto &stored_realt = store_realt_[key];
        if (stored.storage_type() == kRowSparseStorage) {
          stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
                                 true, mshadow::kFloat32);
        } else {
          stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
        }
        // any existing merge buffer must also be re-created as float32
        auto &update = update_buf_[key];
        if (!update.merged.is_none()) {
          if (update.merged.storage_type() == kRowSparseStorage) {
            update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(),
                                    true, mshadow::kFloat32);
          } else {
            update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false,
                                    mshadow::kFloat32);
          }
        }
        // switching precision mid-aggregation would corrupt pending pushes
        CHECK(update.request.size() == 0)
          << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
          << "Please set optimizer before pushing keys." << key << " " << update.request.size();
        CopyFromTo(stored, stored_realt);
      }
    }
    // block until all the async casts above have finished
    for (auto const &stored_realt_entry : store_realt_) {
      stored_realt_entry.second.WaitToRead();
    }
  }
  // Dispatches a profiler sub-command; the command id was encoded as the
  // last character of the message body by the frontend.
  void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) {
    switch (type) {
      case KVStoreServerProfilerCommand::kSetConfig:
        // strip the trailing command-id char before parsing the config
        SetProfilerConfig(body.substr(0, body.size() - 1));
        break;
      case KVStoreServerProfilerCommand::kState:
        MXSetProfilerState(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kPause:
        MXProfilePause(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kDump:
        MXDumpProfile(static_cast<int>(body.front() - '0'));
        break;
    }
  }

  // Parses "key1:val1,key2:val2,..." into C string arrays and forwards them
  // to MXSetProfilerConfig. The output filename is prefixed with this
  // server's rank so per-server dumps do not clash.
  void SetProfilerConfig(std::string params_str) {
    std::vector<std::string> elems;
    mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
    std::vector<const char*> ckeys;
    std::vector<const char*> cvals;
    ckeys.reserve(elems.size());
    cvals.reserve(elems.size());
    for (size_t i=0; i < elems.size(); i++) {
      std::vector<std::string> parts;
      mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
      CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
      CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
      CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0];
      if (parts[0] == "filename") {
        parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
      }
      // MXSetProfilerConfig wants raw C strings; copy into heap buffers
      char* ckey = new char[parts[0].length() + 1];
      std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str());
      ckeys.push_back(ckey);
      char* cval = new char[parts[1].length() + 1];
      std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str());
      cvals.push_back(cval);
    }
    MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
    // release the temporary C string copies
    for (size_t i=0; i < ckeys.size(); i++) {
      delete[] ckeys[i];
      delete[] cvals[i];
    }
  }
  // Entry point for all data (push/pull) requests: decodes the Cantor-paired
  // cmd into (request type, dtype) and dispatches to the matching handler.
  void DataHandleEx(const ps::KVMeta& req_meta,
                    const ps::KVPairs<char>& req_data,
                    ps::KVServer<char>* server) {
    DataHandleType type = DepairDataHandleType(req_meta.cmd);
    switch (type.requestType) {
      case RequestType::kRowSparsePushPull:
        DataHandleRowSparse(type, req_meta, req_data, server);
        break;
      case RequestType::kCompressedPushPull:
        DataHandleCompressed(type, req_meta, req_data, server);
        break;
      case RequestType::kDefaultPushPull:
        DataHandleDefault(type, req_meta, req_data, server);
        break;
    }
  }

  // True when multi-precision mode requires a float32 shadow copy for this
  // request's dtype (i.e. the transported values are not already fp32).
  inline bool has_multi_precision_copy(const DataHandleType type) {
    return multi_precision_ && type.dtype != mshadow::kFloat32;
  }
  // Applies accumulated pushes for `key` once ready: immediately in async
  // mode, or after every worker's push has arrived in sync mode. Runs the
  // updater (if any) on the executor thread, answers pending pull requests,
  // and clears the request list.
  inline void ApplyUpdates(const DataHandleType type, const int key,
                           const ps::KVPairs<char>& req_data, UpdateBuf *update_buf,
                           ps::KVServer<char>* server) {
    if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
      // let the main thread to execute updater_, which is necessary for python
      auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
      auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
      if (updater_) {
        // Exec is synchronous, so capturing locals by reference is safe here
        exec_.Exec([this, key, &update, &stored](){
            CHECK(updater_);
            updater_(key, update, &stored);
          });
      } else {
        CHECK(sync_mode_) << "Updater needs to be set for async mode";
        // if no updater, just copy
        CopyFromTo(update_buf->merged, &stored);
      }
      if (log_verbose_)  {
        LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
      }
      /**
       * Request can be for either push, pull or pushpull
       * If pull flag is set, respond immediately with the updated values
       * Otherwise, only send the notification
       */
      bool has_pull = false;
      for (const auto& req : update_buf->request) {
        has_pull = has_pull || req.pull;
      }
      if (has_pull) {
        // if there is a pull request, perform WaitToRead() once before DefaultStorageResponse
        if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
        stored.WaitToRead();
        for (const auto& req : update_buf->request) {
          if (req.pull) {
            DefaultStorageResponse(type, key, req, req_data, server);
          }
        }
        update_buf->request.clear();
      } else {
        // otherwise, send response directly
        for (const auto& req : update_buf->request) {
          server->Response(req);
        }
        update_buf->request.clear();
        // keep the dtype copy in sync with the fp32 master copy
        if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
        stored.WaitToRead();
      }
    } else {
      // sync mode, still waiting for more workers: just ensure the merge
      // (an async engine op) has completed before returning
      update_buf->merged.WaitToRead();
    }
  }
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
const int64_t master_key, const int64_t num_rows) {
indices[0] = 0;
for (int64_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(keys[i]);
auto row_id = key - master_key;
indices[i - 1] = row_id;
}
}
  // Adds a received row_sparse gradient into updateBuf->merged:
  // merged = merged + recved (cast to fp32 first in multi-precision mode).
  // Blocks until the accumulated result is readable.
  void AccumulateRowSparseGrads(const DataHandleType type,
                                const NDArray& recved,
                                UpdateBuf* updateBuf) {
    // row_sparse addition may change the number of stored rows, so the sum
    // goes into a fresh output array and is then copied back into merged
    NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
                has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
    if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
    const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
    // accumulate row_sparse gradients
    using namespace mshadow;
    Engine::Get()->PushAsync(
      [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
        op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
          {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
        on_complete();
      }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
      FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
    CopyFromTo(out, &(updateBuf->merged), 0);
    updateBuf->merged.WaitToRead();
  }
  // Answers a row_sparse pull: copies the requested rows of the stored
  // parameter into the response, keyed the same way as the request
  // (keys[0] = master key with length 0, keys[1..] = per-row keys).
  void RowSparsePullResponse(const DataHandleType type,
                             const int master_key,
                             const size_t num_rows,
                             const ps::KVMeta& req_meta,
                             const ps::KVPairs<char>& req_data,
                             ps::KVServer<char>* server) {
    if (log_verbose_) LOG(INFO) << "pull: " << master_key;
    ps::KVPairs<char> response;
    if (num_rows == 0) {
      // no rows requested: echo the keys back with zero lengths
      std::vector<int> lens(req_data.keys.size(), 0);
      response.keys = req_data.keys;
      response.lens.CopyFrom(lens.begin(), lens.end());
      server->Response(req_meta, response);
      return;
    }
    const NDArray& stored = store_[master_key];
    // as ApplyUpdates may still be copying store_realt_ into store_
    if (has_multi_precision_copy(type)) stored.WaitToRead();
    CHECK(!stored.is_none()) << "init " << master_key << " first";
    auto shape = stored.shape();
    auto unit_len = shape.ProdShape(1, shape.ndim());  // elements per row
    const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
    const int unit_size = unit_len * num_bytes;        // bytes per row
    const char* data = static_cast<char *> (stored.data().dptr_);
    auto len = num_rows * unit_size;
    // concat values
    response.vals.resize(len);
    #pragma omp parallel for
    for (size_t i = 1; i <= num_rows; i++) {
      // keys[i] encodes master_key + row_id; copy that row into slot i-1
      int key = DecodeKey(req_data.keys[i]);
      int64_t row_id = key - master_key;
      const auto src = data + row_id * unit_size;
      auto begin = (i - 1) * unit_size;
      auto end = i * unit_size;
      response.vals.segment(begin, end).CopyFrom(src, unit_size);
    }
    // setup response
    response.keys = req_data.keys;
    std::vector<int> lens(req_data.keys.size(), unit_len);
    lens[0] = 0;  // the master key itself carries no data
    response.lens.CopyFrom(lens.begin(), lens.end());
    server->Response(req_meta, response);
  }
  // First push of a row_sparse key: allocates the stored array (fp32 shadow
  // in multi-precision mode), materializes it as a fully-dense row_sparse
  // NDArray, and copies/casts the received values in.
  void InitRowSparseStored(const DataHandleType type,
                           const int master_key,
                           const size_t num_rows,
                           const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
    auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key];
    int dtype = type.dtype;
    int num_bytes = mshadow::mshadow_sizeof(dtype);
    // lens[0] is the zero-length master key; lens[1] is bytes per row
    auto unit_len = req_data.lens[1] / num_bytes;
    CHECK_GT(unit_len, 0);
    size_t ds[] = {num_rows, (size_t) unit_len};
    mxnet::TShape dshape(ds, ds + 2);
    CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
    TBlob recv_blob;
    MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
      recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
    })
    NDArray recved = NDArray(recv_blob, 0);
    stored = NDArray(kRowSparseStorage, dshape, Context(), true,
                     has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
    if (has_multi_precision_copy(type)) {
      // separate copy in the request's dtype, used when answering pulls
      store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
    }
    Engine::Get()->PushAsync(
      [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) {
        NDArray rsp = stored;
        stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
        mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
        using namespace mxnet::op;
        // mark every row as present (0..nnr-1) so the array is dense-like
        nnvm::dim_t nnr = rsp.shape()[0];
        MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
          IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
          mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
        });
        TBlob rsp_data = rsp.data();
        // copies or casts as appropriate
        ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
        on_complete();
      }, recved.ctx(), {recved.var()}, {stored.var()},
      FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
    if (has_multi_precision_copy(type)) {
      CopyFromTo(stored, store_[master_key]);
      store_[master_key].WaitToRead();
    }
    // `recved` aliases the request buffer, which dies when this function
    // returns -- so wait for the engine op above to finish first
    stored.WaitToRead();
    server->Response(req_meta);
  }
  // Handles row_sparse push/pull. The key layout is: keys[0] = master key
  // (len 0), keys[1..] = master_key + row_id for each pushed/pulled row.
  void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
    int master_key = DecodeKey(req_data.keys[0]);
    auto num_rows = req_data.keys.size() - 1;
    auto& stored = store_[master_key];
    if (req_meta.push) {
      CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
      CHECK_EQ(req_data.lens[0], 0);
      if (stored.is_none()) {
        if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
        // initialization
        CHECK_GT(num_rows, 0) << "init with empty data is not supported";
        InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
        return;
      } else {
        if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
        auto& updates = update_buf_[master_key];
        // lazily create the merge / cast buffers for this key
        if (sync_mode_ && updates.merged.is_none()) {
          updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
                                   has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
        }
        if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
          updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false,
                                       mshadow::kFloat32);
        }
        if (num_rows == 0) {
          // this worker pushed an all-zero (empty) gradient
          if (sync_mode_) {
            if (updates.request.empty()) {
              // reset to zeros
              int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype;
              updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
                                       true, merged_dtype);
            }  // else nothing to aggregate
            updates.request.push_back(req_meta);
            ApplyUpdates(type, master_key, req_data, &updates, server);
          } else {
            server->Response(req_meta);
          }
        } else {
          auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
          CHECK_GT(unit_len, 0);
          // indices
          std::vector<int64_t> indices(num_rows);
          DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
          // data
          TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
          size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
          mxnet::TShape dshape(ds, ds + 2);
          TBlob recv_blob;
          MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
            recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()),
                              dshape, cpu::kDevMask);
          })
          // row_sparse NDArray wrapping the request buffer (no copy yet)
          NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
          if (updates.request.empty()) {
            // first push of this round: overwrite, don't accumulate
            if (sync_mode_) {
              CopyFromTo(recved, updates.merged);
            } else {
              if (has_multi_precision_copy(type)) {
                CopyFromTo(recved, updates.temp_array);
              } else {
                updates.temp_array = recved;
              }
            }
          } else {
            CHECK(sync_mode_);
            AccumulateRowSparseGrads(type, recved, &updates);
          }
          updates.request.push_back(req_meta);
          ApplyUpdates(type, master_key, req_data, &updates, server);
        }
      }
    } else {
      // pull
      RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
    }
  }
  // Answers a dense pull for `key` with the full stored value.
  void DefaultStorageResponse(const DataHandleType type,
                              const int key,
                              const ps::KVMeta& req_meta,
                              const ps::KVPairs<char> &req_data,
                              ps::KVServer<char>* server) {
    ps::KVPairs<char> response;
    const NDArray& stored = store_[key];
    CHECK(!stored.is_none()) << "init " << key << " first";

    // as server returns when store_realt is ready in this case
    if (has_multi_precision_copy(type)) stored.WaitToRead();

    auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
    response.keys = req_data.keys;
    response.lens = {len};
    // TODO(mli) try to remove this CopyFrom
    response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
    server->Response(req_meta, response);
  }
void DataHandleCompressed(const DataHandleType type,
const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
CHECK_EQ(type.dtype, mshadow::kFloat32)
<< "Gradient compression is currently supported for fp32 only";
if (req_meta.push) {
// there used several WaitToRead, this is because \a recved's memory
// could be deallocated when this function returns. so we need to make sure
// the operators with \a NDArray are actually finished
// first for dummy key which represents original size of array, whose len is 0
CHECK_EQ(req_data.keys.size(), (size_t)2);
CHECK_EQ(req_data.lens.size(), (size_t)2);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
int original_size = DecodeKey(req_data.keys[0]);
int key = DecodeKey(req_data.keys[1]);
auto& stored = store_[key];
size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
mxnet::TShape dshape(ds, ds + 1);
TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
NDArray decomp_buf = decomp_buf_[key];
dshape = mxnet::TShape{(int64_t) original_size};
if (decomp_buf.is_none()) {
decomp_buf = NDArray(dshape, Context());
}
if (stored.is_none()) {
stored = NDArray(dshape, Context());
gradient_compression_->Dequantize(recved, &stored, 0);
server->Response(req_meta);
stored.WaitToRead();
} else if (sync_mode_) {
// synced push
auto& merged = update_buf_[key];
if (merged.merged.is_none()) {
merged.merged = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
gradient_compression_->Dequantize(recved, &merged.merged, 0);
} else {
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
merged.merged += decomp_buf;
}
merged.request.push_back(req_meta);
ApplyUpdates(type, key, req_data, &merged, server);
} else {
// async push
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
exec_.Exec([this, key, &decomp_buf, &stored]() {
CHECK(updater_);
updater_(key, decomp_buf, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else { // pull
CHECK_EQ(req_data.keys.size(), (size_t)1);
CHECK_EQ(req_data.lens.size(), (size_t)0);
int key = DecodeKey(req_data.keys[0]);
DefaultStorageResponse(type, key, req_meta, req_data, server);
}
}
  // Handles a plain dense push/pull for a single key.
  void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
                         const ps::KVPairs<char> &req_data,
                         ps::KVServer<char>* server) {
    // do some check
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    if (req_meta.push) {
      CHECK_EQ(req_data.lens.size(), (size_t)1);
      CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
    }
    int key = DecodeKey(req_data.keys[0]);
    auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
    // there used several WaitToRead, this is because \a recved's memory
    // could be deallocated when this function returns. so we need to make sure
    // the operators with \a NDArray are actually finished
    if (req_meta.push) {
      size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
      mxnet::TShape dshape(ds, ds + 1);
      TBlob recv_blob;
      MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
        recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
      })
      // zero-copy view over the request buffer
      NDArray recved = NDArray(recv_blob, 0);
      if (stored.is_none()) {
        // initialization
        stored = NDArray(dshape, Context(), false,
                         has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
        CopyFromTo(recved, &stored, 0);
        server->Response(req_meta);
        if (has_multi_precision_copy(type)) {
          // keep a copy in the request's own dtype for answering pulls
          auto& stored_dtype = store_[key];
          stored_dtype = NDArray(dshape, Context(), false, type.dtype);
          CopyFromTo(stored, stored_dtype);
          stored_dtype.WaitToRead();
        }
        stored.WaitToRead();
      } else {
        auto &updates = update_buf_[key];
        // lazily create the merge / cast buffers for this key
        if (sync_mode_ && updates.merged.is_none()) {
          updates.merged = NDArray(dshape, Context(), false,
                                   has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
        }
        if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
          updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
        }
        if (updates.request.empty()) {
          // first push of this round: overwrite, don't accumulate
          if (sync_mode_) {
            CopyFromTo(recved, updates.merged);
          } else {
            if (has_multi_precision_copy(type)) {
              CopyFromTo(recved, updates.temp_array);
            } else {
              updates.temp_array = recved;
            }
          }
        } else {
          CHECK(sync_mode_);
          // accumulate onto the merge buffer (cast to fp32 first if needed)
          if (has_multi_precision_copy(type)) {
            CopyFromTo(recved, updates.temp_array);
            updates.merged += updates.temp_array;
          } else {
            updates.merged += recved;
          }
        }
        updates.request.push_back(req_meta);
        ApplyUpdates(type, key, req_data, &updates, server);
      }
    } else {
      DefaultStorageResponse(type, key, req_meta, req_data, server);
    }
  }
  // Translates a global ps-lite key into this server's local key space by
  // subtracting the start of the key range assigned to this server rank.
  int DecodeKey(ps::Key key) {
    auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
    return key - kr.begin();
  }
  /**
   * \brief user defined mode for push
   */
  bool sync_mode_;
  KVStore::Controller controller_;
  KVStore::Updater updater_;

  /**
   * \brief store_ contains the value at kvstore for each key
   */
  std::unordered_map<int, NDArray> store_;
  // float32 master copies of non-fp32 keys, used in multi-precision mode
  std::unordered_map<int, NDArray> store_realt_;

  /**
   * \brief update_buf_ is a buffer used if sync_mode is true. It represents
   * values from different workers being merged. The store will be updated
   * to this value when values from all workers are pushed into this buffer.
   */
  std::unordered_map<int, UpdateBuf> update_buf_;

  /**
   * \brief decomp_buf_ is a buffer into which compressed values are
   * decompressed before merging to the store. used when compress_!='none'
   */
  std::unordered_map<int, NDArray> decomp_buf_;

  // runs controller/updater callbacks on the thread that called Run()
  Executor exec_;
  ps::KVServer<char>* ps_server_;

  // whether to LOG verbose information
  bool log_verbose_;

  /*
   * \brief whether to use multi precision mode.
   * in multi precision mode, all weights are stored as float32.
   * any gradient received will be cast to float32 before accumulation and updating of weights.
   */
  bool multi_precision_;

  /**
   * \brief gradient compression object.
   * starts with none, used after SetGradientCompression sets the type
   * currently there is no support for unsetting gradient compression
   */
  std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
// Logistic sigmoid: 1 / (1 + e^(-x)).
template<typename DType>
inline DType sigmoid(DType x) {
  const DType one = 1.0f;
  return one / (one + exp(-x));
}
// Rectified linear unit: max(x, 0).
// Fix: the old body returned static_cast<float>(x), which silently rounded
// positive double inputs through float precision; the value is now returned
// in its own DType.
template<typename DType>
inline DType relu(DType x) {
  return x > DType(0) ? x : DType(0);
}
// One unidirectional LSTM layer, training-mode forward pass.
// Computes gates from x (via wx/bx) and the recurrent state (via wh/bh),
// writes the hidden output into y[t][:, offset:offset+H], and records cell
// states and gate activations (i,f,g,o) into the reserve space `rs` for use
// by the backward pass. `bid` selects the reverse time direction and the
// second half of y / the second reserve region.
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
                                    DType* rs,
                                    bool state_outputs,
                                    bool bid,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    const Tensor<cpu, 2, DType> &cx,
                                    const Tensor<cpu, 3, DType> &y,
                                    DType* w_ptr,
                                    DType* b_ptr,
                                    DType* hy_ptr,
                                    DType* cy_ptr) {
  using namespace mshadow;
  // weight/bias layout: wx (4H x I), wh (4H x H), bx then bh (4 x H each)
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // workspace: yx = x*wx' for all steps, yh = h*wh' for the current step
  const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
  const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  // reserve space: backward direction uses the second region of rs
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int offset = bid ? H : 0;  // column offset of this direction in y
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // input projection for every time step in one GEMM
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    // i counts computed steps; t is the time index (reversed when bid)
    int t = bid ? T - 1 - i : i;
    // recurrent projection from the previous hidden state (hx at step 0)
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden unit index
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve
      // note: y is indexed by time t, while c/ifgo are indexed by iteration
      // order i -- the backward pass reads them in the same iteration order
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      if (i == T - 1 && state_outputs) {
        // final hidden/cell state outputs
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}
// Multi-layer (optionally bidirectional) LSTM training forward pass.
// Drives LstmForwardTrainingSingleLayer once (or twice, for D == 2) per
// layer, applies inverted dropout between layers, and copies the last
// layer's output into y_ptr. The reserve space rs stores dropout masks
// followed by per-layer activations needed by the backward pass.
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         const int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr,
                         const float dropout) {
  // reserve layout: (L-1) dropout masks first, then per-layer activations
  DType* dropout_random = rs;
  DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;          // biases per direction (bx + bh)
  const int r_size = D * T * N * H * 6;  // reserve space per layer
  const int y_offset = T * N * H * 5;    // offset of y within a layer's reserve
  const int cell_size = N * H;
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  int idx = 0;  // state & cell state's idx;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < L; ++i) {
    // layers above the first consume the previous layer's (possibly
    // bidirectional) output as input
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
    // forward direction
    LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x,
                                          hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // backward direction uses the next weight/bias/state slots
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x,
                                            hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      if (dropout > 0.0f) {
        // inverted dropout on this layer's output; the mask is recorded in
        // dropout_random so backward can replay it
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < T * N * H * D; j++) {
          int rand_data = rand_r(&seed_);
          if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
            dropout_random[i * T * N * H * D + j] = 0;
            y.dptr_[j] = 0;
          } else {
            dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
            y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
          }
        }
      }
      // next layer reads this layer's output; advance the reserve cursor
      x_ptr = y.dptr_;
      rs2 += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  // copy the last layer's output out of the reserve space
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = (rs2 + y_offset)[i];
  }
}
// One unidirectional LSTM layer, inference-mode forward pass.
// Same recurrence as the training kernel, but keeps only the current h/c
// in the workspace (no reserve space for backward).
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const int T,
                                     const int N,
                                     const int I,
                                     const int H,
                                     const Tensor<cpu, 2, DType> &x,
                                     const Tensor<cpu, 2, DType> &hx,
                                     const Tensor<cpu, 2, DType> &cx,
                                     const Tensor<cpu, 3, DType> &y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  // weight/bias layout: wx (4H x I), wh (4H x H), bx then bh (4 x H each)
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // workspace: input projection for all steps, recurrent projection,
  // then the rolling h and c state buffers
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;  // column offset of this direction in y
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // input projection for every time step in one GEMM
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    // i counts computed steps; t is the time index (reversed when bid)
    int t = bid ? T - 1 - i : i;
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden unit index
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        // last step: emit final states instead of updating the buffers
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        h[j][k] = ht;
        c[j][k] = ct;
      }
    }
  }
}
/*
 * Inference forward pass for a stacked (multi-layer, optionally
 * bidirectional) LSTM.  For D == 2 the layer outputs ping-pong between the
 * user buffer y_ptr and a scratch region inside ws; the phase is chosen so
 * the final layer always lands in y_ptr.
 */
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const int T,
                          const int N,
                          const int I,
                          const int H,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  const int b_size = 2 * H * 4;
  const int cell_size = N * H;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  // Scratch region inside ws used for intermediate layer outputs.
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // running index into the stacked hx/cx state tensors
  // Ping-pong phase; picked so that layer L-1 writes straight into y_ptr.
  bool flag = (L % 2 == 0);
  for (int layer = 0; layer < L; ++layer) {
    const int input_size = (layer == 0) ? I : H * D;
    const int w_size = (input_size + H) * H * 4;
    if (D == 2) {
      // Bidirectional layers need a private buffer for the current output.
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, H * D));
    // Forward direction of this layer.
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H,
                                           x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // Step past the forward direction's parameters and state slots,
      // then run the reversed-time direction over the same input.
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H,
                                             x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (layer != L - 1) {
      // Advance to the next layer; its input is this layer's output.
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
}
// Backward pass for one direction of a single LSTM layer.
// Consumes the forward record saved in rs (cell states c and gate
// activations ifgo) and produces the gradients of the input (dx), the
// initial states (dhx, dcx), the weights (via dw_ptr) and the biases (via
// db_ptr).  dhy_ptr / dcy_ptr, when non-NULL, carry the gradients flowing
// in from the layer's final hidden/cell state.  The req_* arguments are
// OpReqType values: kNullOp skips a gradient, kAddTo accumulates into it,
// anything else overwrites it.
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
                             DType* rs,
                             DType* tmp_buf,
                             bool bid,
                             const int T,
                             const int N,
                             const int I,
                             const int H,
                             const Tensor<cpu, 2, DType> &x,
                             const Tensor<cpu, 2, DType> &hx,
                             const Tensor<cpu, 2, DType> &cx,
                             const Tensor<cpu, 3, DType> &y,
                             const Tensor<cpu, 3, DType> &dy,
                             const Tensor<cpu, 2, DType> &dx,
                             const Tensor<cpu, 2, DType> &dhx,
                             const Tensor<cpu, 2, DType> &dcx,
                             DType* dhy_ptr,
                             DType* dcy_ptr,
                             DType* w_ptr,
                             DType* dw_ptr,
                             DType* db_ptr,
                             int req_data,
                             int req_params,
                             int req_state,
                             int req_statecell) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
  Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
  Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
  // Forward record: the reverse direction's slice sits T*N*H*7 further in.
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  if (req_params != kNullOp && req_params != kAddTo) {
    // Overwrite mode: clear the recurrent-weight and bias accumulators.
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < H * 4 * H; ++i) {
      dwh.dptr_[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < 4 * H; ++i) {
      dbx.dptr_[i] = 0;
      dbh.dptr_[i] = 0;
    }
  }
  // Workspace: gate gradients difgo, running dh/dc, scratch h_{t-1} buffer.
  Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
  Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta0 = 0.0;
  const DType beta1 = 1.0;
  const DType beta2 = 2.0;
  const int cell_size = N * H;
  // Seed dh/dc with the incoming final-state gradients, or zero.
  if (dhy_ptr != NULL) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = dhy_ptr[i];
    }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = 0;
    }
  }
  if (dcy_ptr != NULL) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = dcy_ptr[i];
    }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = 0;
    }
  }
  // Walk backwards through the forward record.  i indexes the record; for
  // the reversed direction the sequence index t runs the opposite way.
  for (int i = T - 1; i >= 0; --i) {
    int t = bid ? T - 1 - i : i;
    int tnext = bid ? t + 1 : t - 1;
    // At i == 0 the "next" (earlier-in-record) state is the initial state.
    const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
    const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
    const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
    const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType tc = tanh(c[i][j][k]);
      DType it = ifgo[i][j][k][0];
      DType ft = ifgo[i][j][k][1];
      DType gt = ifgo[i][j][k][2];
      DType ot = ifgo[i][j][k][3];
      dh[j][k] += dy[t][j][k + offset];
      dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
      // Gate gradients through the sigmoid/tanh derivatives.
      difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
      difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
      difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
      difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
      if (req_statecell != kNullOp || i > 0) {
        dcnext[j][k] = dc[j][k] * ft;
      }
      if (i) {
        // Stash h_{t-1} (read from y) for the dwh GEMM below.
        htmp[j][k] = y[tnext][j][k + offset];
      }
    }
    Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
    if (req_state != kNullOp || i > 0) {
      // dh_{t-1} = dgates * wh
      linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
    }
    if (req_params != kNullOp) {
      if (req_params != kAddTo) {
        // dwh += dgates^T * h_{t-1}
        linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
      } else {
        // NOTE(review): beta == 2.0 doubles the existing accumulator every
        // step in kAddTo mode — verify this matches the intended AddTo
        // semantics before changing it.
        linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
        // generate dwx every time step for AddTo
        Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
        linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
      }
    }
  }
  Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
  if (req_data != kNullOp) {
    // dx = dgates * wx; the reverse direction accumulates (beta == 1) so
    // both directions sum into the same dx buffer.
    linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
  }
  if (req_params != kNullOp && req_params != kAddTo) {
    // dwx = dgates^T * x in one GEMM (overwrite mode only).
    linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
  }
  const int row = T * N;
  const int col = H * 4;
  if (req_params != kNullOp) {
    if (req_params != kAddTo) {
      // Bias gradients: reduce the gate gradients over all (t, n).  dbh is
      // identical to dbx because both biases add into the same
      // pre-activation.
      for (int i = 0; i < row; ++i) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          dbx[j] += dyx[i][j];
          dbh[j] = dbx[j];
        }
      }
    } else {
      // kAddTo: accumulate per-step partial sums into tmp_buf first.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < col * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          for (int i = 0; i < N; ++i) {
            tmp_dbx[j][t] += dyx[t * N + i][j];
            tmp_dbh[j][t] = tmp_dbx[j][t];
          }
        }
        // NOTE(review): "dbx[j] += tmp + dbx[j]" doubles the accumulator;
        // this mirrors the beta == 2.0 convention above — confirm intended.
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          dbx[j] += tmp_dbx[j][t] + dbx[j];
          dbh[j] += tmp_dbh[j][t] + dbh[j];
        }
      }
    }
  }
}
// Backward pass for a stacked (multi-layer, optionally bidirectional) LSTM.
// Iterates layers from top (L-1) down to 0, consuming each layer's forward
// record stored in rs.  dy_ptr initially points at the gradient of the top
// layer's output and is redirected to the freshly computed dx after each
// layer, feeding the next (lower) layer as its output gradient.
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const int T,
                  const int N,
                  const int I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr,
                  int req_data,
                  int req_params,
                  int req_state,
                  int req_statecell,
                  const float dropout) {
  // Reserved-space layout (shared with the forward pass): the dropout
  // masks occupy the first (L-1)*D*T*N*H entries of rs; the per-layer
  // forward records start at rs2.  dropout_random is decremented back
  // through the masks as the layer loop walks downwards.
  DType* dropout_random = rs + (L - 1) * D * T * N * H;
  DType* rs2 = rs + (L - 1) * D * T * N * H;
  DType* tmp_buf = ws;
  DType* ws2 = tmp_buf + 8 * T * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int r_size = D * T * N * H * 6;
  const int y_offset = T * N * H * 5;
  const int w_size1 = (I + H) * H * 4;      // first layer
  const int w_size2 = (D * H + H) * H * 4;  // other layers
  const int cell_size = N * H;
  DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
  for (int i = L - 1; i >= 0; --i) {
    const int input_size = i ? H * D : I;
    const int w_size = i ? w_size2 : w_size1;
    int idx = i * D;  // index of this layer's first direction in hx/cx
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs2 + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : NULL;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : NULL;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // The layer's input is the previous layer's saved output (or x for
    // layer 0); dx goes to scratch except at the bottom layer.
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                   req_data, req_params, req_state, req_statecell);
    if (D == 2) {
      // Reverse direction of the same layer (accumulates into dx).
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : NULL;
      dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : NULL;
      LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
                                     x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                     dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                     req_data, req_params, req_state, req_statecell);
    }
    if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
      // Replay the forward pass's dropout mask on the input gradient.
      dropout_random = dropout_random - T * N * D * H;
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      #pragma omp parallel for num_threads(omp_threads)
      for (int j = 0; j < T * N * D * H; j++) {
        if (dropout_random[j] == 0) {
          dx.dptr_[j] = 0;
        } else {
          dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
        }
      }
    }
    // This layer's dx is the next (lower) layer's dy.
    dy_ptr = dx.dptr_;
  }
}
// Inference-time forward pass for a single GRU layer (both directions when
// D == 2).  Gate activations are not recorded; ws only holds the GEMM
// results and the current step's r/z/n values.
//
//   tmp_buf  scratch for transposing h_{t-1} when D == 2
//   x        input, shape (T * N, I)
//   hx       initial hidden state; rows [0, N) are the forward direction,
//            rows [N, 2N) the reverse one
//   y_ptr    output, shape (T, N, D * H)
//   hy_ptr   final hidden state (D, N, H), written when state_outputs
template<typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
                                    DType* tmp_buf,
                                    bool state_outputs,
                                    const int D,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    DType* wx_ptr,
                                    DType* wh_ptr,
                                    DType* bx_ptr,
                                    DType* bh_ptr,
                                    DType* y_ptr,
                                    DType* hy_ptr) {
  // ht walks forwards through y; back_ht walks backwards from the last
  // step's reverse-direction columns.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  DType* rt = gemmC2 + N * 3 * H;
  DType* zt = rt + N * H;
  DType* nt = zt + N * H;
  // Reverse-direction parameters follow the forward ones in the packing.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + 3 * H * 2: NULL;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the h_{t-1} slots of y with the initial hidden state.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  // Input projections for all time steps in one GEMM per direction.
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Rows interleave the two directions; transpose to (D, H, N) and use
      // only this direction's slice.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * 3 * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        // Row offsets of the r/z/n gate sections for batch element i.
        int rtb = i * 3 * H;
        int ztb = i * 3 * H + H;
        int ntb = i * 3 * H + 2 * H;
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
            rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        // ht and ht_1 alias at t == 0; each element is read before being
        // overwritten, so the in-place update is safe.
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
            zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int rtb = i * 3 * H;
          int ztb = i * 3 * H + H;
          int ntb = i * 3 * H + 2 * H;
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
              gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
              gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward final state is at t == T-1; the reverse direction's final
      // state is at t == 0 (columns offset by H).
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
/*
 * Inference forward pass for a stacked (multi-layer, optionally
 * bidirectional) GRU.  Layer outputs alternate between y_ptr and a scratch
 * buffer so that the last layer's output always lands in y_ptr.
 * Note: I is passed by value and rebound to D * H after the first layer.
 */
template <typename DType>
void GruForwardInference(DType* ws,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* w_ptr,
                         DType* y_ptr,
                         DType* hy_ptr) {
  // Locate the packed parameter sections: every layer/direction's wx and
  // wh blocks come first, then the bias blocks.
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
              + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  // Carve the workspace: layer-output scratch, transpose buffer, then the
  // per-layer working area handed to the single-layer kernel.
  DType* y_tmp = ws;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* layer_in = x_ptr;  // where the current layer reads its input
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* hy_l = hy_ptr;
  for (int layer = 0; layer < L; layer++) {
    Tensor<cpu, 2, DType> x_l(layer_in, Shape2(T * N, I));
    // Alternate output buffers; the phase guarantees that layer L-1
    // writes directly into y_ptr.
    DType* layer_out = ((L + layer) % 2 != 0) ? y_ptr : y_tmp;
    Tensor<cpu, 2, DType> hx_l = hx[D * layer];
    GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                          x_l, hx_l, wx_l, wh_l, bx_l, bh_l, layer_out, hy_l);
    // Step every per-layer cursor to the next layer's parameters.
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (layer == 0) {
      I = D * H;  // deeper layers consume the previous layer's output
    }
    wh_l = wx_l + I * 3 * H;
    layer_in = layer_out;
  }
}
// Training-time forward pass for a single GRU layer (both directions when
// D == 2).  Same math as the inference version, but every step's gate
// activations (gateR/gateZ/gateN) and the recurrent part of the candidate
// pre-activation (Mnh) are recorded for the backward pass.
template<typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   bool state_outputs,
                                   const int D,
                                   const int T,
                                   const int N,
                                   const int I,
                                   const int H,
                                   const Tensor<cpu, 2, DType> &x,
                                   const Tensor<cpu, 2, DType> &hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* bx_ptr,
                                   DType* bh_ptr,
                                   DType* gateR,
                                   DType* gateZ,
                                   DType* gateN,
                                   DType* Mnh,
                                   DType* y_ptr,
                                   DType* hy_ptr) {
  // ht walks forwards through y; back_ht walks backwards from the last
  // step's reverse-direction columns.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  // r/z/n point into the recorded gate buffers and are re-aimed per step.
  DType* rt = gateR;
  DType* zt = gateZ;
  DType* nt = gateN;
  // Reverse-direction parameters and records follow the forward ones.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + 3 * H * 2 : NULL;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_Mnh = Mnh + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the h_{t-1} slots of y with the initial hidden state.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  // Input projections for all time steps in one GEMM per direction.
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Rows interleave the two directions; transpose to (D, H, N) and use
      // only this direction's slice.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    // Aim the gate pointers at this step's record slots.
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * 3 * H;
    DType* Mnht = Mnh + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int rtb = i * 3 * H;
        int ztb = i * 3 * H + H;
        int ntb = i * 3 * H + 2 * H;
        // Record the recurrent part of the candidate pre-activation
        // (h_{t-1} * wh_n + bh_n); the backward pass needs it.
        Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
            rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        // ht and ht_1 alias at t == 0; each element is read before being
        // overwritten, so the in-place update is safe.
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
            zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      rt = back_gateR + (T - 1 - t) * N * H;
      zt = back_gateZ + (T - 1 - t) * N * H;
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int rtb = i * 3 * H;
          int ztb = i * 3 * H + H;
          int ntb = i * 3 * H + 2 * H;
          back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
              gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
              gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward final state is at t == T-1; the reverse direction's final
      // state is at t == 0 (columns offset by H).
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Forward pass for a stacked (multi-layer, optionally bidirectional) GRU
// in training mode.  All gate activations (gateR/Z/N), candidate
// pre-activations (Mnh), layer outputs and dropout masks are recorded in
// the reserved space rs for the backward pass.
//
// Fix: the inter-layer dropout loop previously called rand_r() with a
// single seed variable shared by every OpenMP thread — a data race
// (undefined behavior) that also made the masks non-reproducible.  Each
// element now derives its own private seed, so the loop is race-free and
// deterministic for a given base seed.
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr,
                        const float dropout) {
  // Packed parameter layout: all wx/wh blocks first, then the biases.
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
              + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserved-space layout: gate records, layer outputs, Mnh records,
  // dropout masks, then scratch for the single-layer kernel.
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  const unsigned int base_seed = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      // From layer 1 on, the input is the previous layer's recorded output.
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        // Per-element private seed: sharing one rand_r() seed across
        // OpenMP threads is a data race.  The Knuth multiplicative hash
        // decorrelates neighbouring indices.
        unsigned int seed_ = base_seed ^ (static_cast<unsigned int>(i) * 2654435761U);
        int rand_data = rand_r(&seed_);
        if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
          // Drop this activation and remember the decision for backward.
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          // Inverted dropout: scale survivors up by 1 / (1 - p).
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                         x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                         gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l);
    // Advance all per-layer cursors.
    gateR_l = gateR_l + T * D * N * H;
    gateZ_l = gateZ_l + T * D * N * H;
    gateN_l = gateN_l + T * D * N * H;
    Mnh_l = Mnh_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      I = D * H;  // deeper layers consume the previous layer's output
    }
    wh_l = wx_l + I * 3 * H;
  }
  // Copy the top layer's recorded output into the user-visible y buffer.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* rt;
DType* zt;
DType* nt;
DType* dat;
DType* dart;
DType* dar = ws; // [T, N, 3 * H]
DType* da = dar + T * N * 3 * H; // [T, N, 3 * H]
DType* dht1 = da + T * N * 3 * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* Mnht = Mnh;
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_Mnht = Mnh + T * N * H;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
DType* back_dbx = dbx + 3 * H * 2;
DType* back_dbh = dbh + 3 * H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * 3 * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * 3 * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (int t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
Mnht = Mnh + t * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) *
zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
dht1[id] = dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N * T; ++j) {
dbx[i] += da[j * 3 * H + i];
dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] += tmp_dbh[i][t] + dbh[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (int t = 0; t < T; ++t) {
if (t == T-1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
rt = back_gateR + t * N * H;
zt = back_gateZ + t * N * H;
nt = back_gateN + t * N * H;
back_Mnht = Mnh + (T + t) * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
nt[id]) * zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
back_dht1[id] = back_dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
// dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N * T; ++j) {
back_dbx[i] += da[j * 3 * H + i];
back_dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
/**
 * Multi-layer (optionally bidirectional) GRU backward pass on CPU.
 *
 * Walks the layers from top (L-1) down to 0, calling
 * GruBackwardSingleLayer for each, and chains gradients between layers
 * through dy_l/dx_l.
 *
 * ws        - scratch workspace
 * rs        - reserved space holding forward activations
 *             (gates R/Z/N, layer outputs y, Mnh, dropout masks)
 * L/D/T/N/H - layers, directions (1 or 2), seq length, batch, hidden size
 * I         - input size of the bottom layer (upper layers use D * H)
 * req_*     - kNullOp / kWriteTo / kAddTo request flags for data, params, state
 */
template <typename DType>
void GruBackward(DType* ws,
                 DType* rs,
                 const int L,
                 const int D,
                 const int T,
                 const int N,
                 int I,
                 const int H,
                 DType* x_ptr,
                 DType* hx_ptr,
                 DType* w_ptr,
                 DType* dy_ptr,
                 DType* dhy_ptr,
                 DType* dx_ptr,
                 DType* dhx_ptr,
                 DType* dw_ptr,
                 int req_data,
                 int req_params,
                 int req_state,
                 const float dropout) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H * 3;
  // Bias gradients live after all weight gradients in the packed dw buffer.
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Carve the reserved space into the per-layer activation arrays saved by
  // the forward pass; start at the top layer's slice.
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Weight layout differs between a single-layer net (input weights first)
  // and a stacked net (upper layers consume D * H inputs).
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;
  } else {
    wh_l = wh_l + (D * H) * H * 3;
  }
  DType* dhy_l = NULL;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = NULL;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;
  // y_tmp points at the *input* of the current layer, i.e. the output of
  // the layer below (or x_ptr for layer 0).
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l,
                                  dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
                                  dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
    // Undo the inverted-dropout scaling that the forward pass applied to
    // this layer's input (mask 0 => dropped, else keep and rescale).
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // The input gradient of this layer becomes the output gradient of
      // the layer below.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      // Step every activation / gradient pointer down one layer.
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      // Layer 0 has a different input width (inputsize) than upper layers.
      if (l == 1) {
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}
/**
 * Single-layer vanilla RNN forward pass, inference only (no activations
 * are saved for backward).
 *
 * Computes ht = act(x*wx^T + bx + h(t-1)*wh^T + bh) for each step, where
 * act is tanh (mode == 1) or relu (otherwise).  For D == 2 a second,
 * time-reversed direction is computed with the back_* weights and written
 * interleaved into y ([T, N, D, H] layout).
 *
 * ws      - workspace for the two GEMM results
 * tmp_buf - scratch used to transpose h(t-1) for the bidirectional case
 * y_ptr   - output [T, N, D * H]; hy_ptr - optional last state [D, N, H]
 */
template<typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
                                           DType* tmp_buf,
                                           bool state_outputs,
                                           const int D,
                                           const int T,
                                           const int N,
                                           const int I,
                                           const int H,
                                           const Tensor<cpu, 2, DType> &x,
                                           const Tensor<cpu, 2, DType> &hx,
                                           DType* wx_ptr,
                                           DType* wh_ptr,
                                           DType* bx_ptr,
                                           DType* bh_ptr,
                                           DType* y_ptr,
                                           DType* hy_ptr,
                                           int mode) {
  // Forward direction walks y front-to-back; backward direction walks it
  // back-to-front, offset by H within each [D, H] slot.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  // Reverse-direction weights/biases are packed right after the forward ones.
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + H * 2: NULL;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the recurrence: copy the initial hidden state(s) into the slot
  // that ht_1 / back_ht_1 point at for step 0.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // Input projection for ALL time steps at once: x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Bidirectional states are interleaved [N, D, H]; transpose into
      // tmp_buf so the forward direction's [H, N] slice is contiguous.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int tb = i * H;
        if (mode == 1) {
          ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        } else {
          ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction (processes step T-1-t of the input)
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int tb = i * H;
          if (mode == 1) {
            back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward direction's last state is at t = T-1; backward direction's
      // is at t = 0 (offset H inside each [D, H] slot).
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
/**
 * Multi-layer vanilla RNN forward pass, inference only.
 *
 * Runs VanillaRNNForwardInferenceSingleLayer once per layer, ping-ponging
 * the intermediate layer outputs between a workspace buffer (y_tmp) and
 * the real output buffer (y_ptr) so that the final layer lands in y_ptr.
 *
 * I is the input size of the bottom layer only; upper layers consume D * H.
 * mode selects the activation (1 = tanh, otherwise relu).
 */
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
                                bool state_outputs,
                                const int L,
                                const int D,
                                const int T,
                                const int N,
                                int I,
                                const int H,
                                DType* x_ptr,
                                DType* hx_ptr,
                                DType* w_ptr,
                                DType* y_ptr,
                                DType* hy_ptr,
                                int mode) {
  // Packed weight layout: [wx | wh | ... | bx | bh | ...]; biases start
  // after all layers' weight matrices.
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  DType* y_tmp = ws;
  DType* y_l = x_ptr;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    // Alternate output buffers so the last layer (l == L-1) writes y_ptr.
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                 x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l,
                                                 hy_l, mode);
    // Advance to the next layer's slices of the packed parameter buffers.
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;  // upper layers take the (possibly bidirectional) output as input
    }
    wh_l = wx_l + I * H;
  }
}
/**
 * Single-layer vanilla RNN forward pass, training variant.
 *
 * Identical recurrence to the inference version (tanh for mode == 1,
 * relu otherwise), but additionally records per-step gate values in
 * gateN / back_gateN for use by the backward pass.  NOTE: for tanh the
 * saved value is the activated output; for relu it is the pre-activation
 * sum — the backward pass relies on this distinction.
 */
template<typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
                                          DType* tmp_buf,
                                          bool state_outputs,
                                          const int D,
                                          const int T,
                                          const int N,
                                          const int I,
                                          const int H,
                                          const Tensor<cpu, 2, DType> &x,
                                          const Tensor<cpu, 2, DType> &hx,
                                          DType* wx_ptr,
                                          DType* wh_ptr,
                                          DType* bx_ptr,
                                          DType* bh_ptr,
                                          DType* gateN,
                                          DType* y_ptr,
                                          DType* hy_ptr,
                                          int mode) {
  // See VanillaRNNForwardInferenceSingleLayer for the pointer layout; the
  // backward direction walks y in reverse, offset H within each [D, H] slot.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + H * 2 : NULL;
  DType* back_gateN = gateN + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the recurrence with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // Input projection for all steps at once: x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Transpose the interleaved [N, D, H] state so direction 0 is contiguous.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int tb = i * H;
        if (mode == 1) {
          // tanh: save the post-activation value (backward uses 1 - n^2).
          nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                                gemmC2[tb + j] + bh[0][j]);
        } else {
          // relu: save the pre-activation value (backward uses n > 0).
          nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
          ht[i * D * H + j] = relu(nt[tb + j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction (processes step T-1-t)
    if (D == 2) {
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int tb = i * H;
          if (mode == 1) {
            nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                                       + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
            back_ht[i * D * H + j] = relu(nt[tb + j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
/**
 * Multi-layer vanilla RNN forward pass, training variant.
 *
 * Per layer: optionally applies inverted dropout to the layer input
 * (recording the mask in dropout_random for the backward pass), then runs
 * VanillaRNNForwardTrainingSingleLayer.  All per-layer outputs are kept in
 * the reserved space rs; the top layer's output is copied to y_ptr at the
 * end.
 */
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
                               DType* rs,
                               bool state_outputs,
                               const int L,
                               const int D,
                               const int T,
                               const int N,
                               int I,
                               const int H,
                               DType* x_ptr,
                               DType* hx_ptr,
                               DType* w_ptr,
                               DType* y_ptr,
                               DType* hy_ptr,
                               const float dropout,
                               int mode) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  // Biases start after all layers' packed weight matrices.
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserved-space layout: [gateN | y | dropout masks | tmp | ws2].
  DType* gateN_l = rs;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // NOTE(review): rand()/rand_r-based dropout; mask generation is not
  // reproducible across runs unless the global seed is controlled.
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    // Inverted dropout on the input of every layer except the first:
    // dropped units are zeroed in-place, kept units are scaled up.
    if (dropout > 0.0f && l > 0) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        int rand_data = rand_r(&seed_);
        if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                                gateN_l, y_l, hy_l, mode);
    // Advance to the next layer's parameter / activation slices.
    gateN_l = gateN_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;  // upper layers consume the (possibly bidirectional) output
    }
    wh_l = wx_l + I * H;
  }
  // Copy the top layer's output (still in reserved space) to the user buffer.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
/**
 * Single-layer vanilla RNN backward pass (both directions when D == 2).
 *
 * Consumes the gate values saved by the training forward pass: for tanh
 * (mode == 1) gateN holds the activated output (derivative 1 - n^2); for
 * relu it holds the pre-activation sum (derivative n > 0).
 *
 * Produces dx, dhx, dwx, dwh, dbx, dbh according to the req_* flags
 * (kNullOp / kWriteTo / kAddTo).  ws is scratch; tmp_buf is used both for
 * state transposes and, in the kAddTo path, for per-step bias partials.
 */
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   const int D,
                                   const int T,
                                   const int N,
                                   const int I,
                                   const int H,
                                   const Tensor<cpu, 2, DType> &x,
                                   const Tensor<cpu, 2, DType> &hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* y_ptr,
                                   DType* dy_ptr,
                                   DType* dhy_ptr,
                                   DType* gateN,
                                   DType* dx,
                                   DType* dhx,
                                   DType* dwx,
                                   DType* dwh,
                                   DType* dbx,
                                   DType* dbh,
                                   int req_data,
                                   int req_params,
                                   int req_state,
                                   int mode) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* dart;
  DType* nt;
  // Workspace layout: [dar (T*N*H) | dht1 (D*N*H) | hx_ (N*D*H)].
  DType* dar = ws;                 // [T, N, H]
  DType* dht1 = dar + T * N * H;   // [D, N, H]
  DType* hx_ = dht1 + D * N * H;   // [N, D, H]
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  DType* back_gateN = gateN + T * N * H;
  // Reverse-direction parameters/gradients follow the forward ones.
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_dwx = dwx + I * H + H * H;
  DType* back_dwh = dwh + I * H + H * H;
  DType* back_dbx = dbx + H * 2;
  DType* back_dbh = dbh + H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // For plain write requests, clear the accumulators we add into below.
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * H; ++i) {
      dwh[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
  // Seed dht1 with the incoming gradient of the final state (or zero).
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Repack hx into the interleaved [N, D, H] layout used for t == 0.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // ---- forward direction: walk time backwards ----
  for (int t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    nt = gateN + t * N * H;
    dart = dar + t * N * H;
    // Backprop through the activation; dht1 is reset and rebuilt from the
    // recurrent GEMM below.
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int id = i * H + j;
        if (mode == 1) {
          dart[id] = dht1[id] * (1 - nt[id] * nt[id]);  // tanh'
        } else {
          dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f;  // relu'
        }
        dht1[id] = 0;
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh    [N, H] = [N, H] * [H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        // NOTE(review): beta = 2.0 doubles the existing dwx each step; this
        // pairs with the `x += v + x` bias pattern below and appears to be a
        // deliberate accumulation scheme for kAddTo — confirm against the
        // reference implementation before changing.
        beta = 2.0;
        // dwx = da.T * x   [H, I] = [H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
        linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1   [H, H] = [H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  // ---- forward-direction bias gradients ----
  if (req_params != kNullOp) {
    // dbx = e * da   [1, H] = [1, N] * [N, H]; dbh equals dbx since both
    // biases enter the same pre-activation sum.
    if (req_params != kAddTo) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H; ++i) {
        for (int j = 0; j < N * T; ++j) {
          dbx[i] += dar[j * H + i];
          dbh[i] = dbx[i];
        }
      }
    } else {
      // kAddTo path: accumulate per-step partials in tmp_buf first.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (int j = 0; j < N; ++j) {
            tmp_dbx[i][t] += dar[t * N * H + j * H + i];
            tmp_dbh[i][t] = tmp_dbx[i][t];
          }
        }
        // NOTE(review): `dbx[i] += tmp + dbx[i]` doubles the accumulator;
        // see the beta = 2.0 note above — apparently intentional for kAddTo.
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] = dbx[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx   [T * N, I] = [T * N, H] * [H, I]
  Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x   [H, I] = [H, T * N] * [T * N, I]  (write path only;
  // the kAddTo path already accumulated dwx per step above)
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
    linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
  }
  // ---- reverse direction (D == 2): walk time forwards ----
  if (D == 2) {
    for (int t = 0; t < T; ++t) {
      if (t == T-1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      nt = back_gateN + t * N * H;
      dart = dar + t * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int id = i * H + j;
          if (mode == 1) {
            dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
          } else {
            dart[id] = nt[id] > 0.0f ? static_cast<float>(back_dht1[id]) : 0.0f;
          }
          back_dht1[id] = 0;
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh   [N, H] = [N, H] * [H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1   [H, H] = [H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;  // same kAddTo doubling scheme as the forward direction
          // dwx = da.T * x   [ H, I] = [H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
          linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    // Reverse-direction bias gradients (mirrors the forward-direction code).
    if (req_params != kNullOp) {
      // dbx = e * da   [1, H] = [1, N] * [N, H]
      if (req_params != kAddTo) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (int j = 0; j < N * T; ++j) {
            back_dbx[i] += dar[j * H + i];
            back_dbh[i] = back_dbx[i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H * T; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (int t = T - 1; t >= 0; --t) {
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            for (int j = 0; j < N; ++j) {
              tmp_dbx[i][t] += dar[t * N * H + j * H + i];
              tmp_dbh[i][t] = tmp_dbx[i][t];
            }
          }
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] = back_dbx[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;  // accumulate on top of the forward direction's dx
    // dxt = da * wx   [T * N, I] = [T * N, H] * [H, I]
    Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x   [H, I] = [H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
      linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  // Gradient of the initial hidden state.
  if (req_state != kNullOp) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}
/**
 * Multi-layer (optionally bidirectional) vanilla RNN backward pass.
 *
 * Mirrors GruBackward: iterates layers top-down, calls
 * VanillaRNNBackwardSingleLayer per layer, undoes the forward dropout
 * scaling, and feeds each layer's input gradient to the layer below as
 * its output gradient.
 */
template <typename DType>
void VanillaRNNBackward(DType* ws,
                        DType* rs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* dy_ptr,
                        DType* dhy_ptr,
                        DType* dx_ptr,
                        DType* dhx_ptr,
                        DType* dw_ptr,
                        int req_data,
                        int req_params,
                        int req_state,
                        const float dropout,
                        int mode) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H;
  // Bias gradients follow all weight gradients in the packed dw buffer.
  DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  // Reserved-space slices saved by the forward pass; start at the top layer.
  DType* gateN_l = rs + (L - 1) * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Per-layer weight pointers; single-layer nets have a simpler layout.
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * H
      + D * I * H + D * H * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H;
  } else {
    wh_l = wh_l + (D * H) * H;
  }
  DType* dhy_l = NULL;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * H
      + D * I * H + D * H * H;
  DType* dwh_l = NULL;
  if (L == 1) {
    dwh_l = dwx_l + I * H;
  } else {
    dwh_l = dwx_l + (D * H) * H;
  }
  DType* dbx_l = dbx + (L - 1) * D * H * 2;
  DType* dbh_l = dbx_l + H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;
  // y_tmp is the current layer's input: the output of the layer below,
  // or x_ptr for layer 0.
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l,
                                         y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l,
                                         dbx_l, dbh_l, req_data, req_params, req_state, mode);
    // Undo the forward pass's inverted-dropout scaling on this layer's input.
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // Input gradient becomes the output gradient of the layer below.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      gateN_l = gateN_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      // Layer 0 has input width inputsize, upper layers D * H.
      if (l == 1) {
        wx_l = wx_l - (inputsize + H) * H * D;
        wh_l = wx_l + inputsize * H;
        dwx_l = dwx_l - (inputsize + H) * H * D;
        dwh_l = dwx_l + inputsize * H;
      } else {
        wx_l = wx_l - (I + H) * H * D;
        wh_l = wx_l + I * H;
        dwx_l = dwx_l - (I + H) * H * D;
        dwh_l = dwx_l + I * H;
      }
      dbx_l = dbx_l - D * H * 2;
      dbh_l = dbx_l + H;
    }
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
omp_for_ordered.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
static int last_i = 0;
/* Utility function to check that i is increasing monotonically
with each call */
/* Returns non-zero iff the argument is strictly greater than the value
   seen on the previous call, then records it as the new last-seen value. */
static int check_i_islarger(int current)
{
  const int increased = current > last_i;
  last_i = current;
  return increased;
}
/* Verifies that an ordered worksharing loop executes its ordered regions
   in iteration order: every iteration index must be larger than the one
   before, and the accumulated sum must equal 1 + 2 + ... + 99. */
int test_omp_for_ordered()
{
  int total;
  int all_increasing = 1;
  int expected;

  last_i = 0;
  total = 0;

  #pragma omp parallel
  {
    int idx;
    int locally_increasing = 1;
    #pragma omp for schedule(static,1) ordered
    for (idx = 1; idx < 100; idx++) {
      /* The ordered region serializes these updates in iteration order,
         so the unsynchronized accesses to total and last_i are safe. */
      #pragma omp ordered
      {
        locally_increasing = check_i_islarger(idx) && locally_increasing;
        total += idx;
      }
    }
    #pragma omp critical
    {
      all_increasing = all_increasing && locally_increasing;
    }
  }

  expected = (99 * 100) / 2;
  return (expected == total) && all_increasing;
}
/* Runs the ordered-loop check REPETITIONS times; the exit code is the
   number of failed repetitions (0 on full success). */
int main()
{
  int failures = 0;

  for (int rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_for_ordered()) {
      failures++;
    }
  }
  return failures;
}
|
TwoViewReconstruction.h | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/core/time/all.h"
#include "saiga/vision/VisionTypes.h"
#include "saiga/vision/ba/BAWrapper.h"
#include "saiga/vision/reconstruction/FivePoint.h"
#include "saiga/vision/scene/Scene.h"
namespace Saiga
{
/**
* Complete two-view reconstruction based on the 5-point algorithm.
*
* Input:
* Set of feature matches (in normalized image space!!!)
* Output:
* Relative Camera Transformation
* Set of geometric inliers
* 3D world points of inliers
* Optional Output (for further processing)
* Median triangulation angle
*/
class TwoViewReconstruction
{
   public:
    inline TwoViewReconstruction();

    // must be called once before running compute!
    void init(const RansacParameters& fivePointParams) { fpr.init(fivePointParams); }

    // Runs 5-point RANSAC on the matches and triangulates the inliers.
    inline void compute(ArrayView<const Vec2> points1, ArrayView<const Vec2> points2);

    inline double medianAngle();

    // scales the scene so that the median depth is d
    inline void setMedianDepth(double d);
    inline double getMedianDepth();

    // optimize with bundle adjustment
    inline int optimize(int its, float threshold);

    // Poses of the two views inside the internal scene (pose1 is held fixed).
    SE3& pose1() { return scene.extrinsics[0].se3; }
    SE3& pose2() { return scene.extrinsics[1].se3; }

    int N;                        // number of input matches of the last compute() call
    Mat3 E;                       // estimated essential matrix
    std::vector<int> inliers;     // indices of geometric inliers
    std::vector<char> inlierMask; // per-match inlier flag, size N
    int inlierCount;
    //    AlignedVector<Vec3> worldPoints;
    Scene scene;                  // two-view scene used for triangulation and BA
    std::vector<double> tmpArray;

    FivePointRansac fpr;
    Triangulation<double> triangulation;

    OptimizationOptions op_options;
    BAOptions ba_options;
    BAWrapper ba;
};
// Sets up a minimal two-camera scene: one shared intrinsics entry, two
// images, two extrinsics (the first fixed at the origin), and storage
// pre-allocated for a typical number of feature matches.
TwoViewReconstruction::TwoViewReconstruction()
{
// Both views share intrinsics entry 0.
scene.intrinsics.push_back(Intrinsics4());
scene.images.resize(2);
scene.images[0].extr = 0;
scene.images[0].intr = 0;
scene.images[1].extr = 1;
scene.images[1].intr = 0;
scene.extrinsics.push_back(Extrinsics(SE3()));
scene.extrinsics.push_back(Extrinsics(SE3()));
// Camera 0 defines the gauge and is never optimized.
scene.extrinsics[0].constant = true;
const int expectedPoints = 2000;
scene.worldPoints.reserve(expectedPoints);
scene.images[0].stereoPoints.reserve(expectedPoints);
scene.images[1].stereoPoints.reserve(expectedPoints);
op_options = defaultBAOptimizationOptions();
}
// Estimates the relative pose of view 2 w.r.t. view 1 (pose1() is reset to
// identity) via 5-point RANSAC, then triangulates one world point per
// inlier match. Points must be given in normalized image space.
void TwoViewReconstruction::compute(ArrayView<const Vec2> points1, ArrayView<const Vec2> points2)
{
N = points1.size();
inliers.clear();
inliers.reserve(N);
inlierMask.resize(N);
// inlierCount = computeERansac(points1.data(), points2.data(), points1.size(), E, T(), inliers, inlierMask);
//#pragma omp parallel num_threads(1)
// {
pose1() = SE3();
// RANSAC fills E, pose2(), the inlier index list and the per-match mask.
inlierCount = fpr.solve(points1, points2, E, pose2(), inliers, inlierMask);
// }
// triangulate points
scene.worldPoints.clear();
scene.worldPoints.reserve(inlierCount);
// scene.extrinsics[1].se3 = T();
// Dense layout: entry i of each container corresponds to match i, so
// outlier slots exist but are marked invalid below.
scene.images[0].stereoPoints.resize(N);
scene.images[1].stereoPoints.resize(N);
scene.worldPoints.resize(N);
for (int i = 0; i < N; ++i)
{
auto&& wp = scene.worldPoints[i];
auto&& ip1 = scene.images[0].stereoPoints[i];
auto&& ip2 = scene.images[1].stereoPoints[i];
if (!inlierMask[i])
{
// outlier: world point stays untriangulated; detach both observations
wp.valid = false;
ip1.wp = -1;
ip2.wp = -1;
continue;
}
// inlier
wp.p = triangulation.triangulateHomogeneous(pose1(), pose2(), points1[i], points2[i]);
wp.valid = true;
ip1.wp = i;
ip1.point = points1[i];
ip2.wp = i;
ip2.point = points2[i];
}
scene.fixWorldPointReferences();
SAIGA_ASSERT(scene);
}
// Returns the median triangulation angle over all *valid* world points,
// or 0 if there are none.
//
// Fixes two defects of the previous version: outlier entries (whose
// positions are never triangulated and therefore uninitialized) were
// included in the median, and an empty point set indexed past the end of
// an empty vector (undefined behavior).
double TwoViewReconstruction::medianAngle()
{
tmpArray.clear();
auto c1 = pose1().inverse().translation();
auto c2 = pose2().inverse().translation();
for (auto& wp2 : scene.worldPoints)
{
// Skip outliers: their positions were never written by compute().
if (!wp2.valid) continue;
tmpArray.push_back(TriangulationAngle(c1, c2, wp2.p));
}
if (tmpArray.empty()) return 0;
std::sort(tmpArray.begin(), tmpArray.end());
return tmpArray[tmpArray.size() / 2];
}
// Returns the median z-coordinate over all *valid* world points, or 0 if
// there are none. Since pose1() is the identity after compute(), the world
// z equals the depth in the first camera — NOTE(review): confirm this
// assumption still holds if poses are modified externally.
//
// Fixes: invalid (uninitialized) outlier entries were included in the
// median, and an empty point set dereferenced an empty vector (UB).
double TwoViewReconstruction::getMedianDepth()
{
tmpArray.clear();
for (auto& wp2 : scene.worldPoints)
{
if (!wp2.valid) continue;
tmpArray.push_back(wp2.p.z());
}
if (tmpArray.empty()) return 0;
std::sort(tmpArray.begin(), tmpArray.end());
return tmpArray[tmpArray.size() / 2];
}
// Bundle-adjusts the two-view scene, then re-classifies the surviving
// inliers by squared reprojection error in both images. Returns the
// updated inlier count; rejected matches are cleared in inlierMask.
// 'threshold' is compared against the *squared* residual norm.
// NOTE(review): the 'its' parameter is currently unused — iteration count
// presumably comes from op_options; confirm whether it should be forwarded.
int TwoViewReconstruction::optimize(int its, float threshold)
{
ba.create(scene);
ba.initAndSolve(op_options, ba_options);
// recompute inliers
inlierCount = 0;
for (int i = 0; i < N; ++i)
{
if (!inlierMask[i]) continue;
// Squared reprojection residuals of match i in each image.
auto e1 = scene.residual2(scene.images[0], scene.images[0].stereoPoints[i]).squaredNorm();
auto e2 = scene.residual2(scene.images[1], scene.images[1].stereoPoints[i]).squaredNorm();
if (std::max(e1, e2) < threshold)
{
inlierCount++;
}
else
{
inlierMask[i] = false;
}
}
return inlierCount;
}
// Rescales the whole scene so the median point depth becomes d.
// Fixes a division by zero: if the reconstruction is degenerate (no valid
// points, or a zero median depth), the scale factor would be inf/NaN and
// corrupt the scene; in that case the scene is left unchanged.
void TwoViewReconstruction::setMedianDepth(double d)
{
auto md = getMedianDepth();
if (!(md > 0)) return;
scene.rescale(d / md);
}
} // namespace Saiga
|
GB_binop__first_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_int8
// A.*B function (eWiseMult): GB_AemultB__first_int8
// A*D function (colscale): GB_AxD__first_int8
// D*A function (rowscale): GB_DxB__first_int8
// C+=B function (dense accum): GB_Cdense_accumB__first_int8
// C+=b function (dense accum): GB_Cdense_accumb__first_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_int8
// C=scalar+B GB_bind1st__first_int8
// C=scalar+B' GB_bind1st_tran__first_int8
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = aij
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, for the FIRST int8 operator
// (cij = aij). Auto-generated wrapper: the loop lives in the included
// template, driven by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__first_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse. The template is compiled out
// (#if 0) for this operator — presumably because accumulating with FIRST
// (z = x) leaves C unchanged, making the update a no-op.
GrB_Info GB_Cdense_accumB__first_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar) where C is dense. The template is compiled out (#if 0)
// for this operator — presumably because accum with FIRST (z = x) leaves
// C unchanged.
GrB_Info GB_Cdense_accumb__first_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by a diagonal matrix D, for FIRST int8
// (cij = aij, so D's values are ignored by the operator).
GrB_Info GB_AxD__first_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by a diagonal matrix D, for FIRST int8
// (cij takes the first operand, i.e. D's diagonal entry).
GrB_Info GB_DxB__first_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns) with the FIRST
// int8 operator. The parallel task decomposition comes from TaskList.
GrB_Info GB_AaddB__first_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns) with
// the FIRST int8 operator (cij = aij).
GrB_Info GB_AemultB__first_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = first(x, Bx): bind a scalar as the first operand. Since FIRST
// returns its first argument, every Cx[p] is simply the scalar x; Bx is
// never read (the stray ';' below is the generator's empty GB_GETB).
GrB_Info GB_bind1st__first_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
// C = first(x, A'): transpose A and apply the operator with a bound first
// scalar; with FIRST this fills C's pattern (of A') with the scalar x.
// The GB_ATYPE redefinitions make the transpose template read A as the
// second operand's type; the trailing redefinition restores the file-wide
// default for any code after this function.
GrB_Info GB_bind1st_tran__first_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_unaryop__ainv_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_int16
// op(A') function: GB_tran__ainv_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): element-wise additive inverse. For each entry the
// GB_CAST_OP macro expands to: cast int16 aij to uint16, then negate
// (modular, wrapping mod 2^16) into Cx.
GrB_Info GB_unop__ainv_uint16_int16
(
uint16_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int16 -> uint16, and apply the
// additive-inverse operator. The loop lives in the included template.
GrB_Info GB_tran__ainv_uint16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr93465-1.c | #pragma omp declare target
#pragma acc routine seq /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f1\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
void f1 (void) {}
#pragma omp end declare target
#pragma omp declare target
void f1 (void);
#pragma acc routine seq /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f1\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
void f1 (void);
#pragma omp declare target
#pragma acc routine /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f2\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
extern void f2 (void);
#pragma omp end declare target
#pragma omp declare target
extern void f2 (void);
#pragma omp end declare target
#pragma acc routine gang /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f2\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
extern void f2 (void);
#pragma omp declare target
#pragma acc routine gang /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f3\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
void f3 (void);
#pragma omp end declare target
#pragma omp declare target
void f3 (void) {}
#pragma omp end declare target
#pragma acc routine (f3) gang /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f3\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
/* Surprisingly, this diagnosis also works for '#pragma acc routine' first,
followed by '#pragma omp declare target'; the latter gets applied first. */
#pragma acc routine /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f4\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
extern void f4 (void);
#pragma omp declare target
extern void f4 (void);
#pragma omp end declare target
#pragma acc routine gang /* { dg-error "cannot apply '#pragma acc routine' to '\(void \)?f5\(\\(\\)\)?', which has also been marked with an OpenMP 'declare target' directive" } */
void f5 (void) {}
#pragma omp declare target
extern void f5 (void);
#pragma omp end declare target
|
6.norace1.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
// XFAIL: *
// Taken from ompVerify, Fig. 3
#include <omp.h>
#define N 20
/* In-place matrix transpose swapping the lower and upper triangles.
   NOTE(review): 'temp' is private to the OUTER parallel region only; within
   the inner nested 'parallel for' it is shared by that team's threads, so
   concurrent inner iterations can race on it — presumably why the test is
   marked XFAIL against the "Data Race Free" CHECK line. */
int main() {
int A[N][N];
int p1, p2, temp;
#pragma omp parallel private(p1, p2, temp)
{
#pragma omp for
for (p1 = 0; p1 < N; p1++)
#pragma omp parallel for
for (p2 = 0; p2 < p1; p2++) {
temp = A[p1][p2];
A[p1][p2] = A[p2][p1];
A[p2][p1] = temp;
}
}
}
// CHECK: Region is Data Race Free.
// END
|
omp-places-invalid-syntax.c | // RUN: %libomp-compile && env KMP_SETTINGS=1 OMP_PLACES=invalid %libomp-run 2>&1 | FileCheck %s
// CHECK-DAG: Effective settings
// CHECK: OMP_PLACES=
// CHECK-SAME: cores
// REQUIRES: affinity
/* Spawns an empty parallel region; the test only checks (via KMP_SETTINGS
   output) that an invalid OMP_PLACES value falls back to "cores". */
int main() {
#pragma omp parallel
{}
return 0;
}
|
PostProcess_EnergySpectrum.c | /* Generated by Cython 0.29.7 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_7"
#define CYTHON_HEX_VERSION 0x001D07F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
/* Compat shim mapping the Py_tss API onto the legacy PyThread key API.
   NOTE(review): PyThread_create_key() failure (-1) is not reported here;
   this always returns 0 (success). */
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
/* Allocates an uninitialized TSS key. Per the CPython TSS API, returns
   NULL on allocation failure. Fixes a null-pointer dereference: the
   previous version wrote *key without checking the PyObject_Malloc
   result. */
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
if (key != NULL) {
*key = Py_tss_NEEDS_INIT;
}
return key;
}
/* Releases a key allocated by PyThread_tss_alloc (PyObject_Free(NULL) is
   a no-op, so a NULL key is safe). */
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
/* Nonzero once PyThread_tss_create has initialized the key. */
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
/* Destroys the underlying key and resets the slot so the key reads as
   not-created again. */
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
/* Stores a per-thread value; forwards the legacy API's return code. */
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
/* Retrieves the calling thread's value for this key (NULL if unset). */
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#define PyObject_Unicode PyObject_Str
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
/* Local mirror of CPython's PyAsyncMethods (tp_as_async slot layout) for
   builds where the real struct is unavailable; only defined when the
   CYTHON_USE_ASYNC_SLOTS branch above did not alias it to PyAsyncMethods. */
typedef struct {
    unaryfunc am_await;   /* __await__ slot */
    unaryfunc am_aiter;   /* __aiter__ slot */
    unaryfunc am_anext;   /* __anext__ slot */
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
/* Fallback when <math.h> does not define NAN: an all-ones bit pattern is a
   quiet NaN in IEEE-754 single precision.  memset (rather than a pointer
   cast) avoids strict-aliasing undefined behavior.
   fix: `()` -> `(void)` gives a proper C prototype instead of an old-style
   unspecified-parameters declaration; all call sites use `__PYX_NAN()`. */
static CYTHON_INLINE float __PYX_NAN(void) {
    float value;
    memset(&value, 0xFF, sizeof(value));
    return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__PostProcess_EnergySpectrum
#define __PYX_HAVE_API__PostProcess_EnergySpectrum
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
/* Bounds check 0 <= i < limit with a single unsigned comparison: a negative
   i wraps to a huge size_t and therefore fails the `<` test.  (Relies on
   limit being non-negative, which holds for Python object sizes.) */
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
/* Length of a NUL-terminated Py_UNICODE buffer, excluding the terminator
   (the wide-character analogue of strlen). */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
    size_t n = 0;
    while (u[n] != 0)
        n++;
    return n;
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Compiled with c_string_encoding=ascii: probe sys.getdefaultencoding() at
   module init.  If it is not plain "ascii", verify it is at least an ASCII
   superset (by round-tripping all 128 ASCII code points through it) and
   record the result in __Pyx_sys_getdefaultencoding_not_ascii.
   Returns 0 on success, -1 with a Python exception set on failure.
   Cleanup uses the idiomatic goto-chain; Py_XDECREF tolerates the NULLs of
   paths that never allocated. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    /* default_encoding_c borrows the bytes object's buffer; keep
       default_encoding alive until we are done with it. */
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        /* Encode the 128 ASCII code points with the default encoding; the
           result must be byte-identical or the encoding is incompatible. */
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/* Compiled with c_string_encoding=default: capture sys.getdefaultencoding()
   at module init into the malloc'd global __PYX_DEFAULT_STRING_ENCODING
   (copied, because the borrowed buffer dies with the bytes object; the copy
   intentionally lives for the life of the process and is never freed).
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
/* No-op "use" of a variable's address so compilers stop warning that it may
   be read uninitialized, without actually writing to it. */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
/* Per-module interpreter state: the module object, its __dict__, the
   builtins module, and the cython_runtime module, plus the empty singletons
   and the file/line bookkeeping that __PYX_ERR() updates for tracebacks. */
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
/* Source-file name table indexed by the f_index argument of __PYX_ERR();
   used to attribute tracebacks to the original .pyx/.pxd files. */
static const char *__pyx_f[] = {
  "PostProcess_EnergySpectrum.pyx",
  "__init__.pxd",
  "type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Static description of a C type for buffer-protocol format checking
   (one instance is generated per buffer dtype used in the module). */
typedef struct {
  const char* name;                   /* human-readable type name for error messages */
  struct __Pyx_StructField_* fields;  /* for struct types: member list; presumably terminated by a NULL-type entry -- see __Pyx_StructField */
  size_t size;                        /* sizeof the described C type */
  size_t arraysize[8];                /* NOTE(review): per-dimension extents for C-array members, up to 8 dims -- confirm against users */
  int ndim;
  char typegroup;                     /* type-class tag consumed by the buffer-format parser */
  char is_unsigned;
  int flags;                          /* e.g. __PYX_BUF_FLAGS_PACKED_STRUCT */
} __Pyx_TypeInfo;
/* One member of a struct dtype: its type description, name, and byte
   offset within the parent struct. */
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
/* Stack frame used while recursing into nested struct dtypes during
   buffer-format string validation. */
typedef struct {
  __Pyx_StructField* field;   /* member currently being matched */
  size_t parent_offset;       /* byte offset of the enclosing struct */
} __Pyx_BufFmt_StackElem;
/* Parser state for matching a PEP-3118 buffer format string against an
   expected __Pyx_TypeInfo; fields track the current position, pending
   repeat counts, packing mode and alignment as the format is consumed. */
typedef struct {
  __Pyx_StructField root;         /* synthetic field wrapping the expected top-level type */
  __Pyx_BufFmt_StackElem* head;   /* top of the nested-struct traversal stack */
  size_t fmt_offset;              /* byte offset reached in the format so far */
  size_t new_count, enc_count;    /* pending / already-encoded repeat counts */
  size_t struct_alignment;
  int is_complex;
  char enc_type;                  /* last format character processed */
  char new_packmode;              /* requested packing ('@', '=', '<', ...) */
  char enc_packmode;              /* packing in effect for encoded items */
  char is_valid_array;
} __Pyx_BufFmt_Context;
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":785
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":800
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":808
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":809
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":811
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":812
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
struct __pyx_opt_args_26PostProcess_EnergySpectrum_readStructuredSliceData;
struct __pyx_opt_args_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum;
/* "PostProcess_EnergySpectrum.pyx":20
* # Deactivate negative indexing
* @cython.wraparound(False)
* cpdef tuple readStructuredSliceData(str sliceName, str case = 'ABL_N_H', str caseDir = '.', str time = 'auto', str resultFolder = 'Result', str sliceFolder = 'Slices'): # <<<<<<<<<<<<<<
* cdef str sliceFullPath
* cdef np.ndarray[np.float_t] row, scalarField
*/
/* Optional-argument pack for cpdef readStructuredSliceData(): __pyx_n holds
   how many of the following defaulted arguments the caller supplied
   (in declaration order). */
struct __pyx_opt_args_26PostProcess_EnergySpectrum_readStructuredSliceData {
  int __pyx_n;
  PyObject *__pyx_case;     /* `case` from the .pyx signature, mangled (C keyword) */
  PyObject *caseDir;
  PyObject *time;
  PyObject *resultFolder;
  PyObject *sliceFolder;
};
/* "PostProcess_EnergySpectrum.pyx":81
* @cython.wraparound(False)
* @cython.cdivision(True)
* cpdef getPlanarEnergySpectrum(np.ndarray[np.float_t, ndim= 2] u2D, np.ndarray[np.float_t, ndim= 2] v2D, np.ndarray[np.float_t, ndim= 2] w2D, double L, tuple cellSizes2D, horizontalEii = False): # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float_t, ndim = 2] uRes2D, vRes2D, wRes2D, KrOld
* cdef np.ndarray[np.complex128_t, ndim = 2] uResFft, vResFft, wResFft
*/
/* Optional-argument pack for cpdef getPlanarEnergySpectrum(): __pyx_n is 1
   when the caller supplied horizontalEii, 0 to use its default (False). */
struct __pyx_opt_args_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum {
  int __pyx_n;
  PyObject *horizontalEii;
};
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
/* Function table imported from the Cython.Runtime.refnanny extension when
   CYTHON_REFNANNY is enabled; each slot logs a refcount event for the
   context identified by the void* handle from SetupContext. */
typedef struct {
  void (*INCREF)(void*, PyObject*, int);
  void (*DECREF)(void*, PyObject*, int);
  void (*GOTREF)(void*, PyObject*, int);
  void (*GIVEREF)(void*, PyObject*, int);
  void* (*SetupContext)(const char*, int, const char*);  /* (name, line, file) -> context handle */
  void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* BufferGetAndValidate.proto */
#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\
((obj == Py_None || obj == NULL) ?\
(__Pyx_ZeroBuffer(buf), 0) :\
__Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static void __Pyx_ZeroBuffer(Py_buffer* buf);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
/* BufferFallbackError.proto */
static void __Pyx_RaiseBufferFallbackError(void);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
/* None.proto */
static CYTHON_INLINE npy_intp __Pyx_div_npy_intp(npy_intp, npy_intp);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* IterFinish.proto */
static CYTHON_INLINE int __Pyx_IterFinish(void);
/* UnpackItemEndCheck.proto */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);
#define __Pyx_PyObject_Dict_GetItem(obj, name)\
(likely(PyDict_CheckExact(obj)) ?\
__Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
#endif
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
/* Policy for __Pyx_ImportType when the size of an imported extension type
 * does not match the size this module was compiled against
 * (NOTE(review): generated Cython runtime code — do not edit by hand). */
enum __Pyx_ImportType_CheckSize {
  __Pyx_ImportType_CheckSize_Error = 0,   /* raise an error on size mismatch */
  __Pyx_ImportType_CheckSize_Warn = 1,    /* emit a warning but continue */
  __Pyx_ImportType_CheckSize_Ignore = 2   /* accept the type silently */
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
/* One cache entry associating a C source line with its Python code object;
 * used by the traceback machinery (see __pyx_find_code_object /
 * __pyx_insert_code_object below, which look entries up by code_line). */
typedef struct {
  PyCodeObject* code_object;  /* cached Python code object for this line */
  int code_line;              /* C line number used as the lookup key */
} __Pyx_CodeObjectCacheEntry;
/* Growable, sorted array of entries searched by __pyx_bisect_code_objects. */
struct __Pyx_CodeObjectCache {
  int count;                            /* entries currently stored */
  int max_count;                        /* allocated capacity */
  __Pyx_CodeObjectCacheEntry* entries;  /* entry array (heap-allocated) */
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
/* Per-dimension view info for a locally acquired Py_buffer
 * (shape/strides/suboffsets for one axis). */
typedef struct {
  Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
/* A ref-counted wrapper around a Py_buffer so several local views can
 * share one acquisition. */
typedef struct {
  size_t refcount;     /* number of local users of this buffer */
  Py_buffer pybuffer;  /* the underlying CPython buffer view */
} __Pyx_Buffer;
/* Function-local buffer state: pointer to the shared wrapper, the raw data
 * pointer, and per-axis info for up to 8 dimensions. */
typedef struct {
  __Pyx_Buffer *rcbuffer;
  char *data;
  __Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value);
/* Print.proto */
static int __Pyx_Print(PyObject*, PyObject *, int);
#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3
static PyObject* __pyx_print = 0;
static PyObject* __pyx_print_kwargs = 0;
#endif
/* FromPy.proto */
static __pyx_t_double_complex __Pyx_PyComplex_As___pyx_t_double_complex(PyObject*);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* PrintOne.proto */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'cython' */
/* Module declarations from 'libc.math' */
/* Module declarations from 'PostProcess_EnergySpectrum' */
static PyObject *__pyx_f_26PostProcess_EnergySpectrum_readStructuredSliceData(PyObject *, int __pyx_skip_dispatch, struct __pyx_opt_args_26PostProcess_EnergySpectrum_readStructuredSliceData *__pyx_optional_args); /*proto*/
static PyObject *__pyx_f_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum(PyArrayObject *, PyArrayObject *, PyArrayObject *, double, PyObject *, int __pyx_skip_dispatch, struct __pyx_opt_args_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum *__pyx_optional_args); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float_t = { "float_t", NULL, sizeof(__pyx_t_5numpy_float_t), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo___pyx_t_double_complex = { "double complex", NULL, sizeof(__pyx_t_double_complex), { 0 }, 0, 'C', 0, 0 };
#define __Pyx_MODULE_NAME "PostProcess_EnergySpectrum"
extern int __pyx_module_is_main_PostProcess_EnergySpectrum;
int __pyx_module_is_main_PostProcess_EnergySpectrum = 0;
/* Implementation of 'PostProcess_EnergySpectrum' */
static PyObject *__pyx_builtin_OSError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_builtin_ImportError;
static const char __pyx_k_[] = ".";
static const char __pyx_k_L[] = "L";
static const char __pyx_k_d[] = "d";
static const char __pyx_k__2[] = "/";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_os[] = "os";
static const char __pyx_k_pi[] = "pi";
static const char __pyx_k_end[] = "end";
static const char __pyx_k_fft[] = "fft";
static const char __pyx_k_sum[] = "sum";
static const char __pyx_k_u2D[] = "u2D";
static const char __pyx_k_v2D[] = "v2D";
static const char __pyx_k_w2D[] = "w2D";
static const char __pyx_k_auto[] = "auto";
static const char __pyx_k_axes[] = "axes";
static const char __pyx_k_case[] = "case";
static const char __pyx_k_conj[] = "conj";
static const char __pyx_k_fft2[] = "fft2";
static const char __pyx_k_file[] = "file";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mean[] = "mean";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_norm[] = "norm";
static const char __pyx_k_sqrt[] = "sqrt";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_time[] = "time";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_empty[] = "empty";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_print[] = "print";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_Result[] = "Result";
static const char __pyx_k_Slices[] = "Slices";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_ABL_N_H[] = "ABL_N_H";
static const char __pyx_k_OSError[] = "OSError";
static const char __pyx_k_caseDir[] = "caseDir";
static const char __pyx_k_fftfreq[] = "fftfreq";
static const char __pyx_k_listdir[] = "listdir";
static const char __pyx_k_reshape[] = "reshape";
static const char __pyx_k_linspace[] = "linspace";
static const char __pyx_k_makedirs[] = "makedirs";
static const char __pyx_k_meshgrid[] = "meshgrid";
static const char __pyx_k_multiply[] = "multiply";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_sliceName[] = "sliceName";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_complex128[] = "complex128";
static const char __pyx_k_empty_like[] = "empty_like";
static const char __pyx_k_genfromtxt[] = "genfromtxt";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_cellSizes2D[] = "cellSizes2D";
static const char __pyx_k_sliceFolder[] = "sliceFolder";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_resultFolder[] = "resultFolder";
static const char __pyx_k_horizontalEii[] = "horizontalEii";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_Slice_raw_data_read[] = "\nSlice raw data read";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_kp_s_;
static PyObject *__pyx_n_s_ABL_N_H;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_n_s_L;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_OSError;
static PyObject *__pyx_n_s_Result;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_kp_s_Slice_raw_data_read;
static PyObject *__pyx_n_s_Slices;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_kp_s__2;
static PyObject *__pyx_n_s_auto;
static PyObject *__pyx_n_s_axes;
static PyObject *__pyx_n_s_case;
static PyObject *__pyx_n_s_caseDir;
static PyObject *__pyx_n_s_cellSizes2D;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_complex128;
static PyObject *__pyx_n_s_conj;
static PyObject *__pyx_n_s_d;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_empty;
static PyObject *__pyx_n_s_empty_like;
static PyObject *__pyx_n_s_end;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_fft;
static PyObject *__pyx_n_s_fft2;
static PyObject *__pyx_n_s_fftfreq;
static PyObject *__pyx_n_s_file;
static PyObject *__pyx_n_s_genfromtxt;
static PyObject *__pyx_n_s_horizontalEii;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_linspace;
static PyObject *__pyx_n_s_listdir;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_makedirs;
static PyObject *__pyx_n_s_mean;
static PyObject *__pyx_n_s_meshgrid;
static PyObject *__pyx_n_s_multiply;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_norm;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_os;
static PyObject *__pyx_n_s_pi;
static PyObject *__pyx_n_s_print;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reshape;
static PyObject *__pyx_n_s_resultFolder;
static PyObject *__pyx_n_s_sliceFolder;
static PyObject *__pyx_n_s_sliceName;
static PyObject *__pyx_n_s_sqrt;
static PyObject *__pyx_n_s_sum;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_time;
static PyObject *__pyx_n_s_u2D;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_v2D;
static PyObject *__pyx_n_s_w2D;
static PyObject *__pyx_pf_26PostProcess_EnergySpectrum_readStructuredSliceData(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sliceName, PyObject *__pyx_v_case, PyObject *__pyx_v_caseDir, PyObject *__pyx_v_time, PyObject *__pyx_v_resultFolder, PyObject *__pyx_v_sliceFolder); /* proto */
static PyObject *__pyx_pf_26PostProcess_EnergySpectrum_2getPlanarEnergySpectrum(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_u2D, PyArrayObject *__pyx_v_v2D, PyArrayObject *__pyx_v_w2D, double __pyx_v_L, PyObject *__pyx_v_cellSizes2D, PyObject *__pyx_v_horizontalEii); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_float_0_5;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_2;
static PyObject *__pyx_int_3;
static PyObject *__pyx_int_4;
static PyObject *__pyx_int_5;
static PyObject *__pyx_int_6;
static PyObject *__pyx_slice__3;
static PyObject *__pyx_slice__7;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
/* Late includes */
/* "PostProcess_EnergySpectrum.pyx":20
* # Deactivate negative indexing
* @cython.wraparound(False)
* cpdef tuple readStructuredSliceData(str sliceName, str case = 'ABL_N_H', str caseDir = '.', str time = 'auto', str resultFolder = 'Result', str sliceFolder = 'Slices'): # <<<<<<<<<<<<<<
* cdef str sliceFullPath
* cdef np.ndarray[np.float_t] row, scalarField
*/
static PyObject *__pyx_pw_26PostProcess_EnergySpectrum_1readStructuredSliceData(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_26PostProcess_EnergySpectrum_readStructuredSliceData(PyObject *__pyx_v_sliceName, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_26PostProcess_EnergySpectrum_readStructuredSliceData *__pyx_optional_args) {
PyObject *__pyx_v_case = ((PyObject*)__pyx_n_s_ABL_N_H);
PyObject *__pyx_v_caseDir = ((PyObject*)__pyx_kp_s_);
PyObject *__pyx_v_time = ((PyObject*)__pyx_n_s_auto);
PyObject *__pyx_v_resultFolder = ((PyObject*)__pyx_n_s_Result);
PyObject *__pyx_v_sliceFolder = ((PyObject*)__pyx_n_s_Slices);
PyObject *__pyx_v_sliceFullPath = 0;
PyArrayObject *__pyx_v_scalarField = 0;
PyArrayObject *__pyx_v_x = 0;
PyArrayObject *__pyx_v_y = 0;
PyArrayObject *__pyx_v_z = 0;
PyArrayObject *__pyx_v_u = 0;
PyArrayObject *__pyx_v_v = 0;
PyArrayObject *__pyx_v_w = 0;
PyArrayObject *__pyx_v_data = 0;
PyArrayObject *__pyx_v_x2D = 0;
PyArrayObject *__pyx_v_y2D = 0;
PyArrayObject *__pyx_v_z2D = 0;
PyArrayObject *__pyx_v_u2D = 0;
PyArrayObject *__pyx_v_v2D = 0;
PyArrayObject *__pyx_v_w2D = 0;
PyArrayObject *__pyx_v_scalarField2D = 0;
double __pyx_v_valOld;
double __pyx_v_val;
int __pyx_v_i;
int __pyx_v_nPtX;
int __pyx_v_nPtY;
PyObject *__pyx_v_caseFullPath = 0;
PyObject *__pyx_v_resultPath = 0;
__Pyx_LocalBuf_ND __pyx_pybuffernd_data;
__Pyx_Buffer __pyx_pybuffer_data;
__Pyx_LocalBuf_ND __pyx_pybuffernd_scalarField;
__Pyx_Buffer __pyx_pybuffer_scalarField;
__Pyx_LocalBuf_ND __pyx_pybuffernd_scalarField2D;
__Pyx_Buffer __pyx_pybuffer_scalarField2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_u;
__Pyx_Buffer __pyx_pybuffer_u;
__Pyx_LocalBuf_ND __pyx_pybuffernd_u2D;
__Pyx_Buffer __pyx_pybuffer_u2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_v;
__Pyx_Buffer __pyx_pybuffer_v;
__Pyx_LocalBuf_ND __pyx_pybuffernd_v2D;
__Pyx_Buffer __pyx_pybuffer_v2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_w;
__Pyx_Buffer __pyx_pybuffer_w;
__Pyx_LocalBuf_ND __pyx_pybuffernd_w2D;
__Pyx_Buffer __pyx_pybuffer_w2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x2D;
__Pyx_Buffer __pyx_pybuffer_x2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y;
__Pyx_Buffer __pyx_pybuffer_y;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y2D;
__Pyx_Buffer __pyx_pybuffer_y2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_z;
__Pyx_Buffer __pyx_pybuffer_z;
__Pyx_LocalBuf_ND __pyx_pybuffernd_z2D;
__Pyx_Buffer __pyx_pybuffer_z2D;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
PyArrayObject *__pyx_t_10 = NULL;
PyArrayObject *__pyx_t_11 = NULL;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
PyObject *(*__pyx_t_14)(PyObject *);
double __pyx_t_15;
PyObject *__pyx_t_16 = NULL;
PyObject *__pyx_t_17 = NULL;
PyObject *__pyx_t_18 = NULL;
PyArrayObject *__pyx_t_19 = NULL;
npy_intp __pyx_t_20;
npy_intp __pyx_t_21;
npy_intp __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
Py_ssize_t __pyx_t_26;
Py_ssize_t __pyx_t_27;
Py_ssize_t __pyx_t_28;
Py_ssize_t __pyx_t_29;
__Pyx_RefNannySetupContext("readStructuredSliceData", 0);
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_case = __pyx_optional_args->__pyx_case;
if (__pyx_optional_args->__pyx_n > 1) {
__pyx_v_caseDir = __pyx_optional_args->caseDir;
if (__pyx_optional_args->__pyx_n > 2) {
__pyx_v_time = __pyx_optional_args->time;
if (__pyx_optional_args->__pyx_n > 3) {
__pyx_v_resultFolder = __pyx_optional_args->resultFolder;
if (__pyx_optional_args->__pyx_n > 4) {
__pyx_v_sliceFolder = __pyx_optional_args->sliceFolder;
}
}
}
}
}
}
__Pyx_INCREF(__pyx_v_time);
__pyx_pybuffer_scalarField.pybuffer.buf = NULL;
__pyx_pybuffer_scalarField.refcount = 0;
__pyx_pybuffernd_scalarField.data = NULL;
__pyx_pybuffernd_scalarField.rcbuffer = &__pyx_pybuffer_scalarField;
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
__pyx_pybuffer_y.pybuffer.buf = NULL;
__pyx_pybuffer_y.refcount = 0;
__pyx_pybuffernd_y.data = NULL;
__pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y;
__pyx_pybuffer_z.pybuffer.buf = NULL;
__pyx_pybuffer_z.refcount = 0;
__pyx_pybuffernd_z.data = NULL;
__pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z;
__pyx_pybuffer_u.pybuffer.buf = NULL;
__pyx_pybuffer_u.refcount = 0;
__pyx_pybuffernd_u.data = NULL;
__pyx_pybuffernd_u.rcbuffer = &__pyx_pybuffer_u;
__pyx_pybuffer_v.pybuffer.buf = NULL;
__pyx_pybuffer_v.refcount = 0;
__pyx_pybuffernd_v.data = NULL;
__pyx_pybuffernd_v.rcbuffer = &__pyx_pybuffer_v;
__pyx_pybuffer_w.pybuffer.buf = NULL;
__pyx_pybuffer_w.refcount = 0;
__pyx_pybuffernd_w.data = NULL;
__pyx_pybuffernd_w.rcbuffer = &__pyx_pybuffer_w;
__pyx_pybuffer_data.pybuffer.buf = NULL;
__pyx_pybuffer_data.refcount = 0;
__pyx_pybuffernd_data.data = NULL;
__pyx_pybuffernd_data.rcbuffer = &__pyx_pybuffer_data;
__pyx_pybuffer_x2D.pybuffer.buf = NULL;
__pyx_pybuffer_x2D.refcount = 0;
__pyx_pybuffernd_x2D.data = NULL;
__pyx_pybuffernd_x2D.rcbuffer = &__pyx_pybuffer_x2D;
__pyx_pybuffer_y2D.pybuffer.buf = NULL;
__pyx_pybuffer_y2D.refcount = 0;
__pyx_pybuffernd_y2D.data = NULL;
__pyx_pybuffernd_y2D.rcbuffer = &__pyx_pybuffer_y2D;
__pyx_pybuffer_z2D.pybuffer.buf = NULL;
__pyx_pybuffer_z2D.refcount = 0;
__pyx_pybuffernd_z2D.data = NULL;
__pyx_pybuffernd_z2D.rcbuffer = &__pyx_pybuffer_z2D;
__pyx_pybuffer_u2D.pybuffer.buf = NULL;
__pyx_pybuffer_u2D.refcount = 0;
__pyx_pybuffernd_u2D.data = NULL;
__pyx_pybuffernd_u2D.rcbuffer = &__pyx_pybuffer_u2D;
__pyx_pybuffer_v2D.pybuffer.buf = NULL;
__pyx_pybuffer_v2D.refcount = 0;
__pyx_pybuffernd_v2D.data = NULL;
__pyx_pybuffernd_v2D.rcbuffer = &__pyx_pybuffer_v2D;
__pyx_pybuffer_w2D.pybuffer.buf = NULL;
__pyx_pybuffer_w2D.refcount = 0;
__pyx_pybuffernd_w2D.data = NULL;
__pyx_pybuffernd_w2D.rcbuffer = &__pyx_pybuffer_w2D;
__pyx_pybuffer_scalarField2D.pybuffer.buf = NULL;
__pyx_pybuffer_scalarField2D.refcount = 0;
__pyx_pybuffernd_scalarField2D.data = NULL;
__pyx_pybuffernd_scalarField2D.rcbuffer = &__pyx_pybuffer_scalarField2D;
/* "PostProcess_EnergySpectrum.pyx":28
* cdef double valOld, val
* cdef int i, nPtX, nPtY
* cdef str caseFullPath = caseDir + '/' + case + '/' + sliceFolder + '/' # <<<<<<<<<<<<<<
* cdef str resultPath = caseFullPath + resultFolder + '/'
*
*/
__pyx_t_1 = PyNumber_Add(__pyx_v_caseDir, __pyx_kp_s__2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_v_case); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_v_sliceFolder); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_caseFullPath = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "PostProcess_EnergySpectrum.pyx":29
* cdef int i, nPtX, nPtY
* cdef str caseFullPath = caseDir + '/' + case + '/' + sliceFolder + '/'
* cdef str resultPath = caseFullPath + resultFolder + '/' # <<<<<<<<<<<<<<
*
* # Try making the result folder, if it doesn't exist
*/
__pyx_t_1 = PyNumber_Add(__pyx_v_caseFullPath, __pyx_v_resultFolder); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_s__2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_resultPath = ((PyObject*)__pyx_t_2);
__pyx_t_2 = 0;
/* "PostProcess_EnergySpectrum.pyx":32
*
* # Try making the result folder, if it doesn't exist
* try: # <<<<<<<<<<<<<<
* os.makedirs(resultPath)
* except OSError:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "PostProcess_EnergySpectrum.pyx":33
* # Try making the result folder, if it doesn't exist
* try:
* os.makedirs(resultPath) # <<<<<<<<<<<<<<
* except OSError:
* pass
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_os); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_makedirs); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 33, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
__pyx_t_2 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_1, __pyx_v_resultPath) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_resultPath);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "PostProcess_EnergySpectrum.pyx":32
*
* # Try making the result folder, if it doesn't exist
* try: # <<<<<<<<<<<<<<
* os.makedirs(resultPath)
* except OSError:
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "PostProcess_EnergySpectrum.pyx":34
* try:
* os.makedirs(resultPath)
* except OSError: # <<<<<<<<<<<<<<
* pass
*
*/
__pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_OSError);
if (__pyx_t_7) {
__Pyx_ErrRestore(0,0,0);
goto __pyx_L4_exception_handled;
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "PostProcess_EnergySpectrum.pyx":32
*
* # Try making the result folder, if it doesn't exist
* try: # <<<<<<<<<<<<<<
* os.makedirs(resultPath)
* except OSError:
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L4_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
__pyx_L8_try_end:;
}
/* "PostProcess_EnergySpectrum.pyx":38
*
* # If time is 'auto', pick the 1st from the available times
* time = os.listdir(caseFullPath)[0] if time is 'auto' else time # <<<<<<<<<<<<<<
* # Full path to the slice
* sliceFullPath = caseFullPath + time + '/' + sliceName
*/
__pyx_t_8 = (__pyx_v_time == __pyx_n_s_auto);
if ((__pyx_t_8 != 0)) {
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_os); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_listdir); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
}
}
__pyx_t_6 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_1, __pyx_v_caseFullPath) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_v_caseFullPath);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_GetItemInt(__pyx_t_6, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyString_CheckExact(__pyx_t_9))||((__pyx_t_9) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_t_9)->tp_name), 0))) __PYX_ERR(0, 38, __pyx_L1_error)
__pyx_t_2 = __pyx_t_9;
__pyx_t_9 = 0;
} else {
__Pyx_INCREF(__pyx_v_time);
__pyx_t_2 = __pyx_v_time;
}
__Pyx_DECREF_SET(__pyx_v_time, ((PyObject*)__pyx_t_2));
__pyx_t_2 = 0;
/* "PostProcess_EnergySpectrum.pyx":40
* time = os.listdir(caseFullPath)[0] if time is 'auto' else time
* # Full path to the slice
* sliceFullPath = caseFullPath + time + '/' + sliceName # <<<<<<<<<<<<<<
* # Read slice data, headers with # are auto trimmed
* data = np.genfromtxt(sliceFullPath)
*/
__pyx_t_2 = PyNumber_Add(__pyx_v_caseFullPath, __pyx_v_time); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Add(__pyx_t_9, __pyx_v_sliceName); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_sliceFullPath = ((PyObject*)__pyx_t_2);
__pyx_t_2 = 0;
/* "PostProcess_EnergySpectrum.pyx":42
* sliceFullPath = caseFullPath + time + '/' + sliceName
* # Read slice data, headers with # are auto trimmed
* data = np.genfromtxt(sliceFullPath) # <<<<<<<<<<<<<<
* # 1D array
* x, y, z = data[:, 0], data[:, 1], data[:, 2]
*/
__Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_genfromtxt); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
__pyx_t_2 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_9, __pyx_v_sliceFullPath) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_sliceFullPath);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 42, __pyx_L1_error)
__pyx_t_10 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_data.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_data.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_data.rcbuffer->pybuffer, (PyObject*)__pyx_v_data, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_data.diminfo[0].strides = __pyx_pybuffernd_data.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_data.diminfo[0].shape = __pyx_pybuffernd_data.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_data.diminfo[1].strides = __pyx_pybuffernd_data.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_data.diminfo[1].shape = __pyx_pybuffernd_data.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 42, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_data = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
/* "PostProcess_EnergySpectrum.pyx":44
* data = np.genfromtxt(sliceFullPath)
* # 1D array
* x, y, z = data[:, 0], data[:, 1], data[:, 2] # <<<<<<<<<<<<<<
* # Mesh size in x
* # Since the slice is sorted from low to high x, count the number of x
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_data), __pyx_tuple__4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 44, __pyx_L1_error)
__pyx_t_6 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_data), __pyx_tuple__5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 44, __pyx_L1_error)
__pyx_t_9 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_data), __pyx_tuple__6); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 44, __pyx_L1_error)
__pyx_t_11 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
}
__pyx_t_3 = __pyx_t_4 = __pyx_t_5 = 0;
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 44, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_x = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_11 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 44, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_y = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_11 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
}
__pyx_t_3 = __pyx_t_4 = __pyx_t_5 = 0;
}
__pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 44, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_z = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
/* "PostProcess_EnergySpectrum.pyx":47
* # Mesh size in x
* # Since the slice is sorted from low to high x, count the number of x
* valOld = x[0] # <<<<<<<<<<<<<<
* for i, val in enumerate(x[1:]):
* if val < valOld:
*/
__pyx_t_12 = 0;
__pyx_v_valOld = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_x.diminfo[0].strides));
/* "PostProcess_EnergySpectrum.pyx":48
* # Since the slice is sorted from low to high x, count the number of x
* valOld = x[0]
* for i, val in enumerate(x[1:]): # <<<<<<<<<<<<<<
* if val < valOld:
* nPtX = i + 1
*/
__pyx_t_7 = 0;
__pyx_t_9 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_x), __pyx_slice__7); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (likely(PyList_CheckExact(__pyx_t_9)) || PyTuple_CheckExact(__pyx_t_9)) {
__pyx_t_6 = __pyx_t_9; __Pyx_INCREF(__pyx_t_6); __pyx_t_13 = 0;
__pyx_t_14 = NULL;
} else {
__pyx_t_13 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_14 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 48, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
for (;;) {
if (likely(!__pyx_t_14)) {
if (likely(PyList_CheckExact(__pyx_t_6))) {
if (__pyx_t_13 >= PyList_GET_SIZE(__pyx_t_6)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_13); __Pyx_INCREF(__pyx_t_9); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 48, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_6, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_13 >= PyTuple_GET_SIZE(__pyx_t_6)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_13); __Pyx_INCREF(__pyx_t_9); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 48, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_6, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_14(__pyx_t_6);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 48, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__pyx_t_15 = __pyx_PyFloat_AsDouble(__pyx_t_9); if (unlikely((__pyx_t_15 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_val = __pyx_t_15;
__pyx_v_i = __pyx_t_7;
__pyx_t_7 = (__pyx_t_7 + 1);
/* "PostProcess_EnergySpectrum.pyx":49
* valOld = x[0]
* for i, val in enumerate(x[1:]):
* if val < valOld: # <<<<<<<<<<<<<<
* nPtX = i + 1
* break
*/
__pyx_t_8 = ((__pyx_v_val < __pyx_v_valOld) != 0);
if (__pyx_t_8) {
/* "PostProcess_EnergySpectrum.pyx":50
* for i, val in enumerate(x[1:]):
* if val < valOld:
* nPtX = i + 1 # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_nPtX = (__pyx_v_i + 1);
/* "PostProcess_EnergySpectrum.pyx":51
* if val < valOld:
* nPtX = i + 1
* break # <<<<<<<<<<<<<<
*
* valOld = val
*/
goto __pyx_L10_break;
/* "PostProcess_EnergySpectrum.pyx":49
* valOld = x[0]
* for i, val in enumerate(x[1:]):
* if val < valOld: # <<<<<<<<<<<<<<
* nPtX = i + 1
* break
*/
}
/* "PostProcess_EnergySpectrum.pyx":53
* break
*
* valOld = val # <<<<<<<<<<<<<<
*
* nPtY = x.shape[0]/nPtX
*/
__pyx_v_valOld = __pyx_v_val;
/* "PostProcess_EnergySpectrum.pyx":48
* # Since the slice is sorted from low to high x, count the number of x
* valOld = x[0]
* for i, val in enumerate(x[1:]): # <<<<<<<<<<<<<<
* if val < valOld:
* nPtX = i + 1
*/
}
__pyx_L10_break:;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "PostProcess_EnergySpectrum.pyx":55
* valOld = val
*
* nPtY = x.shape[0]/nPtX # <<<<<<<<<<<<<<
* x2D, y2D, z2D = x.reshape((nPtY, nPtX)), y.reshape((nPtY, nPtX)), z.reshape((nPtY, nPtX))
* # if data.shape[1] == 6:
*/
if (unlikely(__pyx_v_nPtX == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(0, 55, __pyx_L1_error)
}
else if (sizeof(npy_intp) == sizeof(long) && (!(((int)-1) > 0)) && unlikely(__pyx_v_nPtX == (int)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW((__pyx_v_x->dimensions[0])))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(0, 55, __pyx_L1_error)
}
__pyx_v_nPtY = __Pyx_div_npy_intp((__pyx_v_x->dimensions[0]), __pyx_v_nPtX);
/* "PostProcess_EnergySpectrum.pyx":56
*
* nPtY = x.shape[0]/nPtX
* x2D, y2D, z2D = x.reshape((nPtY, nPtX)), y.reshape((nPtY, nPtX)), z.reshape((nPtY, nPtX)) # <<<<<<<<<<<<<<
* # if data.shape[1] == 6:
* u, v, w = data[:, 3], data[:, 4], data[:, 5]
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_x), __pyx_n_s_reshape); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_16 = PyTuple_New(2); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_16, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_16, 1, __pyx_t_1);
__pyx_t_2 = 0;
__pyx_t_1 = 0;
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
}
}
__pyx_t_6 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_1, __pyx_t_16) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_16);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 56, __pyx_L1_error)
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_y), __pyx_n_s_reshape); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_17 = PyTuple_New(2); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_17);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_17, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_17, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_16))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_16);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_16, function);
}
}
__pyx_t_9 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_16, __pyx_t_2, __pyx_t_17) : __Pyx_PyObject_CallOneArg(__pyx_t_16, __pyx_t_17);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0;
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 56, __pyx_L1_error)
__pyx_t_17 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_z), __pyx_n_s_reshape); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_17);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_18 = PyTuple_New(2); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_18, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_18, 1, __pyx_t_1);
__pyx_t_2 = 0;
__pyx_t_1 = 0;
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_17))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_17);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_17);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_17, function);
}
}
__pyx_t_16 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_17, __pyx_t_1, __pyx_t_18) : __Pyx_PyObject_CallOneArg(__pyx_t_17, __pyx_t_18);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0;
if (!(likely(((__pyx_t_16) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_16, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 56, __pyx_L1_error)
__pyx_t_10 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_x2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_x2D.diminfo[0].strides = __pyx_pybuffernd_x2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x2D.diminfo[0].shape = __pyx_pybuffernd_x2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_x2D.diminfo[1].strides = __pyx_pybuffernd_x2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_x2D.diminfo[1].shape = __pyx_pybuffernd_x2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 56, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_x2D = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_10 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_y2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
}
__pyx_t_3 = __pyx_t_4 = __pyx_t_5 = 0;
}
__pyx_pybuffernd_y2D.diminfo[0].strides = __pyx_pybuffernd_y2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y2D.diminfo[0].shape = __pyx_pybuffernd_y2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_y2D.diminfo[1].strides = __pyx_pybuffernd_y2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_y2D.diminfo[1].shape = __pyx_pybuffernd_y2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 56, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_y2D = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_10 = ((PyArrayObject *)__pyx_t_16);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_z2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_z2D.diminfo[0].strides = __pyx_pybuffernd_z2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z2D.diminfo[0].shape = __pyx_pybuffernd_z2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_z2D.diminfo[1].strides = __pyx_pybuffernd_z2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_z2D.diminfo[1].shape = __pyx_pybuffernd_z2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 56, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_z2D = ((PyArrayObject *)__pyx_t_16);
__pyx_t_16 = 0;
/* "PostProcess_EnergySpectrum.pyx":58
* x2D, y2D, z2D = x.reshape((nPtY, nPtX)), y.reshape((nPtY, nPtX)), z.reshape((nPtY, nPtX))
* # if data.shape[1] == 6:
* u, v, w = data[:, 3], data[:, 4], data[:, 5] # <<<<<<<<<<<<<<
* scalarField = np.empty(data.shape[0])
* # Go through every row and calculate resultant value
*/
__pyx_t_16 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_data), __pyx_tuple__8); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
if (!(likely(((__pyx_t_16) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_16, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_t_9 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_data), __pyx_tuple__9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_t_6 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_data), __pyx_tuple__10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_t_11 = ((PyArrayObject *)__pyx_t_16);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_u.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_u.rcbuffer->pybuffer, (PyObject*)__pyx_v_u, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
}
__pyx_t_3 = __pyx_t_4 = __pyx_t_5 = 0;
}
__pyx_pybuffernd_u.diminfo[0].strides = __pyx_pybuffernd_u.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_u.diminfo[0].shape = __pyx_pybuffernd_u.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 58, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_u = ((PyArrayObject *)__pyx_t_16);
__pyx_t_16 = 0;
__pyx_t_11 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_v.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_v.rcbuffer->pybuffer, (PyObject*)__pyx_v_v, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_v.diminfo[0].strides = __pyx_pybuffernd_v.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_v.diminfo[0].shape = __pyx_pybuffernd_v.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 58, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_v = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_11 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_w.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_w.rcbuffer->pybuffer, (PyObject*)__pyx_v_w, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
}
__pyx_t_3 = __pyx_t_4 = __pyx_t_5 = 0;
}
__pyx_pybuffernd_w.diminfo[0].strides = __pyx_pybuffernd_w.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_w.diminfo[0].shape = __pyx_pybuffernd_w.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 58, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_w = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "PostProcess_EnergySpectrum.pyx":59
* # if data.shape[1] == 6:
* u, v, w = data[:, 3], data[:, 4], data[:, 5]
* scalarField = np.empty(data.shape[0]) # <<<<<<<<<<<<<<
* # Go through every row and calculate resultant value
* # nogil doesn't support numpy
*/
__Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_empty); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyInt_From_Py_intptr_t((__pyx_v_data->dimensions[0])); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_17 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_16))) {
__pyx_t_17 = PyMethod_GET_SELF(__pyx_t_16);
if (likely(__pyx_t_17)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16);
__Pyx_INCREF(__pyx_t_17);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_16, function);
}
}
__pyx_t_6 = (__pyx_t_17) ? __Pyx_PyObject_Call2Args(__pyx_t_16, __pyx_t_17, __pyx_t_9) : __Pyx_PyObject_CallOneArg(__pyx_t_16, __pyx_t_9);
__Pyx_XDECREF(__pyx_t_17); __pyx_t_17 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_19 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scalarField.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scalarField.rcbuffer->pybuffer, (PyObject*)__pyx_t_19, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scalarField.rcbuffer->pybuffer, (PyObject*)__pyx_v_scalarField, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_scalarField.diminfo[0].strides = __pyx_pybuffernd_scalarField.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scalarField.diminfo[0].shape = __pyx_pybuffernd_scalarField.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 59, __pyx_L1_error)
}
__pyx_t_19 = 0;
__pyx_v_scalarField = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "PostProcess_EnergySpectrum.pyx":63
* # nogil doesn't support numpy
* # Using sqrt from clib.math instead, for 1D array
* for i in prange(data.shape[0], nogil = True): # <<<<<<<<<<<<<<
* scalarField[i] = sqrt(data[i, 3]**2 + data[i, 4]**2 + data[i, 5]**2)
* # for i, row in enumerate(data):
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_20 = (__pyx_v_data->dimensions[0]);
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_22 = (__pyx_t_20 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_22 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_21 = 0; __pyx_t_21 < __pyx_t_22; __pyx_t_21++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_21);
/* "PostProcess_EnergySpectrum.pyx":64
* # Using sqrt from clib.math instead, for 1D array
* for i in prange(data.shape[0], nogil = True):
* scalarField[i] = sqrt(data[i, 3]**2 + data[i, 4]**2 + data[i, 5]**2) # <<<<<<<<<<<<<<
* # for i, row in enumerate(data):
* # scalarField[i] = np.sqrt(data[i][3]**2 + row[4]**2 + row[5]**2)
*/
__pyx_t_23 = __pyx_v_i;
__pyx_t_24 = 3;
__pyx_t_25 = __pyx_v_i;
__pyx_t_26 = 4;
__pyx_t_27 = __pyx_v_i;
__pyx_t_28 = 5;
__pyx_t_29 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_scalarField.rcbuffer->pybuffer.buf, __pyx_t_29, __pyx_pybuffernd_scalarField.diminfo[0].strides) = sqrt(((pow((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_data.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_data.diminfo[0].strides, __pyx_t_24, __pyx_pybuffernd_data.diminfo[1].strides)), 2.0) + pow((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_data.rcbuffer->pybuffer.buf, __pyx_t_25, __pyx_pybuffernd_data.diminfo[0].strides, __pyx_t_26, __pyx_pybuffernd_data.diminfo[1].strides)), 2.0)) + pow((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_data.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_data.diminfo[0].strides, __pyx_t_28, __pyx_pybuffernd_data.diminfo[1].strides)), 2.0)));
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "PostProcess_EnergySpectrum.pyx":63
* # nogil doesn't support numpy
* # Using sqrt from clib.math instead, for 1D array
* for i in prange(data.shape[0], nogil = True): # <<<<<<<<<<<<<<
* scalarField[i] = sqrt(data[i, 3]**2 + data[i, 4]**2 + data[i, 5]**2)
* # for i, row in enumerate(data):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L14;
}
__pyx_L14:;
}
}
/* "PostProcess_EnergySpectrum.pyx":71
* # scalarField = data[:, 3]
*
* u2D, v2D, w2D = u.reshape((nPtY, nPtX)), v.reshape((nPtY, nPtX)), w.reshape((nPtY, nPtX)) # <<<<<<<<<<<<<<
* scalarField2D = scalarField.reshape((nPtY, nPtX))
*
*/
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_u), __pyx_n_s_reshape); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_17 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_17);
__pyx_t_18 = PyTuple_New(2); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_18, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_17);
PyTuple_SET_ITEM(__pyx_t_18, 1, __pyx_t_17);
__pyx_t_9 = 0;
__pyx_t_17 = 0;
__pyx_t_17 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_16))) {
__pyx_t_17 = PyMethod_GET_SELF(__pyx_t_16);
if (likely(__pyx_t_17)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16);
__Pyx_INCREF(__pyx_t_17);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_16, function);
}
}
__pyx_t_6 = (__pyx_t_17) ? __Pyx_PyObject_Call2Args(__pyx_t_16, __pyx_t_17, __pyx_t_18) : __Pyx_PyObject_CallOneArg(__pyx_t_16, __pyx_t_18);
__Pyx_XDECREF(__pyx_t_17); __pyx_t_17 = 0;
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 71, __pyx_L1_error)
__pyx_t_18 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_v), __pyx_n_s_reshape); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
__pyx_t_17 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_17);
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_17);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_17);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_9);
__pyx_t_17 = 0;
__pyx_t_9 = 0;
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_18))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_18);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_18);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_18, function);
}
}
__pyx_t_16 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_18, __pyx_t_9, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_18, __pyx_t_1);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
if (!(likely(((__pyx_t_16) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_16, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 71, __pyx_L1_error)
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_w), __pyx_n_s_reshape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_17 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_17);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_17);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_17);
__pyx_t_9 = 0;
__pyx_t_17 = 0;
__pyx_t_17 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_17 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_17)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_17);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
__pyx_t_18 = (__pyx_t_17) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_17, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2);
__Pyx_XDECREF(__pyx_t_17); __pyx_t_17 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_18) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_18, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 71, __pyx_L1_error)
__pyx_t_10 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_u2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
}
__pyx_t_3 = __pyx_t_4 = __pyx_t_5 = 0;
}
__pyx_pybuffernd_u2D.diminfo[0].strides = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_u2D.diminfo[0].shape = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_u2D.diminfo[1].strides = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_u2D.diminfo[1].shape = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 71, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_u2D = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_10 = ((PyArrayObject *)__pyx_t_16);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_v2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_v2D.diminfo[0].strides = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_v2D.diminfo[0].shape = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_v2D.diminfo[1].strides = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_v2D.diminfo[1].shape = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 71, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_v2D = ((PyArrayObject *)__pyx_t_16);
__pyx_t_16 = 0;
__pyx_t_10 = ((PyArrayObject *)__pyx_t_18);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_w2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_3); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_5);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_3, __pyx_t_4, __pyx_t_5);
}
__pyx_t_3 = __pyx_t_4 = __pyx_t_5 = 0;
}
__pyx_pybuffernd_w2D.diminfo[0].strides = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_w2D.diminfo[0].shape = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_w2D.diminfo[1].strides = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_w2D.diminfo[1].shape = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 71, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_w2D = ((PyArrayObject *)__pyx_t_18);
__pyx_t_18 = 0;
/* "PostProcess_EnergySpectrum.pyx":72
*
* u2D, v2D, w2D = u.reshape((nPtY, nPtX)), v.reshape((nPtY, nPtX)), w.reshape((nPtY, nPtX))
* scalarField2D = scalarField.reshape((nPtY, nPtX)) # <<<<<<<<<<<<<<
*
* print('\nSlice raw data read')
*/
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scalarField), __pyx_n_s_reshape); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1);
__pyx_t_6 = 0;
__pyx_t_1 = 0;
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_16))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_16);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_16, function);
}
}
__pyx_t_18 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_16, __pyx_t_1, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_16, __pyx_t_2);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (!(likely(((__pyx_t_18) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_18, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 72, __pyx_L1_error)
__pyx_t_10 = ((PyArrayObject *)__pyx_t_18);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_scalarField2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_5); Py_XDECREF(__pyx_t_4); Py_XDECREF(__pyx_t_3);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_5, __pyx_t_4, __pyx_t_3);
}
__pyx_t_5 = __pyx_t_4 = __pyx_t_3 = 0;
}
__pyx_pybuffernd_scalarField2D.diminfo[0].strides = __pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scalarField2D.diminfo[0].shape = __pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_scalarField2D.diminfo[1].strides = __pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_scalarField2D.diminfo[1].shape = __pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 72, __pyx_L1_error)
}
__pyx_t_10 = 0;
__pyx_v_scalarField2D = ((PyArrayObject *)__pyx_t_18);
__pyx_t_18 = 0;
/* "PostProcess_EnergySpectrum.pyx":74
* scalarField2D = scalarField.reshape((nPtY, nPtX))
*
* print('\nSlice raw data read') # <<<<<<<<<<<<<<
* return x2D, y2D, z2D, scalarField2D, u2D, v2D, w2D
*
*/
if (__Pyx_PrintOne(0, __pyx_kp_s_Slice_raw_data_read) < 0) __PYX_ERR(0, 74, __pyx_L1_error)
/* "PostProcess_EnergySpectrum.pyx":75
*
* print('\nSlice raw data read')
* return x2D, y2D, z2D, scalarField2D, u2D, v2D, w2D # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_18 = PyTuple_New(7); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
__Pyx_INCREF(((PyObject *)__pyx_v_x2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_x2D));
PyTuple_SET_ITEM(__pyx_t_18, 0, ((PyObject *)__pyx_v_x2D));
__Pyx_INCREF(((PyObject *)__pyx_v_y2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_y2D));
PyTuple_SET_ITEM(__pyx_t_18, 1, ((PyObject *)__pyx_v_y2D));
__Pyx_INCREF(((PyObject *)__pyx_v_z2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_z2D));
PyTuple_SET_ITEM(__pyx_t_18, 2, ((PyObject *)__pyx_v_z2D));
__Pyx_INCREF(((PyObject *)__pyx_v_scalarField2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_scalarField2D));
PyTuple_SET_ITEM(__pyx_t_18, 3, ((PyObject *)__pyx_v_scalarField2D));
__Pyx_INCREF(((PyObject *)__pyx_v_u2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_u2D));
PyTuple_SET_ITEM(__pyx_t_18, 4, ((PyObject *)__pyx_v_u2D));
__Pyx_INCREF(((PyObject *)__pyx_v_v2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_v2D));
PyTuple_SET_ITEM(__pyx_t_18, 5, ((PyObject *)__pyx_v_v2D));
__Pyx_INCREF(((PyObject *)__pyx_v_w2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_w2D));
PyTuple_SET_ITEM(__pyx_t_18, 6, ((PyObject *)__pyx_v_w2D));
__pyx_r = ((PyObject*)__pyx_t_18);
__pyx_t_18 = 0;
goto __pyx_L0;
/* "PostProcess_EnergySpectrum.pyx":20
* # Deactivate negative indexing
* @cython.wraparound(False)
* cpdef tuple readStructuredSliceData(str sliceName, str case = 'ABL_N_H', str caseDir = '.', str time = 'auto', str resultFolder = 'Result', str sliceFolder = 'Slices'): # <<<<<<<<<<<<<<
* cdef str sliceFullPath
* cdef np.ndarray[np.float_t] row, scalarField
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_16);
__Pyx_XDECREF(__pyx_t_17);
__Pyx_XDECREF(__pyx_t_18);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_data.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scalarField.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z2D.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("PostProcess_EnergySpectrum.readStructuredSliceData", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_data.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scalarField.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scalarField2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z2D.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF(__pyx_v_sliceFullPath);
__Pyx_XDECREF((PyObject *)__pyx_v_scalarField);
__Pyx_XDECREF((PyObject *)__pyx_v_x);
__Pyx_XDECREF((PyObject *)__pyx_v_y);
__Pyx_XDECREF((PyObject *)__pyx_v_z);
__Pyx_XDECREF((PyObject *)__pyx_v_u);
__Pyx_XDECREF((PyObject *)__pyx_v_v);
__Pyx_XDECREF((PyObject *)__pyx_v_w);
__Pyx_XDECREF((PyObject *)__pyx_v_data);
__Pyx_XDECREF((PyObject *)__pyx_v_x2D);
__Pyx_XDECREF((PyObject *)__pyx_v_y2D);
__Pyx_XDECREF((PyObject *)__pyx_v_z2D);
__Pyx_XDECREF((PyObject *)__pyx_v_u2D);
__Pyx_XDECREF((PyObject *)__pyx_v_v2D);
__Pyx_XDECREF((PyObject *)__pyx_v_w2D);
__Pyx_XDECREF((PyObject *)__pyx_v_scalarField2D);
__Pyx_XDECREF(__pyx_v_caseFullPath);
__Pyx_XDECREF(__pyx_v_resultPath);
__Pyx_XDECREF(__pyx_v_time);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper.
 *
 * CPython entry point for the cpdef function readStructuredSliceData():
 * unpacks up to six arguments -- one required (sliceName) and five optional
 * with string defaults ('ABL_N_H', '.', 'auto', 'Result', 'Slices', per the
 * .pyx signature quoted in this file) -- from the positional tuple and/or
 * keyword dict, type-checks each as `str`, then forwards to the
 * implementation function __pyx_pf_26PostProcess_EnergySpectrum_readStructuredSliceData().
 *
 * NOTE(review): this file is Cython-generated; hand edits will be lost when
 * the .pyx is recompiled, so only comments are added here. */
static PyObject *__pyx_pw_26PostProcess_EnergySpectrum_1readStructuredSliceData(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_26PostProcess_EnergySpectrum_1readStructuredSliceData(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  /* One slot per declared parameter; filled with borrowed references below. */
  PyObject *__pyx_v_sliceName = 0;
  PyObject *__pyx_v_case = 0;
  PyObject *__pyx_v_caseDir = 0;
  PyObject *__pyx_v_time = 0;
  PyObject *__pyx_v_resultFolder = 0;
  PyObject *__pyx_v_sliceFolder = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("readStructuredSliceData (wrapper)", 0);
  {
    /* Keyword names in declaration order, NULL-terminated, for keyword lookup. */
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sliceName,&__pyx_n_s_case,&__pyx_n_s_caseDir,&__pyx_n_s_time,&__pyx_n_s_resultFolder,&__pyx_n_s_sliceFolder,0};
    PyObject* values[6] = {0,0,0,0,0,0};
    /* Pre-load the five optional parameters with their interned-string defaults. */
    values[1] = ((PyObject*)__pyx_n_s_ABL_N_H);
    values[2] = ((PyObject*)__pyx_kp_s_);
    values[3] = ((PyObject*)__pyx_n_s_auto);
    values[4] = ((PyObject*)__pyx_n_s_Result);
    values[5] = ((PyObject*)__pyx_n_s_Slices);
    if (unlikely(__pyx_kwds)) {
      /* Mixed positional + keyword call: first copy positionals into values[]
       * (the switch intentionally falls through from the highest count down). */
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Then fill the slots not covered by positionals from the keyword dict;
       * only slot 0 (sliceName) is mandatory -- a miss there is an error. */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sliceName)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_case);
          if (value) { values[1] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_caseDir);
          if (value) { values[2] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_time);
          if (value) { values[3] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_resultFolder);
          if (value) { values[4] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sliceFolder);
          if (value) { values[5] = value; kw_args--; }
        }
      }
      /* Any keyword left unconsumed is unexpected or a duplicate -> TypeError. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "readStructuredSliceData") < 0)) __PYX_ERR(0, 20, __pyx_L3_error)
      }
    } else {
      /* Purely positional call: 1 to 6 arguments accepted (fallthrough copy). */
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Bind the resolved values (borrowed references) to the parameter variables. */
    __pyx_v_sliceName = ((PyObject*)values[0]);
    __pyx_v_case = ((PyObject*)values[1]);
    __pyx_v_caseDir = ((PyObject*)values[2]);
    __pyx_v_time = ((PyObject*)values[3]);
    __pyx_v_resultFolder = ((PyObject*)values[4]);
    __pyx_v_sliceFolder = ((PyObject*)values[5]);
  }
  goto __pyx_L4_argument_unpacking_done;
  /* Wrong number of positional arguments: raise the standard TypeError. */
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("readStructuredSliceData", 0, 1, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 20, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("PostProcess_EnergySpectrum.readStructuredSliceData", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Enforce the declared `str` type on every argument (None allowed, per the
   * trailing 1 "none_allowed" flag of __Pyx_ArgTypeTest). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sliceName), (&PyString_Type), 1, "sliceName", 1))) __PYX_ERR(0, 20, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_case), (&PyString_Type), 1, "case", 1))) __PYX_ERR(0, 20, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_caseDir), (&PyString_Type), 1, "caseDir", 1))) __PYX_ERR(0, 20, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_time), (&PyString_Type), 1, "time", 1))) __PYX_ERR(0, 20, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_resultFolder), (&PyString_Type), 1, "resultFolder", 1))) __PYX_ERR(0, 20, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sliceFolder), (&PyString_Type), 1, "sliceFolder", 1))) __PYX_ERR(0, 20, __pyx_L1_error)
  /* Delegate to the implementation shim with fully-resolved arguments. */
  __pyx_r = __pyx_pf_26PostProcess_EnergySpectrum_readStructuredSliceData(__pyx_self, __pyx_v_sliceName, __pyx_v_case, __pyx_v_caseDir, __pyx_v_time, __pyx_v_resultFolder, __pyx_v_sliceFolder);
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python-visible implementation shim generated by Cython for the cpdef
 * function `readStructuredSliceData` (PostProcess_EnergySpectrum.pyx:20).
 * It forwards the six already-validated PyString arguments to the C-level
 * entry point __pyx_f_26PostProcess_EnergySpectrum_readStructuredSliceData,
 * passing the five trailing arguments through the optional-args struct.
 * NOTE(review): auto-generated code — do not hand-edit; change the .pyx
 * source and re-run Cython instead. */
static PyObject *__pyx_pf_26PostProcess_EnergySpectrum_readStructuredSliceData(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sliceName, PyObject *__pyx_v_case, PyObject *__pyx_v_caseDir, PyObject *__pyx_v_time, PyObject *__pyx_v_resultFolder, PyObject *__pyx_v_sliceFolder) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
/* Struct carrying the optional (defaulted) arguments of the cpdef signature. */
struct __pyx_opt_args_26PostProcess_EnergySpectrum_readStructuredSliceData __pyx_t_2;
__Pyx_RefNannySetupContext("readStructuredSliceData", 0);
__Pyx_XDECREF(__pyx_r);
/* __pyx_n = 5: all five optional arguments are explicitly supplied here
 * (the wrapper has already filled in defaults for any the caller omitted). */
__pyx_t_2.__pyx_n = 5;
/* `case` is a C keyword, so Cython mangles the field name with __pyx_. */
__pyx_t_2.__pyx_case = __pyx_v_case;
__pyx_t_2.caseDir = __pyx_v_caseDir;
__pyx_t_2.time = __pyx_v_time;
__pyx_t_2.resultFolder = __pyx_v_resultFolder;
__pyx_t_2.sliceFolder = __pyx_v_sliceFolder;
/* skip_dispatch = 0: allow the cpdef vtable dispatch inside the C function. */
__pyx_t_1 = __pyx_f_26PostProcess_EnergySpectrum_readStructuredSliceData(__pyx_v_sliceName, 0, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
/* Transfer ownership of the result to the return slot. */
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
/* On error: drop any temporary reference, record the traceback frame, and
 * propagate NULL to the caller. */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("PostProcess_EnergySpectrum.readStructuredSliceData", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "PostProcess_EnergySpectrum.pyx":81
* @cython.wraparound(False)
* @cython.cdivision(True)
* cpdef getPlanarEnergySpectrum(np.ndarray[np.float_t, ndim= 2] u2D, np.ndarray[np.float_t, ndim= 2] v2D, np.ndarray[np.float_t, ndim= 2] w2D, double L, tuple cellSizes2D, horizontalEii = False): # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float_t, ndim = 2] uRes2D, vRes2D, wRes2D, KrOld
* cdef np.ndarray[np.complex128_t, ndim = 2] uResFft, vResFft, wResFft
*/
static PyObject *__pyx_pw_26PostProcess_EnergySpectrum_3getPlanarEnergySpectrum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum(PyArrayObject *__pyx_v_u2D, PyArrayObject *__pyx_v_v2D, PyArrayObject *__pyx_v_w2D, double __pyx_v_L, PyObject *__pyx_v_cellSizes2D, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum *__pyx_optional_args) {
PyObject *__pyx_v_horizontalEii = ((PyObject *)Py_False);
PyArrayObject *__pyx_v_uRes2D = 0;
PyArrayObject *__pyx_v_vRes2D = 0;
PyArrayObject *__pyx_v_wRes2D = 0;
PyArrayObject *__pyx_v_KrOld = 0;
PyArrayObject *__pyx_v_uResFft = 0;
PyArrayObject *__pyx_v_vResFft = 0;
PyArrayObject *__pyx_v_wResFft = 0;
double __pyx_v_TKE;
double __pyx_v_Kr0;
int __pyx_v_nPtX;
int __pyx_v_nPtY;
int __pyx_v_i;
int __pyx_v_j;
PyObject *__pyx_v_U1ResFft = 0;
PyObject *__pyx_v_U2ResFft = 0;
PyArrayObject *__pyx_v_RiiFft = 0;
PyArrayObject *__pyx_v_Eij = 0;
PyArrayObject *__pyx_v_RijFft = 0;
PyArrayObject *__pyx_v_Eii = 0;
PyArrayObject *__pyx_v_Kr = 0;
PyArrayObject *__pyx_v_occurrence = 0;
PyObject *__pyx_v_Kx = NULL;
PyObject *__pyx_v_Ky = NULL;
__Pyx_LocalBuf_ND __pyx_pybuffernd_Eii;
__Pyx_Buffer __pyx_pybuffer_Eii;
__Pyx_LocalBuf_ND __pyx_pybuffernd_Eij;
__Pyx_Buffer __pyx_pybuffer_Eij;
__Pyx_LocalBuf_ND __pyx_pybuffernd_Kr;
__Pyx_Buffer __pyx_pybuffer_Kr;
__Pyx_LocalBuf_ND __pyx_pybuffernd_KrOld;
__Pyx_Buffer __pyx_pybuffer_KrOld;
__Pyx_LocalBuf_ND __pyx_pybuffernd_RiiFft;
__Pyx_Buffer __pyx_pybuffer_RiiFft;
__Pyx_LocalBuf_ND __pyx_pybuffernd_RijFft;
__Pyx_Buffer __pyx_pybuffer_RijFft;
__Pyx_LocalBuf_ND __pyx_pybuffernd_occurrence;
__Pyx_Buffer __pyx_pybuffer_occurrence;
__Pyx_LocalBuf_ND __pyx_pybuffernd_u2D;
__Pyx_Buffer __pyx_pybuffer_u2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_uRes2D;
__Pyx_Buffer __pyx_pybuffer_uRes2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_uResFft;
__Pyx_Buffer __pyx_pybuffer_uResFft;
__Pyx_LocalBuf_ND __pyx_pybuffernd_v2D;
__Pyx_Buffer __pyx_pybuffer_v2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_vRes2D;
__Pyx_Buffer __pyx_pybuffer_vRes2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_vResFft;
__Pyx_Buffer __pyx_pybuffer_vResFft;
__Pyx_LocalBuf_ND __pyx_pybuffernd_w2D;
__Pyx_Buffer __pyx_pybuffer_w2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_wRes2D;
__Pyx_Buffer __pyx_pybuffer_wRes2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_wResFft;
__Pyx_Buffer __pyx_pybuffer_wResFft;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyArrayObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
double __pyx_t_11;
npy_intp __pyx_t_12;
npy_intp __pyx_t_13;
PyObject *__pyx_t_14 = NULL;
PyArrayObject *__pyx_t_15 = NULL;
PyObject *(*__pyx_t_16)(PyObject *);
PyArrayObject *__pyx_t_17 = NULL;
int __pyx_t_18;
int __pyx_t_19;
PyArrayObject *__pyx_t_20 = NULL;
PyObject *__pyx_t_21 = NULL;
PyArrayObject *__pyx_t_22 = NULL;
Py_ssize_t __pyx_t_23;
PyArrayObject *__pyx_t_24 = NULL;
Py_ssize_t __pyx_t_25;
Py_ssize_t __pyx_t_26;
Py_ssize_t __pyx_t_27;
__pyx_t_double_complex __pyx_t_28;
Py_ssize_t __pyx_t_29;
Py_ssize_t __pyx_t_30;
Py_ssize_t __pyx_t_31;
__Pyx_RefNannySetupContext("getPlanarEnergySpectrum", 0);
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_horizontalEii = __pyx_optional_args->horizontalEii;
}
}
__pyx_pybuffer_uRes2D.pybuffer.buf = NULL;
__pyx_pybuffer_uRes2D.refcount = 0;
__pyx_pybuffernd_uRes2D.data = NULL;
__pyx_pybuffernd_uRes2D.rcbuffer = &__pyx_pybuffer_uRes2D;
__pyx_pybuffer_vRes2D.pybuffer.buf = NULL;
__pyx_pybuffer_vRes2D.refcount = 0;
__pyx_pybuffernd_vRes2D.data = NULL;
__pyx_pybuffernd_vRes2D.rcbuffer = &__pyx_pybuffer_vRes2D;
__pyx_pybuffer_wRes2D.pybuffer.buf = NULL;
__pyx_pybuffer_wRes2D.refcount = 0;
__pyx_pybuffernd_wRes2D.data = NULL;
__pyx_pybuffernd_wRes2D.rcbuffer = &__pyx_pybuffer_wRes2D;
__pyx_pybuffer_KrOld.pybuffer.buf = NULL;
__pyx_pybuffer_KrOld.refcount = 0;
__pyx_pybuffernd_KrOld.data = NULL;
__pyx_pybuffernd_KrOld.rcbuffer = &__pyx_pybuffer_KrOld;
__pyx_pybuffer_uResFft.pybuffer.buf = NULL;
__pyx_pybuffer_uResFft.refcount = 0;
__pyx_pybuffernd_uResFft.data = NULL;
__pyx_pybuffernd_uResFft.rcbuffer = &__pyx_pybuffer_uResFft;
__pyx_pybuffer_vResFft.pybuffer.buf = NULL;
__pyx_pybuffer_vResFft.refcount = 0;
__pyx_pybuffernd_vResFft.data = NULL;
__pyx_pybuffernd_vResFft.rcbuffer = &__pyx_pybuffer_vResFft;
__pyx_pybuffer_wResFft.pybuffer.buf = NULL;
__pyx_pybuffer_wResFft.refcount = 0;
__pyx_pybuffernd_wResFft.data = NULL;
__pyx_pybuffernd_wResFft.rcbuffer = &__pyx_pybuffer_wResFft;
__pyx_pybuffer_RiiFft.pybuffer.buf = NULL;
__pyx_pybuffer_RiiFft.refcount = 0;
__pyx_pybuffernd_RiiFft.data = NULL;
__pyx_pybuffernd_RiiFft.rcbuffer = &__pyx_pybuffer_RiiFft;
__pyx_pybuffer_Eij.pybuffer.buf = NULL;
__pyx_pybuffer_Eij.refcount = 0;
__pyx_pybuffernd_Eij.data = NULL;
__pyx_pybuffernd_Eij.rcbuffer = &__pyx_pybuffer_Eij;
__pyx_pybuffer_RijFft.pybuffer.buf = NULL;
__pyx_pybuffer_RijFft.refcount = 0;
__pyx_pybuffernd_RijFft.data = NULL;
__pyx_pybuffernd_RijFft.rcbuffer = &__pyx_pybuffer_RijFft;
__pyx_pybuffer_Eii.pybuffer.buf = NULL;
__pyx_pybuffer_Eii.refcount = 0;
__pyx_pybuffernd_Eii.data = NULL;
__pyx_pybuffernd_Eii.rcbuffer = &__pyx_pybuffer_Eii;
__pyx_pybuffer_Kr.pybuffer.buf = NULL;
__pyx_pybuffer_Kr.refcount = 0;
__pyx_pybuffernd_Kr.data = NULL;
__pyx_pybuffernd_Kr.rcbuffer = &__pyx_pybuffer_Kr;
__pyx_pybuffer_occurrence.pybuffer.buf = NULL;
__pyx_pybuffer_occurrence.refcount = 0;
__pyx_pybuffernd_occurrence.data = NULL;
__pyx_pybuffernd_occurrence.rcbuffer = &__pyx_pybuffer_occurrence;
__pyx_pybuffer_u2D.pybuffer.buf = NULL;
__pyx_pybuffer_u2D.refcount = 0;
__pyx_pybuffernd_u2D.data = NULL;
__pyx_pybuffernd_u2D.rcbuffer = &__pyx_pybuffer_u2D;
__pyx_pybuffer_v2D.pybuffer.buf = NULL;
__pyx_pybuffer_v2D.refcount = 0;
__pyx_pybuffernd_v2D.data = NULL;
__pyx_pybuffernd_v2D.rcbuffer = &__pyx_pybuffer_v2D;
__pyx_pybuffer_w2D.pybuffer.buf = NULL;
__pyx_pybuffer_w2D.refcount = 0;
__pyx_pybuffernd_w2D.data = NULL;
__pyx_pybuffernd_w2D.rcbuffer = &__pyx_pybuffer_w2D;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_u2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 81, __pyx_L1_error)
}
__pyx_pybuffernd_u2D.diminfo[0].strides = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_u2D.diminfo[0].shape = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_u2D.diminfo[1].strides = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_u2D.diminfo[1].shape = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_v2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 81, __pyx_L1_error)
}
__pyx_pybuffernd_v2D.diminfo[0].strides = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_v2D.diminfo[0].shape = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_v2D.diminfo[1].strides = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_v2D.diminfo[1].shape = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_w2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 81, __pyx_L1_error)
}
__pyx_pybuffernd_w2D.diminfo[0].strides = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_w2D.diminfo[0].shape = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_w2D.diminfo[1].strides = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_w2D.diminfo[1].shape = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.shape[1];
/* "PostProcess_EnergySpectrum.pyx":96
* # The Taylor hypothesis states that for fully developed turbulence,
* # the spatial average and the time average are equivalent
* uRes2D, vRes2D, wRes2D = u2D - u2D.mean(), v2D - v2D.mean(), w2D - w2D.mean() # <<<<<<<<<<<<<<
* # TKE calculated form physical space
* TKE = 0.5*np.sum(uRes2D**2 + vRes2D**2 + wRes2D**2)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_u2D), __pyx_n_s_mean); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Subtract(((PyObject *)__pyx_v_u2D), __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 96, __pyx_L1_error)
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_v2D), __pyx_n_s_mean); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Subtract(((PyObject *)__pyx_v_v2D), __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 96, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_w2D), __pyx_n_s_mean); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyNumber_Subtract(((PyObject *)__pyx_v_w2D), __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 96, __pyx_L1_error)
__pyx_t_6 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_uRes2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_uRes2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_uRes2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_uRes2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_uRes2D.diminfo[0].strides = __pyx_pybuffernd_uRes2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_uRes2D.diminfo[0].shape = __pyx_pybuffernd_uRes2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_uRes2D.diminfo[1].strides = __pyx_pybuffernd_uRes2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_uRes2D.diminfo[1].shape = __pyx_pybuffernd_uRes2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 96, __pyx_L1_error)
}
__pyx_t_6 = 0;
__pyx_v_uRes2D = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_6 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vRes2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vRes2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vRes2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_vRes2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_vRes2D.diminfo[0].strides = __pyx_pybuffernd_vRes2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_vRes2D.diminfo[0].shape = __pyx_pybuffernd_vRes2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_vRes2D.diminfo[1].strides = __pyx_pybuffernd_vRes2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_vRes2D.diminfo[1].shape = __pyx_pybuffernd_vRes2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 96, __pyx_L1_error)
}
__pyx_t_6 = 0;
__pyx_v_vRes2D = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_6 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_wRes2D.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_wRes2D.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_wRes2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_wRes2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_wRes2D.diminfo[0].strides = __pyx_pybuffernd_wRes2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_wRes2D.diminfo[0].shape = __pyx_pybuffernd_wRes2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_wRes2D.diminfo[1].strides = __pyx_pybuffernd_wRes2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_wRes2D.diminfo[1].shape = __pyx_pybuffernd_wRes2D.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 96, __pyx_L1_error)
}
__pyx_t_6 = 0;
__pyx_v_wRes2D = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "PostProcess_EnergySpectrum.pyx":98
* uRes2D, vRes2D, wRes2D = u2D - u2D.mean(), v2D - v2D.mean(), w2D - w2D.mean()
* # TKE calculated form physical space
* TKE = 0.5*np.sum(uRes2D**2 + vRes2D**2 + wRes2D**2) # <<<<<<<<<<<<<<
* # Number of samples in x and y/z, columns correspond to x
* nPtX, nPtY = uRes2D.shape[1], uRes2D.shape[0]
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_sum); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Power(((PyObject *)__pyx_v_uRes2D), __pyx_int_2, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyNumber_Power(((PyObject *)__pyx_v_vRes2D), __pyx_int_2, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PyNumber_Add(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyNumber_Power(((PyObject *)__pyx_v_wRes2D), __pyx_int_2, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyNumber_Add(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_4 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Multiply(__pyx_float_0_5, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_11 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_11 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 98, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_TKE = __pyx_t_11;
/* "PostProcess_EnergySpectrum.pyx":100
* TKE = 0.5*np.sum(uRes2D**2 + vRes2D**2 + wRes2D**2)
* # Number of samples in x and y/z, columns correspond to x
* nPtX, nPtY = uRes2D.shape[1], uRes2D.shape[0] # <<<<<<<<<<<<<<
* # 2D DFT, no normalization (will be done manually below)
* uResFft, vResFft, wResFft = np.fft.fft2(uRes2D, axes = (0, 1), norm = None), \
*/
__pyx_t_12 = (__pyx_v_uRes2D->dimensions[1]);
__pyx_t_13 = (__pyx_v_uRes2D->dimensions[0]);
__pyx_v_nPtX = __pyx_t_12;
__pyx_v_nPtY = __pyx_t_13;
/* "PostProcess_EnergySpectrum.pyx":102
* nPtX, nPtY = uRes2D.shape[1], uRes2D.shape[0]
* # 2D DFT, no normalization (will be done manually below)
* uResFft, vResFft, wResFft = np.fft.fft2(uRes2D, axes = (0, 1), norm = None), \ # <<<<<<<<<<<<<<
* np.fft.fft2(vRes2D, axes = (0, 1), norm = None), \
* np.fft.fft2(wRes2D, axes = (0, 1), norm = None)
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_fft); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_fft2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_uRes2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_uRes2D));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_uRes2D));
__pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_axes, __pyx_tuple__11) < 0) __PYX_ERR(0, 102, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_norm, Py_None) < 0) __PYX_ERR(0, 102, __pyx_L1_error)
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 102, __pyx_L1_error)
/* "PostProcess_EnergySpectrum.pyx":103
* # 2D DFT, no normalization (will be done manually below)
* uResFft, vResFft, wResFft = np.fft.fft2(uRes2D, axes = (0, 1), norm = None), \
* np.fft.fft2(vRes2D, axes = (0, 1), norm = None), \ # <<<<<<<<<<<<<<
* np.fft.fft2(wRes2D, axes = (0, 1), norm = None)
* # Normalization by N^(dimension)
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_fft); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_fft2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_vRes2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_vRes2D));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_vRes2D));
__pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_axes, __pyx_tuple__11) < 0) __PYX_ERR(0, 103, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_norm, Py_None) < 0) __PYX_ERR(0, 103, __pyx_L1_error)
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 103, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 103, __pyx_L1_error)
/* "PostProcess_EnergySpectrum.pyx":104
* uResFft, vResFft, wResFft = np.fft.fft2(uRes2D, axes = (0, 1), norm = None), \
* np.fft.fft2(vRes2D, axes = (0, 1), norm = None), \
* np.fft.fft2(wRes2D, axes = (0, 1), norm = None) # <<<<<<<<<<<<<<
* # Normalization by N^(dimension)
* uResFft /= (nPtX*nPtY)
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_fft); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_fft2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_wRes2D));
__Pyx_GIVEREF(((PyObject *)__pyx_v_wRes2D));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_wRes2D));
__pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_axes, __pyx_tuple__11) < 0) __PYX_ERR(0, 104, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_norm, Py_None) < 0) __PYX_ERR(0, 104, __pyx_L1_error)
__pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_14) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_14, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 104, __pyx_L1_error)
__pyx_t_15 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_uResFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_uResFft.diminfo[0].strides = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_uResFft.diminfo[0].shape = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_uResFft.diminfo[1].strides = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_uResFft.diminfo[1].shape = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 102, __pyx_L1_error)
}
__pyx_t_15 = 0;
__pyx_v_uResFft = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_15 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_vResFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_vResFft.diminfo[0].strides = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_vResFft.diminfo[0].shape = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_vResFft.diminfo[1].strides = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_vResFft.diminfo[1].shape = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 102, __pyx_L1_error)
}
__pyx_t_15 = 0;
__pyx_v_vResFft = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_15 = ((PyArrayObject *)__pyx_t_14);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_wResFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_wResFft.diminfo[0].strides = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_wResFft.diminfo[0].shape = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_wResFft.diminfo[1].strides = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_wResFft.diminfo[1].shape = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 102, __pyx_L1_error)
}
__pyx_t_15 = 0;
__pyx_v_wResFft = ((PyArrayObject *)__pyx_t_14);
__pyx_t_14 = 0;
/* "PostProcess_EnergySpectrum.pyx":106
* np.fft.fft2(wRes2D, axes = (0, 1), norm = None)
* # Normalization by N^(dimension)
* uResFft /= (nPtX*nPtY) # <<<<<<<<<<<<<<
* vResFft /= (nPtX*nPtY)
* wResFft /= (nPtX*nPtY)
*/
__pyx_t_14 = __Pyx_PyInt_From_int((__pyx_v_nPtX * __pyx_v_nPtY)); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 106, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__pyx_t_5 = __Pyx_PyNumber_InPlaceDivide(((PyObject *)__pyx_v_uResFft), __pyx_t_14); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 106, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 106, __pyx_L1_error)
__pyx_t_15 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_uResFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_uResFft.diminfo[0].strides = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_uResFft.diminfo[0].shape = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_uResFft.diminfo[1].strides = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_uResFft.diminfo[1].shape = __pyx_pybuffernd_uResFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 106, __pyx_L1_error)
}
__pyx_t_15 = 0;
__Pyx_DECREF_SET(__pyx_v_uResFft, ((PyArrayObject *)__pyx_t_5));
__pyx_t_5 = 0;
/* "PostProcess_EnergySpectrum.pyx":107
* # Normalization by N^(dimension)
* uResFft /= (nPtX*nPtY)
* vResFft /= (nPtX*nPtY) # <<<<<<<<<<<<<<
* wResFft /= (nPtX*nPtY)
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int((__pyx_v_nPtX * __pyx_v_nPtY)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_14 = __Pyx_PyNumber_InPlaceDivide(((PyObject *)__pyx_v_vResFft), __pyx_t_5); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 107, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_14) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_14, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 107, __pyx_L1_error)
__pyx_t_15 = ((PyArrayObject *)__pyx_t_14);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_vResFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_vResFft.diminfo[0].strides = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_vResFft.diminfo[0].shape = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_vResFft.diminfo[1].strides = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_vResFft.diminfo[1].shape = __pyx_pybuffernd_vResFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 107, __pyx_L1_error)
}
__pyx_t_15 = 0;
__Pyx_DECREF_SET(__pyx_v_vResFft, ((PyArrayObject *)__pyx_t_14));
__pyx_t_14 = 0;
/* "PostProcess_EnergySpectrum.pyx":108
* uResFft /= (nPtX*nPtY)
* vResFft /= (nPtX*nPtY)
* wResFft /= (nPtX*nPtY) # <<<<<<<<<<<<<<
*
* # Corresponding frequency in x and y/z directions, expressed in cycles/m
*/
__pyx_t_14 = __Pyx_PyInt_From_int((__pyx_v_nPtX * __pyx_v_nPtY)); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__pyx_t_5 = __Pyx_PyNumber_InPlaceDivide(((PyObject *)__pyx_v_wResFft), __pyx_t_14); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 108, __pyx_L1_error)
__pyx_t_15 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_wResFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_wResFft.diminfo[0].strides = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_wResFft.diminfo[0].shape = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_wResFft.diminfo[1].strides = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_wResFft.diminfo[1].shape = __pyx_pybuffernd_wResFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 108, __pyx_L1_error)
}
__pyx_t_15 = 0;
__Pyx_DECREF_SET(__pyx_v_wResFft, ((PyArrayObject *)__pyx_t_5));
__pyx_t_5 = 0;
/* "PostProcess_EnergySpectrum.pyx":116
* # d is sample spacing, which should be equidistant,
* # in this case, cell size in x and y/z respectively
* Kx, Ky = np.fft.fftfreq(nPtX, d = cellSizes2D[0]), np.fft.fftfreq(nPtY, d = cellSizes2D[1]) # <<<<<<<<<<<<<<
* # Kx and Ky/Kz is defined as 2n*pi/L, while the K in np.fft.fftn() is simply n/L, n in [1, N]
* # Thus scale old Kx, Ky/Kz by 2pi and 2D meshgrid treatment
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_fft); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_fftfreq); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__pyx_t_14 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_14);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_14);
__pyx_t_14 = 0;
__pyx_t_14 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
if (unlikely(__pyx_v_cellSizes2D == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 116, __pyx_L1_error)
}
if (PyDict_SetItem(__pyx_t_14, __pyx_n_s_d, PyTuple_GET_ITEM(__pyx_v_cellSizes2D, 0)) < 0) __PYX_ERR(0, 116, __pyx_L1_error)
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_1, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_n_s_np); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_fft); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_fftfreq); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(__pyx_v_cellSizes2D == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 116, __pyx_L1_error)
}
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_d, PyTuple_GET_ITEM(__pyx_v_cellSizes2D, 1)) < 0) __PYX_ERR(0, 116, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_Kx = __pyx_t_3;
__pyx_t_3 = 0;
__pyx_v_Ky = __pyx_t_4;
__pyx_t_4 = 0;
/* "PostProcess_EnergySpectrum.pyx":119
* # Kx and Ky/Kz is defined as 2n*pi/L, while the K in np.fft.fftn() is simply n/L, n in [1, N]
* # Thus scale old Kx, Ky/Kz by 2pi and 2D meshgrid treatment
* Kx *= 2*np.pi # <<<<<<<<<<<<<<
* Ky *= 2*np.pi
* Kx, Ky = np.meshgrid(Kx, Ky)
*/
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_pi); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyNumber_Multiply(__pyx_int_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_Kx, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF_SET(__pyx_v_Kx, __pyx_t_3);
__pyx_t_3 = 0;
/* "PostProcess_EnergySpectrum.pyx":120
* # Thus scale old Kx, Ky/Kz by 2pi and 2D meshgrid treatment
* Kx *= 2*np.pi
* Ky *= 2*np.pi # <<<<<<<<<<<<<<
* Kx, Ky = np.meshgrid(Kx, Ky)
* # Before calculating (cross-)correlation, add arrays to 2 tuples
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Multiply(__pyx_int_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyNumber_InPlaceMultiply(__pyx_v_Ky, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_Ky, __pyx_t_4);
__pyx_t_4 = 0;
/* "PostProcess_EnergySpectrum.pyx":121
* Kx *= 2*np.pi
* Ky *= 2*np.pi
* Kx, Ky = np.meshgrid(Kx, Ky) # <<<<<<<<<<<<<<
* # Before calculating (cross-)correlation, add arrays to 2 tuples
* U1ResFft = (uResFft, uResFft, uResFft, vResFft, vResFft, wResFft)
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_meshgrid); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_Kx, __pyx_v_Ky};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_4);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_Kx, __pyx_v_Ky};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_4);
} else
#endif
{
__pyx_t_5 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (__pyx_t_3) {
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL;
}
__Pyx_INCREF(__pyx_v_Kx);
__Pyx_GIVEREF(__pyx_v_Kx);
PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_7, __pyx_v_Kx);
__Pyx_INCREF(__pyx_v_Ky);
__Pyx_GIVEREF(__pyx_v_Ky);
PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_7, __pyx_v_Ky);
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) {
PyObject* sequence = __pyx_t_4;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 121, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_1 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_1 = PyList_GET_ITEM(sequence, 0);
__pyx_t_5 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_3 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_16 = Py_TYPE(__pyx_t_3)->tp_iternext;
index = 0; __pyx_t_1 = __pyx_t_16(__pyx_t_3); if (unlikely(!__pyx_t_1)) goto __pyx_L3_unpacking_failed;
__Pyx_GOTREF(__pyx_t_1);
index = 1; __pyx_t_5 = __pyx_t_16(__pyx_t_3); if (unlikely(!__pyx_t_5)) goto __pyx_L3_unpacking_failed;
__Pyx_GOTREF(__pyx_t_5);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_16(__pyx_t_3), 2) < 0) __PYX_ERR(0, 121, __pyx_L1_error)
__pyx_t_16 = NULL;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L4_unpacking_done;
__pyx_L3_unpacking_failed:;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_16 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 121, __pyx_L1_error)
__pyx_L4_unpacking_done:;
}
__Pyx_DECREF_SET(__pyx_v_Kx, __pyx_t_1);
__pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_Ky, __pyx_t_5);
__pyx_t_5 = 0;
/* "PostProcess_EnergySpectrum.pyx":123
* Kx, Ky = np.meshgrid(Kx, Ky)
* # Before calculating (cross-)correlation, add arrays to 2 tuples
* U1ResFft = (uResFft, uResFft, uResFft, vResFft, vResFft, wResFft) # <<<<<<<<<<<<<<
* U2ResFft = (uResFft, vResFft, wResFft, vResFft, wResFft, wResFft)
* # Initialize 2D Rij in spectral space
*/
__pyx_t_4 = PyTuple_New(6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 123, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_uResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_uResFft));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_uResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_uResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_uResFft));
PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_uResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_uResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_uResFft));
PyTuple_SET_ITEM(__pyx_t_4, 2, ((PyObject *)__pyx_v_uResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_vResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_vResFft));
PyTuple_SET_ITEM(__pyx_t_4, 3, ((PyObject *)__pyx_v_vResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_vResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_vResFft));
PyTuple_SET_ITEM(__pyx_t_4, 4, ((PyObject *)__pyx_v_vResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_wResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_wResFft));
PyTuple_SET_ITEM(__pyx_t_4, 5, ((PyObject *)__pyx_v_wResFft));
__pyx_v_U1ResFft = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "PostProcess_EnergySpectrum.pyx":124
* # Before calculating (cross-)correlation, add arrays to 2 tuples
* U1ResFft = (uResFft, uResFft, uResFft, vResFft, vResFft, wResFft)
* U2ResFft = (uResFft, vResFft, wResFft, vResFft, wResFft, wResFft) # <<<<<<<<<<<<<<
* # Initialize 2D Rij in spectral space
* RijFft = np.empty((nPtY, nPtX, 6), dtype = np.complex128)
*/
__pyx_t_4 = PyTuple_New(6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 124, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_uResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_uResFft));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_uResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_vResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_vResFft));
PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_vResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_wResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_wResFft));
PyTuple_SET_ITEM(__pyx_t_4, 2, ((PyObject *)__pyx_v_wResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_vResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_vResFft));
PyTuple_SET_ITEM(__pyx_t_4, 3, ((PyObject *)__pyx_v_vResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_wResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_wResFft));
PyTuple_SET_ITEM(__pyx_t_4, 4, ((PyObject *)__pyx_v_wResFft));
__Pyx_INCREF(((PyObject *)__pyx_v_wResFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_wResFft));
PyTuple_SET_ITEM(__pyx_t_4, 5, ((PyObject *)__pyx_v_wResFft));
__pyx_v_U2ResFft = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "PostProcess_EnergySpectrum.pyx":126
* U2ResFft = (uResFft, vResFft, wResFft, vResFft, wResFft, wResFft)
* # Initialize 2D Rij in spectral space
* RijFft = np.empty((nPtY, nPtX, 6), dtype = np.complex128) # <<<<<<<<<<<<<<
* # Go through each component of RijFft
* # The 6 components are 11, 12, 13,
*/
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_empty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_nPtY); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_int_6);
__Pyx_GIVEREF(__pyx_int_6);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_int_6);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_complex128); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_14) < 0) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_14) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_14, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 126, __pyx_L1_error)
__pyx_t_17 = ((PyArrayObject *)__pyx_t_14);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_RijFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_RijFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_RijFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_RijFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_RijFft.diminfo[0].strides = __pyx_pybuffernd_RijFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_RijFft.diminfo[0].shape = __pyx_pybuffernd_RijFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_RijFft.diminfo[1].strides = __pyx_pybuffernd_RijFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_RijFft.diminfo[1].shape = __pyx_pybuffernd_RijFft.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_RijFft.diminfo[2].strides = __pyx_pybuffernd_RijFft.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_RijFft.diminfo[2].shape = __pyx_pybuffernd_RijFft.rcbuffer->pybuffer.shape[2];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 126, __pyx_L1_error)
}
__pyx_t_17 = 0;
__pyx_v_RijFft = ((PyArrayObject *)__pyx_t_14);
__pyx_t_14 = 0;
/* "PostProcess_EnergySpectrum.pyx":131
* # 22, 23,
* # 33
* for i in range(6): # <<<<<<<<<<<<<<
* # Perform the 2-point (cross-)correlation
* RijFft[:, :, i] = np.multiply(U1ResFft[i], np.conj(U2ResFft[i]))
*/
for (__pyx_t_7 = 0; __pyx_t_7 < 6; __pyx_t_7+=1) {
__pyx_v_i = __pyx_t_7;
/* "PostProcess_EnergySpectrum.pyx":133
* for i in range(6):
* # Perform the 2-point (cross-)correlation
* RijFft[:, :, i] = np.multiply(U1ResFft[i], np.conj(U2ResFft[i])) # <<<<<<<<<<<<<<
*
* # Trace of 2-point correlations
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_multiply); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_conj); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, ((PyObject *)PyTuple_GET_ITEM(__pyx_v_U2ResFft, __pyx_v_i))) : __Pyx_PyObject_CallOneArg(__pyx_t_4, ((PyObject *)PyTuple_GET_ITEM(__pyx_v_U2ResFft, __pyx_v_i)));
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = NULL;
__pyx_t_18 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
__pyx_t_18 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)PyTuple_GET_ITEM(__pyx_v_U1ResFft, __pyx_v_i)), __pyx_t_3};
__pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_18, 2+__pyx_t_18); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)PyTuple_GET_ITEM(__pyx_v_U1ResFft, __pyx_v_i)), __pyx_t_3};
__pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_18, 2+__pyx_t_18); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
{
__pyx_t_5 = PyTuple_New(2+__pyx_t_18); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (__pyx_t_4) {
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL;
}
__Pyx_INCREF(((PyObject *)PyTuple_GET_ITEM(__pyx_v_U1ResFft, __pyx_v_i)));
__Pyx_GIVEREF(((PyObject *)PyTuple_GET_ITEM(__pyx_v_U1ResFft, __pyx_v_i)));
PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_18, ((PyObject *)PyTuple_GET_ITEM(__pyx_v_U1ResFft, __pyx_v_i)));
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_18, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_5, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_slice__3);
__Pyx_INCREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_slice__3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_RijFft), __pyx_t_5, __pyx_t_14) < 0)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
}
/* "PostProcess_EnergySpectrum.pyx":137
* # Trace of 2-point correlations
* # If decompose Rii to horizontal Rii and R33
* if horizontalEii: # <<<<<<<<<<<<<<
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3]
* else:
*/
__pyx_t_19 = __Pyx_PyObject_IsTrue(__pyx_v_horizontalEii); if (unlikely(__pyx_t_19 < 0)) __PYX_ERR(0, 137, __pyx_L1_error)
if (__pyx_t_19) {
/* "PostProcess_EnergySpectrum.pyx":138
* # If decompose Rii to horizontal Rii and R33
* if horizontalEii:
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3] # <<<<<<<<<<<<<<
* else:
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3] + RijFft[:, :, 5]
*/
__pyx_t_14 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_RijFft), __pyx_tuple__12); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_RijFft), __pyx_tuple__13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyNumber_Add(__pyx_t_14, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 138, __pyx_L1_error)
__pyx_t_20 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_RiiFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_RiiFft.diminfo[0].strides = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_RiiFft.diminfo[0].shape = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_RiiFft.diminfo[1].strides = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_RiiFft.diminfo[1].shape = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 138, __pyx_L1_error)
}
__pyx_t_20 = 0;
__pyx_v_RiiFft = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "PostProcess_EnergySpectrum.pyx":137
* # Trace of 2-point correlations
* # If decompose Rii to horizontal Rii and R33
* if horizontalEii: # <<<<<<<<<<<<<<
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3]
* else:
*/
goto __pyx_L7;
}
/* "PostProcess_EnergySpectrum.pyx":140
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3]
* else:
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3] + RijFft[:, :, 5] # <<<<<<<<<<<<<<
*
* # Original resultant Kr
*/
/*else*/ {
__pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_RijFft), __pyx_tuple__12); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_RijFft), __pyx_tuple__13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_14 = PyNumber_Add(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_RijFft), __pyx_tuple__14); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyNumber_Add(__pyx_t_14, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 140, __pyx_L1_error)
__pyx_t_20 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer, (PyObject*)__pyx_v_RiiFft, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_RiiFft.diminfo[0].strides = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_RiiFft.diminfo[0].shape = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_RiiFft.diminfo[1].strides = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_RiiFft.diminfo[1].shape = __pyx_pybuffernd_RiiFft.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 140, __pyx_L1_error)
}
__pyx_t_20 = 0;
__pyx_v_RiiFft = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
}
__pyx_L7:;
/* "PostProcess_EnergySpectrum.pyx":143
*
* # Original resultant Kr
* KrOld = np.sqrt(Kx**2 + Ky**2) # <<<<<<<<<<<<<<
* # New proposed Kr for E spectrum, same number of points as x
* Kr0 = 2*np.pi/L
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyNumber_Power(__pyx_v_Kx, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyNumber_Power(__pyx_v_Ky, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyNumber_Add(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_14))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_14);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_14, function);
}
}
__pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_14, __pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_14, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 143, __pyx_L1_error)
__pyx_t_6 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_KrOld.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_KrOld.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_KrOld.rcbuffer->pybuffer, (PyObject*)__pyx_v_KrOld, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_KrOld.diminfo[0].strides = __pyx_pybuffernd_KrOld.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_KrOld.diminfo[0].shape = __pyx_pybuffernd_KrOld.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_KrOld.diminfo[1].strides = __pyx_pybuffernd_KrOld.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_KrOld.diminfo[1].shape = __pyx_pybuffernd_KrOld.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 143, __pyx_L1_error)
}
__pyx_t_6 = 0;
__pyx_v_KrOld = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "PostProcess_EnergySpectrum.pyx":145
* KrOld = np.sqrt(Kx**2 + Ky**2)
* # New proposed Kr for E spectrum, same number of points as x
* Kr0 = 2*np.pi/L # <<<<<<<<<<<<<<
* Kr = Kr0*np.linspace(1, nPtX, nPtX)
* # Initialize Eij combined from 2D to 1D
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_pi); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyNumber_Multiply(__pyx_int_2, __pyx_t_14); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__pyx_t_14 = PyFloat_FromDouble(__pyx_v_L); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_14); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__pyx_t_11 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_11 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_Kr0 = __pyx_t_11;
/* "PostProcess_EnergySpectrum.pyx":146
* # New proposed Kr for E spectrum, same number of points as x
* Kr0 = 2*np.pi/L
* Kr = Kr0*np.linspace(1, nPtX, nPtX) # <<<<<<<<<<<<<<
* # Initialize Eij combined from 2D to 1D
* # Eij[i] is 0.5ui'uj' = 0.5sum(Rij of equal Kr[i])
*/
__pyx_t_4 = PyFloat_FromDouble(__pyx_v_Kr0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_linspace); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_nPtX); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_2 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_int_1, __pyx_t_1, __pyx_t_5};
__pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_7, 3+__pyx_t_7); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_int_1, __pyx_t_1, __pyx_t_5};
__pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_7, 3+__pyx_t_7); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else
#endif
{
__pyx_t_21 = PyTuple_New(3+__pyx_t_7); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_21, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_21, 0+__pyx_t_7, __pyx_int_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_21, 1+__pyx_t_7, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_21, 2+__pyx_t_7, __pyx_t_5);
__pyx_t_1 = 0;
__pyx_t_5 = 0;
__pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_21, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Multiply(__pyx_t_4, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 146, __pyx_L1_error)
__pyx_t_22 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Kr.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Kr.rcbuffer->pybuffer, (PyObject*)__pyx_t_22, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Kr.rcbuffer->pybuffer, (PyObject*)__pyx_v_Kr, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_Kr.diminfo[0].strides = __pyx_pybuffernd_Kr.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_Kr.diminfo[0].shape = __pyx_pybuffernd_Kr.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 146, __pyx_L1_error)
}
__pyx_t_22 = 0;
__pyx_v_Kr = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
/* "PostProcess_EnergySpectrum.pyx":149
* # Initialize Eij combined from 2D to 1D
* # Eij[i] is 0.5ui'uj' = 0.5sum(Rij of equal Kr[i])
* Eij = np.empty((len(Kr), 6), dtype = np.complex128) # <<<<<<<<<<<<<<
* Eii = np.empty_like(Kr, dtype = np.complex128)
* # Occurrence when KrOld is close to each Kr[i]
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_23 = PyObject_Length(((PyObject *)__pyx_v_Kr)); if (unlikely(__pyx_t_23 == ((Py_ssize_t)-1))) __PYX_ERR(0, 149, __pyx_L1_error)
__pyx_t_3 = PyInt_FromSsize_t(__pyx_t_23); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_6);
__Pyx_GIVEREF(__pyx_int_6);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_6);
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_21, __pyx_n_s_np); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_21, __pyx_n_s_complex128); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 149, __pyx_L1_error)
__pyx_t_20 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Eij.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Eij.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Eij.rcbuffer->pybuffer, (PyObject*)__pyx_v_Eij, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_Eij.diminfo[0].strides = __pyx_pybuffernd_Eij.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_Eij.diminfo[0].shape = __pyx_pybuffernd_Eij.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_Eij.diminfo[1].strides = __pyx_pybuffernd_Eij.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_Eij.diminfo[1].shape = __pyx_pybuffernd_Eij.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 149, __pyx_L1_error)
}
__pyx_t_20 = 0;
__pyx_v_Eij = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "PostProcess_EnergySpectrum.pyx":150
* # Eij[i] is 0.5ui'uj' = 0.5sum(Rij of equal Kr[i])
* Eij = np.empty((len(Kr), 6), dtype = np.complex128)
* Eii = np.empty_like(Kr, dtype = np.complex128) # <<<<<<<<<<<<<<
* # Occurrence when KrOld is close to each Kr[i]
* occurrence = np.empty(len(Kr))
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_empty_like); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_Kr));
__Pyx_GIVEREF(((PyObject *)__pyx_v_Kr));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_Kr));
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_n_s_np); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__pyx_t_21 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_complex128); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_21) < 0) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
__pyx_t_21 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_21) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_21, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 150, __pyx_L1_error)
__pyx_t_24 = ((PyArrayObject *)__pyx_t_21);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Eii.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Eii.rcbuffer->pybuffer, (PyObject*)__pyx_t_24, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Eii.rcbuffer->pybuffer, (PyObject*)__pyx_v_Eii, &__Pyx_TypeInfo___pyx_t_double_complex, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8);
}
__pyx_t_10 = __pyx_t_9 = __pyx_t_8 = 0;
}
__pyx_pybuffernd_Eii.diminfo[0].strides = __pyx_pybuffernd_Eii.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_Eii.diminfo[0].shape = __pyx_pybuffernd_Eii.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 150, __pyx_L1_error)
}
__pyx_t_24 = 0;
__pyx_v_Eii = ((PyArrayObject *)__pyx_t_21);
__pyx_t_21 = 0;
/* "PostProcess_EnergySpectrum.pyx":152
* Eii = np.empty_like(Kr, dtype = np.complex128)
* # Occurrence when KrOld is close to each Kr[i]
* occurrence = np.empty(len(Kr)) # <<<<<<<<<<<<<<
* # Go through each proposed Kr
* # Integrate Rij where KrOld lies between Kr0*[(i + 1) - 0.5, (i + 1) + 0.5)
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_23 = PyObject_Length(((PyObject *)__pyx_v_Kr)); if (unlikely(__pyx_t_23 == ((Py_ssize_t)-1))) __PYX_ERR(0, 152, __pyx_L1_error)
__pyx_t_3 = PyInt_FromSsize_t(__pyx_t_23); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_21 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_21) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_21, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 152, __pyx_L1_error)
__pyx_t_22 = ((PyArrayObject *)__pyx_t_21);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_occurrence.rcbuffer->pybuffer);
__pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_occurrence.rcbuffer->pybuffer, (PyObject*)__pyx_t_22, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_7 < 0)) {
PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_occurrence.rcbuffer->pybuffer, (PyObject*)__pyx_v_occurrence, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
}
__pyx_t_8 = __pyx_t_9 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_occurrence.diminfo[0].strides = __pyx_pybuffernd_occurrence.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_occurrence.diminfo[0].shape = __pyx_pybuffernd_occurrence.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 152, __pyx_L1_error)
}
__pyx_t_22 = 0;
__pyx_v_occurrence = ((PyArrayObject *)__pyx_t_21);
__pyx_t_21 = 0;
/* "PostProcess_EnergySpectrum.pyx":156
* # Integrate Rij where KrOld lies between Kr0*[(i + 1) - 0.5, (i + 1) + 0.5)
* # This is possible since RijFft and KrOld has matching 2D indices
* for i in range(len(Kr)): # <<<<<<<<<<<<<<
* occurrence[i] = len(KrOld[(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))])
* # For Eij, go through all 6 components
*/
__pyx_t_23 = PyObject_Length(((PyObject *)__pyx_v_Kr)); if (unlikely(__pyx_t_23 == ((Py_ssize_t)-1))) __PYX_ERR(0, 156, __pyx_L1_error)
__pyx_t_25 = __pyx_t_23;
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_25; __pyx_t_7+=1) {
__pyx_v_i = __pyx_t_7;
/* "PostProcess_EnergySpectrum.pyx":157
* # This is possible since RijFft and KrOld has matching 2D indices
* for i in range(len(Kr)):
* occurrence[i] = len(KrOld[(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))]) # <<<<<<<<<<<<<<
* # For Eij, go through all 6 components
* for j in range(6):
*/
__pyx_t_21 = PyFloat_FromDouble((__pyx_v_Kr0 * ((__pyx_v_i + 1) - 0.5))); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 157, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_KrOld), __pyx_t_21, Py_GE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 157, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
__pyx_t_21 = PyFloat_FromDouble((__pyx_v_Kr0 * ((__pyx_v_i + 1) + 0.5))); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 157, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__pyx_t_3 = PyObject_RichCompare(((PyObject *)__pyx_v_KrOld), __pyx_t_21, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 157, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
__pyx_t_21 = PyNumber_And(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 157, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_KrOld), __pyx_t_21); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 157, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
__pyx_t_26 = PyObject_Length(__pyx_t_3); if (unlikely(__pyx_t_26 == ((Py_ssize_t)-1))) __PYX_ERR(0, 157, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_27 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_occurrence.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_occurrence.diminfo[0].strides) = __pyx_t_26;
/* "PostProcess_EnergySpectrum.pyx":159
* occurrence[i] = len(KrOld[(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))])
* # For Eij, go through all 6 components
* for j in range(6): # <<<<<<<<<<<<<<
* Eij[i, j] = 0.5*np.sum(RijFft[:, :, j][(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))])
*
*/
for (__pyx_t_18 = 0; __pyx_t_18 < 6; __pyx_t_18+=1) {
__pyx_v_j = __pyx_t_18;
/* "PostProcess_EnergySpectrum.pyx":160
* # For Eij, go through all 6 components
* for j in range(6):
* Eij[i, j] = 0.5*np.sum(RijFft[:, :, j][(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))]) # <<<<<<<<<<<<<<
*
* Eii[i] = 0.5*np.sum(RiiFft[(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))])
*/
__Pyx_GetModuleGlobalName(__pyx_t_21, __pyx_n_s_np); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_21, __pyx_n_s_sum); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
__pyx_t_21 = __Pyx_PyInt_From_int(__pyx_v_j); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_slice__3);
__Pyx_INCREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_slice__3);
__Pyx_GIVEREF(__pyx_t_21);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_21);
__pyx_t_21 = 0;
__pyx_t_21 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_RijFft), __pyx_t_4); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyFloat_FromDouble((__pyx_v_Kr0 * ((__pyx_v_i + 1) - 0.5))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_14 = PyObject_RichCompare(((PyObject *)__pyx_v_KrOld), __pyx_t_4, Py_GE); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyFloat_FromDouble((__pyx_v_Kr0 * ((__pyx_v_i + 1) + 0.5))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_KrOld), __pyx_t_4, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyNumber_And(__pyx_t_14, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_21, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyNumber_Multiply(__pyx_float_0_5, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_28 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_5); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_29 = __pyx_v_i;
__pyx_t_30 = __pyx_v_j;
*__Pyx_BufPtrStrided2d(__pyx_t_double_complex *, __pyx_pybuffernd_Eij.rcbuffer->pybuffer.buf, __pyx_t_29, __pyx_pybuffernd_Eij.diminfo[0].strides, __pyx_t_30, __pyx_pybuffernd_Eij.diminfo[1].strides) = __pyx_t_28;
}
/* "PostProcess_EnergySpectrum.pyx":162
* Eij[i, j] = 0.5*np.sum(RijFft[:, :, j][(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))])
*
* Eii[i] = 0.5*np.sum(RiiFft[(KrOld >= Kr0*(i + 1 - 0.5)) & (KrOld < Kr0*(i + 1 + 0.5))]) # <<<<<<<<<<<<<<
*
* # # If done right, TKE from energy spectrum should equal TKE in physical space
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_sum); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyFloat_FromDouble((__pyx_v_Kr0 * ((__pyx_v_i + 1) - 0.5))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(((PyObject *)__pyx_v_KrOld), __pyx_t_3, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyFloat_FromDouble((__pyx_v_Kr0 * ((__pyx_v_i + 1) + 0.5))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_21 = PyObject_RichCompare(((PyObject *)__pyx_v_KrOld), __pyx_t_3, Py_LT); __Pyx_XGOTREF(__pyx_t_21); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_And(__pyx_t_4, __pyx_t_21); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
__pyx_t_21 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_RiiFft), __pyx_t_3); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_21);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
__pyx_t_5 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_t_21) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_21);
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyNumber_Multiply(__pyx_float_0_5, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_28 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_31 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(__pyx_t_double_complex *, __pyx_pybuffernd_Eii.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_Eii.diminfo[0].strides) = __pyx_t_28;
}
/* "PostProcess_EnergySpectrum.pyx":171
* # plt.hist(occurrence)
*
* return RiiFft, Eii, RijFft, Eij, Kx, Ky, Kr, TKE # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_TKE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 171, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PyTuple_New(8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 171, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_RiiFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_RiiFft));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_RiiFft));
__Pyx_INCREF(((PyObject *)__pyx_v_Eii));
__Pyx_GIVEREF(((PyObject *)__pyx_v_Eii));
PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_Eii));
__Pyx_INCREF(((PyObject *)__pyx_v_RijFft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_RijFft));
PyTuple_SET_ITEM(__pyx_t_5, 2, ((PyObject *)__pyx_v_RijFft));
__Pyx_INCREF(((PyObject *)__pyx_v_Eij));
__Pyx_GIVEREF(((PyObject *)__pyx_v_Eij));
PyTuple_SET_ITEM(__pyx_t_5, 3, ((PyObject *)__pyx_v_Eij));
__Pyx_INCREF(__pyx_v_Kx);
__Pyx_GIVEREF(__pyx_v_Kx);
PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_v_Kx);
__Pyx_INCREF(__pyx_v_Ky);
__Pyx_GIVEREF(__pyx_v_Ky);
PyTuple_SET_ITEM(__pyx_t_5, 5, __pyx_v_Ky);
__Pyx_INCREF(((PyObject *)__pyx_v_Kr));
__Pyx_GIVEREF(((PyObject *)__pyx_v_Kr));
PyTuple_SET_ITEM(__pyx_t_5, 6, ((PyObject *)__pyx_v_Kr));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 7, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "PostProcess_EnergySpectrum.pyx":81
* @cython.wraparound(False)
* @cython.cdivision(True)
* cpdef getPlanarEnergySpectrum(np.ndarray[np.float_t, ndim= 2] u2D, np.ndarray[np.float_t, ndim= 2] v2D, np.ndarray[np.float_t, ndim= 2] w2D, double L, tuple cellSizes2D, horizontalEii = False): # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float_t, ndim = 2] uRes2D, vRes2D, wRes2D, KrOld
* cdef np.ndarray[np.complex128_t, ndim = 2] uResFft, vResFft, wResFft
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_14);
__Pyx_XDECREF(__pyx_t_21);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Eii.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Eij.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Kr.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_KrOld.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_RijFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_occurrence.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_uRes2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vRes2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_wRes2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("PostProcess_EnergySpectrum.getPlanarEnergySpectrum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Eii.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Eij.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Kr.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_KrOld.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_RiiFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_RijFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_occurrence.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_uRes2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_uResFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vRes2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vResFft.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_wRes2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_wResFft.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_uRes2D);
__Pyx_XDECREF((PyObject *)__pyx_v_vRes2D);
__Pyx_XDECREF((PyObject *)__pyx_v_wRes2D);
__Pyx_XDECREF((PyObject *)__pyx_v_KrOld);
__Pyx_XDECREF((PyObject *)__pyx_v_uResFft);
__Pyx_XDECREF((PyObject *)__pyx_v_vResFft);
__Pyx_XDECREF((PyObject *)__pyx_v_wResFft);
__Pyx_XDECREF(__pyx_v_U1ResFft);
__Pyx_XDECREF(__pyx_v_U2ResFft);
__Pyx_XDECREF((PyObject *)__pyx_v_RiiFft);
__Pyx_XDECREF((PyObject *)__pyx_v_Eij);
__Pyx_XDECREF((PyObject *)__pyx_v_RijFft);
__Pyx_XDECREF((PyObject *)__pyx_v_Eii);
__Pyx_XDECREF((PyObject *)__pyx_v_Kr);
__Pyx_XDECREF((PyObject *)__pyx_v_occurrence);
__Pyx_XDECREF(__pyx_v_Kx);
__Pyx_XDECREF(__pyx_v_Ky);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * CPython-visible wrapper for
 * PostProcess_EnergySpectrum.getPlanarEnergySpectrum(u2D, v2D, w2D, L,
 *                                                    cellSizes2D,
 *                                                    horizontalEii=False).
 *
 * Unpacks the positional tuple and/or keyword dict into values[0..5],
 * applies the default Py_False for the optional 6th argument
 * 'horizontalEii', converts 'L' to a C double, type-checks the three
 * ndarray arguments and the 'cellSizes2D' tuple, then forwards to the
 * impl function __pyx_pf_26PostProcess_EnergySpectrum_2getPlanarEnergySpectrum.
 *
 * NOTE(review): auto-generated by Cython from the .pyx source — do not
 * hand-edit; regenerate from the .pyx instead.
 */
static PyObject *__pyx_pw_26PostProcess_EnergySpectrum_3getPlanarEnergySpectrum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_26PostProcess_EnergySpectrum_3getPlanarEnergySpectrum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_u2D = 0;
PyArrayObject *__pyx_v_v2D = 0;
PyArrayObject *__pyx_v_w2D = 0;
double __pyx_v_L;
PyObject *__pyx_v_cellSizes2D = 0;
PyObject *__pyx_v_horizontalEii = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("getPlanarEnergySpectrum (wrapper)", 0);
{
/* Interned argument-name strings, in declaration order; NULL-terminated. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_u2D,&__pyx_n_s_v2D,&__pyx_n_s_w2D,&__pyx_n_s_L,&__pyx_n_s_cellSizes2D,&__pyx_n_s_horizontalEii,0};
PyObject* values[6] = {0,0,0,0,0,0};
/* Default for the optional 'horizontalEii' argument: False. */
values[5] = ((PyObject *)Py_False);
if (unlikely(__pyx_kwds)) {
/* Keyword path: first consume whatever came positionally (fall-through
 * switch fills values[] from the back), then fetch the rest by name. */
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill the remaining required slots by keyword; a missing required
 * argument (slots 1..4) raises the "takes 5..6 arguments" TypeError. */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_u2D)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_v2D)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("getPlanarEnergySpectrum", 0, 5, 6, 1); __PYX_ERR(0, 81, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_w2D)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("getPlanarEnergySpectrum", 0, 5, 6, 2); __PYX_ERR(0, 81, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("getPlanarEnergySpectrum", 0, 5, 6, 3); __PYX_ERR(0, 81, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cellSizes2D)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("getPlanarEnergySpectrum", 0, 5, 6, 4); __PYX_ERR(0, 81, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
/* 'horizontalEii' is optional: only overwrite the default if present. */
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_horizontalEii);
if (value) { values[5] = value; kw_args--; }
}
}
/* Any leftover keywords are unexpected/duplicate -> TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "getPlanarEnergySpectrum") < 0)) __PYX_ERR(0, 81, __pyx_L3_error)
}
} else {
/* Fast positional-only path: exactly 5 or 6 arguments accepted. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_u2D = ((PyArrayObject *)values[0]);
__pyx_v_v2D = ((PyArrayObject *)values[1]);
__pyx_v_w2D = ((PyArrayObject *)values[2]);
/* Coerce 'L' to C double; -1.0 with a pending exception signals failure. */
__pyx_v_L = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_L == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L3_error)
__pyx_v_cellSizes2D = ((PyObject*)values[4]);
__pyx_v_horizontalEii = values[5];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("getPlanarEnergySpectrum", 0, 5, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 81, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("PostProcess_EnergySpectrum.getPlanarEnergySpectrum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Runtime type checks: u2D/v2D/w2D must be numpy.ndarray (non-None),
 * cellSizes2D must be a tuple (None allowed per the last flag). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_u2D), __pyx_ptype_5numpy_ndarray, 1, "u2D", 0))) __PYX_ERR(0, 81, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_v2D), __pyx_ptype_5numpy_ndarray, 1, "v2D", 0))) __PYX_ERR(0, 81, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_w2D), __pyx_ptype_5numpy_ndarray, 1, "w2D", 0))) __PYX_ERR(0, 81, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_cellSizes2D), (&PyTuple_Type), 1, "cellSizes2D", 1))) __PYX_ERR(0, 81, __pyx_L1_error)
__pyx_r = __pyx_pf_26PostProcess_EnergySpectrum_2getPlanarEnergySpectrum(__pyx_self, __pyx_v_u2D, __pyx_v_v2D, __pyx_v_w2D, __pyx_v_L, __pyx_v_cellSizes2D, __pyx_v_horizontalEii);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Impl shim for getPlanarEnergySpectrum: acquires 2-D strided buffer views
 * of float type (__pyx_t_5numpy_float_t) on u2D, v2D and w2D, packs the
 * optional 'horizontalEii' argument into the optional-args struct, and
 * delegates to the C-level function
 * __pyx_f_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum.
 * All three buffers are released on both the success and the error path
 * (on error, under saved/restored exception state so the release cannot
 * clobber the pending exception).
 *
 * NOTE(review): auto-generated by Cython — do not hand-edit.
 */
static PyObject *__pyx_pf_26PostProcess_EnergySpectrum_2getPlanarEnergySpectrum(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_u2D, PyArrayObject *__pyx_v_v2D, PyArrayObject *__pyx_v_w2D, double __pyx_v_L, PyObject *__pyx_v_cellSizes2D, PyObject *__pyx_v_horizontalEii) {
__Pyx_LocalBuf_ND __pyx_pybuffernd_u2D;
__Pyx_Buffer __pyx_pybuffer_u2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_v2D;
__Pyx_Buffer __pyx_pybuffer_v2D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_w2D;
__Pyx_Buffer __pyx_pybuffer_w2D;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
struct __pyx_opt_args_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum __pyx_t_2;
__Pyx_RefNannySetupContext("getPlanarEnergySpectrum", 0);
/* Zero the buffer records so a release on the error path is safe even if
 * the corresponding acquire never happened. */
__pyx_pybuffer_u2D.pybuffer.buf = NULL;
__pyx_pybuffer_u2D.refcount = 0;
__pyx_pybuffernd_u2D.data = NULL;
__pyx_pybuffernd_u2D.rcbuffer = &__pyx_pybuffer_u2D;
__pyx_pybuffer_v2D.pybuffer.buf = NULL;
__pyx_pybuffer_v2D.refcount = 0;
__pyx_pybuffernd_v2D.data = NULL;
__pyx_pybuffernd_v2D.rcbuffer = &__pyx_pybuffer_v2D;
__pyx_pybuffer_w2D.pybuffer.buf = NULL;
__pyx_pybuffer_w2D.refcount = 0;
__pyx_pybuffernd_w2D.data = NULL;
__pyx_pybuffernd_w2D.rcbuffer = &__pyx_pybuffer_w2D;
/* Acquire and validate a 2-dimensional strided float buffer on u2D. */
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_u2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 81, __pyx_L1_error)
}
__pyx_pybuffernd_u2D.diminfo[0].strides = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_u2D.diminfo[0].shape = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_u2D.diminfo[1].strides = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_u2D.diminfo[1].shape = __pyx_pybuffernd_u2D.rcbuffer->pybuffer.shape[1];
/* Same for v2D. */
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_v2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 81, __pyx_L1_error)
}
__pyx_pybuffernd_v2D.diminfo[0].strides = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_v2D.diminfo[0].shape = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_v2D.diminfo[1].strides = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_v2D.diminfo[1].shape = __pyx_pybuffernd_v2D.rcbuffer->pybuffer.shape[1];
/* Same for w2D. */
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer, (PyObject*)__pyx_v_w2D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 81, __pyx_L1_error)
}
__pyx_pybuffernd_w2D.diminfo[0].strides = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_w2D.diminfo[0].shape = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_w2D.diminfo[1].strides = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_w2D.diminfo[1].shape = __pyx_pybuffernd_w2D.rcbuffer->pybuffer.shape[1];
__Pyx_XDECREF(__pyx_r);
/* One optional argument ('horizontalEii') is being passed explicitly. */
__pyx_t_2.__pyx_n = 1;
__pyx_t_2.horizontalEii = __pyx_v_horizontalEii;
__pyx_t_1 = __pyx_f_26PostProcess_EnergySpectrum_getPlanarEnergySpectrum(__pyx_v_u2D, __pyx_v_v2D, __pyx_v_w2D, __pyx_v_L, __pyx_v_cellSizes2D, 0, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
/* Release buffers with the pending exception saved so buffer teardown
 * cannot overwrite it, then restore it and add the traceback entry. */
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("PostProcess_EnergySpectrum.getPlanarEnergySpectrum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
/* Success path: plain release of the three buffer views. */
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_u2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_v2D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_w2D.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fulfill the PEP.
*/
/* Python wrapper */
/*
 * Thin dispatch wrapper for numpy.ndarray.__getbuffer__ (buffer-protocol
 * getbuffer slot, generated from Cython's bundled numpy/__init__.pxd).
 * Casts the arguments and forwards unchanged to the impl function
 * __pyx_pf_5numpy_7ndarray___getbuffer__; returns its int status.
 * Auto-generated by Cython — do not hand-edit.
 */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyArray_Descr *__pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
*
* cdef int i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
* cdef int i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
if (unlikely(__pyx_t_1)) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 272, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 272, __pyx_L1_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L7_bool_binop_done;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L7_bool_binop_done:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
if (unlikely(__pyx_t_1)) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 276, __pyx_L1_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim))));
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":287
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
goto __pyx_L9;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L9:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":296
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = <dtype>PyArray_DESCR(self)
* cdef int offset
*/
__pyx_v_f = NULL;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":297
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = <dtype>PyArray_DESCR(self) # <<<<<<<<<<<<<<
* cdef int offset
*
*/
__pyx_t_7 = PyArray_DESCR(__pyx_v_self);
__pyx_t_3 = ((PyObject *)__pyx_t_7);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":300
* cdef int offset
*
* info.obj = self # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(descr):
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":302
* info.obj = self
*
* if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0);
if (__pyx_t_1) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":303
*
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L15_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_L15_next_or:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":305
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L14_bool_binop_done:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (unlikely(__pyx_t_1)) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":306
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 306, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 306, __pyx_L1_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":307
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
case NPY_UBYTE:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":308
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
__pyx_v_f = ((char *)"B");
break;
case NPY_SHORT:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":309
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
__pyx_v_f = ((char *)"h");
break;
case NPY_USHORT:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":310
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
__pyx_v_f = ((char *)"H");
break;
case NPY_INT:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":311
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
__pyx_v_f = ((char *)"i");
break;
case NPY_UINT:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":312
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
__pyx_v_f = ((char *)"I");
break;
case NPY_LONG:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":313
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
__pyx_v_f = ((char *)"l");
break;
case NPY_ULONG:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":314
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
__pyx_v_f = ((char *)"L");
break;
case NPY_LONGLONG:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":315
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
__pyx_v_f = ((char *)"q");
break;
case NPY_ULONGLONG:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":316
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
__pyx_v_f = ((char *)"Q");
break;
case NPY_FLOAT:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":317
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
__pyx_v_f = ((char *)"f");
break;
case NPY_DOUBLE:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":318
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
__pyx_v_f = ((char *)"d");
break;
case NPY_LONGDOUBLE:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":319
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
__pyx_v_f = ((char *)"g");
break;
case NPY_CFLOAT:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":320
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
__pyx_v_f = ((char *)"Zf");
break;
case NPY_CDOUBLE:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":321
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
__pyx_v_f = ((char *)"Zd");
break;
case NPY_CLONGDOUBLE:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":322
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
__pyx_v_f = ((char *)"Zg");
break;
case NPY_OBJECT:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":323
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_v_f = ((char *)"O");
break;
default:
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":325
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 325, __pyx_L1_error)
break;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":326
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":327
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":302
* info.obj = self
*
* if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":329
* return
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
/*else*/ {
__pyx_v_info->format = ((char *)PyObject_Malloc(0xFF));
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":330
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":331
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":332
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 332, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":335
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fulfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":337
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
*/
/* Python wrapper */
/* Slot-level entry point for ndarray.__releasebuffer__ (Cython-generated).
 * Casts the generic PyObject* self to PyArrayObject* and forwards to the
 * implementation function; a releasebuffer slot returns void and cannot
 * report errors, so no error handling is generated here. */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
/* Delegate to the actual implementation of __releasebuffer__. */
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of ndarray.__releasebuffer__ (from numpy/__init__.pxd:337).
 * Frees the per-export allocations that __getbuffer__ made on this
 * Py_buffer: the format string (only allocated for structured dtypes,
 * i.e. when PyArray_HASFIELDS is true) and the strides/shape block
 * (only copied when npy_intp and Py_ssize_t differ in size). */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__releasebuffer__", 0);

  /* "if PyArray_HASFIELDS(self): PyObject_Free(info.format)" —
   * the format string was PyObject_Malloc'ed only for field-bearing
   * (structured) dtypes; builtin dtypes used static string literals. */
  if (PyArray_HASFIELDS(__pyx_v_self)) {
    PyObject_Free(__pyx_v_info->format);
  }

  /* "if sizeof(npy_intp) != sizeof(Py_ssize_t): PyObject_Free(info.strides)" —
   * info.shape was stored after info.strides in the same allocation,
   * so freeing strides releases both. */
  if (sizeof(npy_intp) != sizeof(Py_ssize_t)) {
    PyObject_Free(__pyx_v_info->strides);
  }

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* Inline helper for the Cython-level `PyArray_MultiIterNew1(a)`
 * (numpy/__init__.pxd:821): build a NumPy multi-iterator over one
 * broadcastable object.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *multi_iter = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);

  /* return PyArray_MultiIterNew(1, <void*>a) */
  multi_iter = PyArray_MultiIterNew(1, ((void *)__pyx_v_a));
  if (unlikely(!multi_iter)) __PYX_ERR(1, 822, __pyx_L1_error)
  __Pyx_GOTREF(multi_iter);
  __pyx_r = multi_iter;
  multi_iter = NULL;
  goto __pyx_L0;

  /* error path: release the temporary, record the traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(multi_iter);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* Inline helper for the Cython-level `PyArray_MultiIterNew2(a, b)`
 * (numpy/__init__.pxd:824): build a NumPy multi-iterator broadcasting
 * two objects together.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *multi_iter = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);

  /* return PyArray_MultiIterNew(2, <void*>a, <void*>b) */
  multi_iter = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b));
  if (unlikely(!multi_iter)) __PYX_ERR(1, 825, __pyx_L1_error)
  __Pyx_GOTREF(multi_iter);
  __pyx_r = multi_iter;
  multi_iter = NULL;
  goto __pyx_L0;

  /* error path: release the temporary, record the traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(multi_iter);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* Inline helper for the Cython-level `PyArray_MultiIterNew3(a, b, c)`
 * (numpy/__init__.pxd:827): build a NumPy multi-iterator broadcasting
 * three objects together.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *multi_iter = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);

  /* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c) */
  multi_iter = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c));
  if (unlikely(!multi_iter)) __PYX_ERR(1, 828, __pyx_L1_error)
  __Pyx_GOTREF(multi_iter);
  __pyx_r = multi_iter;
  multi_iter = NULL;
  goto __pyx_L0;

  /* error path: release the temporary, record the traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(multi_iter);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* Inline helper for the Cython-level `PyArray_MultiIterNew4(a, b, c, d)`
 * (numpy/__init__.pxd:830): build a NumPy multi-iterator broadcasting
 * four objects together.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *multi_iter = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);

  /* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d) */
  multi_iter = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d));
  if (unlikely(!multi_iter)) __PYX_ERR(1, 831, __pyx_L1_error)
  __Pyx_GOTREF(multi_iter);
  __pyx_r = multi_iter;
  multi_iter = NULL;
  goto __pyx_L0;

  /* error path: release the temporary, record the traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(multi_iter);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* Inline helper for the Cython-level `PyArray_MultiIterNew5(a, b, c, d, e)`
 * (numpy/__init__.pxd:833): build a NumPy multi-iterator broadcasting
 * five objects together.  Returns a new reference, or NULL with a
 * Python exception set on failure. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *multi_iter = NULL;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);

  /* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e) */
  multi_iter = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e));
  if (unlikely(!multi_iter)) __PYX_ERR(1, 834, __pyx_L1_error)
  __Pyx_GOTREF(multi_iter);
  __pyx_r = multi_iter;
  multi_iter = NULL;
  goto __pyx_L0;

  /* error path: release the temporary, record the traceback, return NULL */
  __pyx_L1_error:;
  __Pyx_XDECREF(multi_iter);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* Inline helper for the Cython-level `PyDataType_SHAPE(d)`
 * (numpy/__init__.pxd:836): return the subarray shape tuple of a dtype
 * if it has one, otherwise the empty tuple.  Always returns a new
 * reference and cannot fail (no error path is generated). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);

  if (PyDataType_HASSUBARRAY(__pyx_v_d)) {
    /* "return <tuple>d.subarray.shape" — hand out a new reference to
     * the dtype's stored shape tuple. */
    __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
    __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
  } else {
    /* "return ()" — scalar dtype: the shared empty tuple. */
    __Pyx_INCREF(__pyx_empty_tuple);
    __pyx_r = __pyx_empty_tuple;
  }

  /* function exit code */
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":851
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 851, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 851, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 852, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 853, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 853, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":855
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (unlikely(__pyx_t_6)) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":856
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 856, __pyx_L1_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":855
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":858
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":859
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":858
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (unlikely(__pyx_t_6)) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":860
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 860, __pyx_L1_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":858
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":870
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 870, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 870, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 870, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":871
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":872
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":873
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":875
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":877
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":878
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 878, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":879
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (unlikely(__pyx_t_6)) {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":880
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 880, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 880, __pyx_L1_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":879
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":883
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 883, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 883, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 883, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":884
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 884, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 884, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 884, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":885
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 885, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 885, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 885, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":886
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 886, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 886, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 886, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":887
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 887, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 887, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 887, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":888
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 888, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 888, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 888, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":889
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 889, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 889, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 889, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":890
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 890, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 890, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 890, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":891
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 891, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 891, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 891, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":892
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 892, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 892, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 892, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":893
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 893, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 893, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 893, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":894
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 894, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 894, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 894, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":895
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 895, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 895, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 895, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":896
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 896, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 896, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 896, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":897
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 897, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 897, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 897, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":898
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 898, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 898, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 898, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":899
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 899, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 899, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 899, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (likely(__pyx_t_6)) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":901
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 901, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 901, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 901, __pyx_L1_error)
}
__pyx_L15:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":902
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":877
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":906
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 906, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":851
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":907
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1022
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/*
 * Auto-generated by Cython from numpy/__init__.pxd — do not edit by hand;
 * regenerate from the .pyx/.pxd sources instead.
 *
 * C translation of `set_array_base(ndarray arr, object base)`: installs
 * `base` as the object that owns the memory of `arr`.
 *
 * Reference counting: PyArray_SetBaseObject steals a reference to `base`
 * (see the pxd comment below), so a Py_INCREF is performed first to give
 * it a reference to steal while the caller keeps its own.
 */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("set_array_base", 0);
  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1023
 *
 * cdef inline void set_array_base(ndarray arr, object base):
 *     Py_INCREF(base)  # important to do this before stealing the reference below!             # <<<<<<<<<<<<<<
 *     PyArray_SetBaseObject(arr, base)
 *
 */
  /* Extra reference for PyArray_SetBaseObject to steal. */
  Py_INCREF(__pyx_v_base);
  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1024
 * cdef inline void set_array_base(ndarray arr, object base):
 *     Py_INCREF(base)  # important to do this before stealing the reference below!
 *     PyArray_SetBaseObject(arr, base)             # <<<<<<<<<<<<<<
 *
 * cdef inline object get_array_base(ndarray arr):
 */
  /* Return value deliberately discarded: the pxd declares this helper
   * `void`, so failure of PyArray_SetBaseObject is not propagated here. */
  (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1022
 *     int _import_umath() except -1
 *
 * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
 *     Py_INCREF(base)  # important to do this before stealing the reference below!
 *     PyArray_SetBaseObject(arr, base)
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1026
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/*
 * Auto-generated by Cython from numpy/__init__.pxd — do not edit by hand;
 * regenerate from the .pyx/.pxd sources instead.
 *
 * C translation of `get_array_base(ndarray arr)`: returns the object that
 * owns the data of `arr` (via PyArray_BASE), or Py_None when the array has
 * no base.  Always returns a NEW reference: Py_None is INCREF'd and a
 * non-NULL base is INCREF'd before being returned, matching Cython's
 * owned-return-value convention (__Pyx_XGIVEREF at the exit label).
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_v_base;              /* borrowed pointer from PyArray_BASE */
  PyObject *__pyx_r = NULL;            /* owned return value */
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;                       /* temporary for the NULL test */
  __Pyx_RefNannySetupContext("get_array_base", 0);
  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1027
 *
 * cdef inline object get_array_base(ndarray arr):
 *     base = PyArray_BASE(arr)             # <<<<<<<<<<<<<<
 *     if base is NULL:
 *         return None
 */
  /* PyArray_BASE yields a borrowed reference (may be NULL). */
  __pyx_v_base = PyArray_BASE(__pyx_v_arr);
  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1028
 * cdef inline object get_array_base(ndarray arr):
 *     base = PyArray_BASE(arr)
 *     if base is NULL:             # <<<<<<<<<<<<<<
 *         return None
 *     return <object>base
 */
  __pyx_t_1 = ((__pyx_v_base == NULL) != 0);
  if (__pyx_t_1) {
    /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1029
 *     base = PyArray_BASE(arr)
 *     if base is NULL:
 *         return None             # <<<<<<<<<<<<<<
 *     return <object>base
 *
 */
    /* No base object: hand back an owned reference to None. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_r = Py_None; __Pyx_INCREF(Py_None);
    goto __pyx_L0;
    /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1028
 * cdef inline object get_array_base(ndarray arr):
 *     base = PyArray_BASE(arr)
 *     if base is NULL:             # <<<<<<<<<<<<<<
 *         return None
 *     return <object>base
 */
  }
  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1030
 *     if base is NULL:
 *         return None
 *     return <object>base             # <<<<<<<<<<<<<<
 *
 * # Versions of the import_* functions which are more suitable for
 */
  /* Promote the borrowed base pointer to an owned reference for return. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_base));
  __pyx_r = ((PyObject *)__pyx_v_base);
  goto __pyx_L0;
  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1026
 *     PyArray_SetBaseObject(arr, base)
 *
 * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
 *     base = PyArray_BASE(arr)
 *     if base is NULL:
 */
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1034
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
/* Cython-generated wrapper for numpy's C-API initialisation.
 * Calls _import_array() and, on failure, raises
 * ImportError("numpy.core.multiarray failed to import").
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Generated from numpy/__init__.pxd:1034 — do not edit by hand. */
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1035
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
/* Save the current exception state (t_1..t_3) so the `try` block can
 * restore it if the handler itself fails. */
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1036
* cdef inline int import_array() except -1:
* try:
* _import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
/* _import_array() is numpy's macro-backed initialiser; -1 means failure. */
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1036, __pyx_L3_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1035
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
}
/* Success path: drop the saved exception state and leave the try. */
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1037
* try:
* _import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
/* `except Exception:` — match the pending exception against Exception. */
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1037, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1038
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
/* Replace the caught exception with the cached ImportError message
 * (__pyx_tuple__20, built in __Pyx_InitCachedConstants). */
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1038, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1038, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1035
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
/* Handler failed: restore the previously saved exception state. */
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1034
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1040
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* Cython-generated wrapper around numpy's _import_umath().  On failure
 * raises ImportError("numpy.core.umath failed to import").
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Structure is identical to __pyx_f_5numpy_import_array above.
 * Generated from numpy/__init__.pxd:1040 — do not edit by hand. */
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1041
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
/* Save the current exception state so it can be restored if the
 * except-handler itself raises. */
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1042
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1042, __pyx_L3_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1041
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
/* Success: discard the saved exception state. */
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1043
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1043, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1044
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
/* __pyx_tuple__21 holds the cached message tuple for this ImportError. */
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1044, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1044, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1041
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
/* Handler failed: restore the original exception state. */
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1040
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1046
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* Cython-generated import_ufunc().  NOTE: numpy's __init__.pxd defines
 * import_ufunc() to call _import_umath() — the ufunc machinery lives in
 * the umath module — so the call below is intentional, not a copy-paste
 * bug.  Returns 0 on success, -1 with an ImportError set on failure.
 * Generated from numpy/__init__.pxd:1046 — do not edit by hand. */
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1047
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
/* Save the current exception state for possible restoration below. */
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1048
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1048, __pyx_L3_error)
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1047
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
/* Success: discard the saved exception state. */
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1049
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1049, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1050
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*/
/* Same cached message tuple as import_umath (__pyx_tuple__21). */
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1050, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1050, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1047
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
/* Handler failed: restore the original exception state. */
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1046
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Method table of the extension module: the two functions exported to
 * Python.  Both take positional and keyword arguments
 * (METH_VARARGS|METH_KEYWORDS); the zero entry terminates the table. */
static PyMethodDef __pyx_methods[] = {
{"readStructuredSliceData", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_26PostProcess_EnergySpectrum_1readStructuredSliceData, METH_VARARGS|METH_KEYWORDS, 0},
{"getPlanarEnergySpectrum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_26PostProcess_EnergySpectrum_3getPlanarEnergySpectrum, METH_VARARGS|METH_KEYWORDS, 0},
{0, 0, 0, 0}
};
/* Python 3 module definition.  Under PEP 489 multi-phase init the
 * module is created/executed via the slot table; otherwise a classic
 * single-phase PyModuleDef with m_size == -1 (no per-module state). */
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_PostProcess_EnergySpectrum(PyObject* module); /*proto*/
/* PEP 489 slot table: create and exec phases for multi-phase init. */
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_PostProcess_EnergySpectrum},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"PostProcess_EnergySpectrum",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
/* CYTHON_SMALL_CODE marks rarely-run init functions.  On GCC >= 4.3 it
 * expands to __attribute__((cold)) so the compiler optimises them for
 * size and moves them out of hot code paths; elsewhere it is empty. */
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
/* Interned-string table: every Python string/identifier literal used by
 * the module.  __Pyx_InitStrings() turns each __pyx_k_* byte buffer
 * into a cached Python object stored at the first member's address.
 * Entry fields (per __Pyx_StringTabEntry): object slot, encoded bytes,
 * byte length, encoding flag, is_unicode, is_str, intern flag. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_, __pyx_k_, sizeof(__pyx_k_), 0, 0, 1, 0},
{&__pyx_n_s_ABL_N_H, __pyx_k_ABL_N_H, sizeof(__pyx_k_ABL_N_H), 0, 0, 1, 1},
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_n_s_L, __pyx_k_L, sizeof(__pyx_k_L), 0, 0, 1, 1},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_s_OSError, __pyx_k_OSError, sizeof(__pyx_k_OSError), 0, 0, 1, 1},
{&__pyx_n_s_Result, __pyx_k_Result, sizeof(__pyx_k_Result), 0, 0, 1, 1},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_kp_s_Slice_raw_data_read, __pyx_k_Slice_raw_data_read, sizeof(__pyx_k_Slice_raw_data_read), 0, 0, 1, 0},
{&__pyx_n_s_Slices, __pyx_k_Slices, sizeof(__pyx_k_Slices), 0, 0, 1, 1},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_kp_s__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 0, 1, 0},
{&__pyx_n_s_auto, __pyx_k_auto, sizeof(__pyx_k_auto), 0, 0, 1, 1},
{&__pyx_n_s_axes, __pyx_k_axes, sizeof(__pyx_k_axes), 0, 0, 1, 1},
{&__pyx_n_s_case, __pyx_k_case, sizeof(__pyx_k_case), 0, 0, 1, 1},
{&__pyx_n_s_caseDir, __pyx_k_caseDir, sizeof(__pyx_k_caseDir), 0, 0, 1, 1},
{&__pyx_n_s_cellSizes2D, __pyx_k_cellSizes2D, sizeof(__pyx_k_cellSizes2D), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_complex128, __pyx_k_complex128, sizeof(__pyx_k_complex128), 0, 0, 1, 1},
{&__pyx_n_s_conj, __pyx_k_conj, sizeof(__pyx_k_conj), 0, 0, 1, 1},
{&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
{&__pyx_n_s_empty_like, __pyx_k_empty_like, sizeof(__pyx_k_empty_like), 0, 0, 1, 1},
{&__pyx_n_s_end, __pyx_k_end, sizeof(__pyx_k_end), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_fft, __pyx_k_fft, sizeof(__pyx_k_fft), 0, 0, 1, 1},
{&__pyx_n_s_fft2, __pyx_k_fft2, sizeof(__pyx_k_fft2), 0, 0, 1, 1},
{&__pyx_n_s_fftfreq, __pyx_k_fftfreq, sizeof(__pyx_k_fftfreq), 0, 0, 1, 1},
{&__pyx_n_s_file, __pyx_k_file, sizeof(__pyx_k_file), 0, 0, 1, 1},
{&__pyx_n_s_genfromtxt, __pyx_k_genfromtxt, sizeof(__pyx_k_genfromtxt), 0, 0, 1, 1},
{&__pyx_n_s_horizontalEii, __pyx_k_horizontalEii, sizeof(__pyx_k_horizontalEii), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_linspace, __pyx_k_linspace, sizeof(__pyx_k_linspace), 0, 0, 1, 1},
{&__pyx_n_s_listdir, __pyx_k_listdir, sizeof(__pyx_k_listdir), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_makedirs, __pyx_k_makedirs, sizeof(__pyx_k_makedirs), 0, 0, 1, 1},
{&__pyx_n_s_mean, __pyx_k_mean, sizeof(__pyx_k_mean), 0, 0, 1, 1},
{&__pyx_n_s_meshgrid, __pyx_k_meshgrid, sizeof(__pyx_k_meshgrid), 0, 0, 1, 1},
{&__pyx_n_s_multiply, __pyx_k_multiply, sizeof(__pyx_k_multiply), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_norm, __pyx_k_norm, sizeof(__pyx_k_norm), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_os, __pyx_k_os, sizeof(__pyx_k_os), 0, 0, 1, 1},
{&__pyx_n_s_pi, __pyx_k_pi, sizeof(__pyx_k_pi), 0, 0, 1, 1},
{&__pyx_n_s_print, __pyx_k_print, sizeof(__pyx_k_print), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reshape, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1},
{&__pyx_n_s_resultFolder, __pyx_k_resultFolder, sizeof(__pyx_k_resultFolder), 0, 0, 1, 1},
{&__pyx_n_s_sliceFolder, __pyx_k_sliceFolder, sizeof(__pyx_k_sliceFolder), 0, 0, 1, 1},
{&__pyx_n_s_sliceName, __pyx_k_sliceName, sizeof(__pyx_k_sliceName), 0, 0, 1, 1},
{&__pyx_n_s_sqrt, __pyx_k_sqrt, sizeof(__pyx_k_sqrt), 0, 0, 1, 1},
{&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_time, __pyx_k_time, sizeof(__pyx_k_time), 0, 0, 1, 1},
{&__pyx_n_s_u2D, __pyx_k_u2D, sizeof(__pyx_k_u2D), 0, 0, 1, 1},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_v2D, __pyx_k_v2D, sizeof(__pyx_k_v2D), 0, 0, 1, 1},
{&__pyx_n_s_w2D, __pyx_k_w2D, sizeof(__pyx_k_w2D), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
/* Look up and cache the Python builtins the module uses (OSError,
 * enumerate, range, ValueError, RuntimeError, ImportError).  The
 * __PYX_ERR line numbers refer back to the .pyx/.pxd source lines that
 * use each builtin.  Returns 0 on success, -1 with an exception set. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_OSError = __Pyx_GetBuiltinName(__pyx_n_s_OSError); if (!__pyx_builtin_OSError) __PYX_ERR(0, 34, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(0, 48, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 131, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 272, __pyx_L1_error)
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 856, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1038, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
/* Pre-build constant Python objects used repeatedly at runtime: slice
 * objects and index tuples for the numpy fancy-indexing expressions in
 * the .pyx source, and the argument tuples for the error messages in
 * numpy's __init__.pxd.  Caching them once at module init avoids
 * re-creating them on every call.  Returns 0 on success, -1 on error. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "PostProcess_EnergySpectrum.pyx":44
* data = np.genfromtxt(sliceFullPath)
* # 1D array
* x, y, z = data[:, 0], data[:, 1], data[:, 2] # <<<<<<<<<<<<<<
* # Mesh size in x
* # Since the slice is sorted from low to high x, count the number of x
*/
/* __pyx_slice__3 is the bare `:` slice, shared by all index tuples below. */
__pyx_slice__3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
/* Tuples (slice(None), k) implement data[:, k] for columns 0..2 (x, y, z). */
__pyx_tuple__4 = PyTuple_Pack(2, __pyx_slice__3, __pyx_int_0); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
__pyx_tuple__5 = PyTuple_Pack(2, __pyx_slice__3, __pyx_int_1); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
__pyx_tuple__6 = PyTuple_Pack(2, __pyx_slice__3, __pyx_int_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "PostProcess_EnergySpectrum.pyx":48
* # Since the slice is sorted from low to high x, count the number of x
* valOld = x[0]
* for i, val in enumerate(x[1:]): # <<<<<<<<<<<<<<
* if val < valOld:
* nPtX = i + 1
*/
/* __pyx_slice__7 is `1:` — used for x[1:]. */
__pyx_slice__7 = PySlice_New(__pyx_int_1, Py_None, Py_None); if (unlikely(!__pyx_slice__7)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__7);
__Pyx_GIVEREF(__pyx_slice__7);
/* "PostProcess_EnergySpectrum.pyx":58
* x2D, y2D, z2D = x.reshape((nPtY, nPtX)), y.reshape((nPtY, nPtX)), z.reshape((nPtY, nPtX))
* # if data.shape[1] == 6:
* u, v, w = data[:, 3], data[:, 4], data[:, 5] # <<<<<<<<<<<<<<
* scalarField = np.empty(data.shape[0])
* # Go through every row and calculate resultant value
*/
/* data[:, 3..5] — the velocity component columns u, v, w. */
__pyx_tuple__8 = PyTuple_Pack(2, __pyx_slice__3, __pyx_int_3); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
__pyx_tuple__9 = PyTuple_Pack(2, __pyx_slice__3, __pyx_int_4); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
__pyx_tuple__10 = PyTuple_Pack(2, __pyx_slice__3, __pyx_int_5); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "PostProcess_EnergySpectrum.pyx":102
* nPtX, nPtY = uRes2D.shape[1], uRes2D.shape[0]
* # 2D DFT, no normalization (will be done manually below)
* uResFft, vResFft, wResFft = np.fft.fft2(uRes2D, axes = (0, 1), norm = None), \ # <<<<<<<<<<<<<<
* np.fft.fft2(vRes2D, axes = (0, 1), norm = None), \
* np.fft.fft2(wRes2D, axes = (0, 1), norm = None)
*/
/* The axes=(0, 1) tuple passed to np.fft.fft2. */
__pyx_tuple__11 = PyTuple_Pack(2, __pyx_int_0, __pyx_int_1); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "PostProcess_EnergySpectrum.pyx":138
* # If decompose Rii to horizontal Rii and R33
* if horizontalEii:
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3] # <<<<<<<<<<<<<<
* else:
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3] + RijFft[:, :, 5]
*/
/* 3-D index tuples (:, :, k) for selecting Rij components. */
__pyx_tuple__12 = PyTuple_Pack(3, __pyx_slice__3, __pyx_slice__3, __pyx_int_0); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
__pyx_tuple__13 = PyTuple_Pack(3, __pyx_slice__3, __pyx_slice__3, __pyx_int_3); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "PostProcess_EnergySpectrum.pyx":140
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3]
* else:
* RiiFft = RijFft[:, :, 0] + RijFft[:, :, 3] + RijFft[:, :, 5] # <<<<<<<<<<<<<<
*
* # Original resultant Kr
*/
__pyx_tuple__14 = PyTuple_Pack(3, __pyx_slice__3, __pyx_slice__3, __pyx_int_5); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* Argument tuples for the exception messages raised from numpy's
 * __init__.pxd buffer-protocol and import helpers. */
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 272, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 276, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":306
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 306, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":856
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 856, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":880
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 880, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1038
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 1038, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1044
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* Initialise global singletons: the GIL/threading machinery, the
 * interned-string table, and the small int/float constants the module
 * reuses.  Returns 0 on success, -1 with an exception set on failure. */
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/* Cached numeric constants (used e.g. as array indices and factors). */
__pyx_float_0_5 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_float_0_5)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_6 = PyInt_FromLong(6); if (unlikely(!__pyx_int_6)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
/* Forward declarations for the module-init phases, run in this order
 * by the module exec code. */
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
/* No C-level globals need initialisation for this module — empty stub. */
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* This module exports no C variables to other Cython modules — empty stub. */
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* This module exports no C functions to other Cython modules — empty stub. */
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* This module defines no extension types of its own — empty stub. */
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Import extension types used via cimport: builtins.type and the numpy
 * types (dtype, flatiter, broadcast, ndarray, ufunc).  Each import
 * checks the C struct size against the running interpreter/numpy:
 * CheckSize_Warn warns on mismatch, CheckSize_Ignore accepts larger
 * runtime structs (ndarray/dtype grow between numpy versions).
 * Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 206, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 229, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 233, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 242, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 918, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
/* Import C-level variables exported by other Cython modules.
 * This module imports none, so the body is empty apart from the
 * RefNanny bookkeeping; always returns 0. */
static int __Pyx_modinit_variable_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
  /*--- Variable import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* Import C-level functions exported by other Cython modules.
 * This module imports none; always returns 0. */
static int __Pyx_modinit_function_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
  /*--- Function import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* Pick the declaration specifier for the module init entry point:
 * Py2 init functions return void, Py3 return PyObject*; when
 * CYTHON_NO_PYINIT_EXPORT is set we avoid PyMODINIT_FUNC so the
 * symbol is not exported from the shared object. */
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
/* Module entry point, named per the interpreter's convention
 * (initNAME on Py2, PyInit_NAME on Py3). Under PEP 489 multi-phase
 * init the entry point only hands the module definition to the
 * interpreter; the real work happens later in __pyx_pymod_exec_*.
 * Without multi-phase init, the body that follows the #endif pair
 * below (see the brace after __pyx_pymod_exec_...) is this
 * function's body. */
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initPostProcess_EnergySpectrum(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initPostProcess_EnergySpectrum(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_PostProcess_EnergySpectrum(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_PostProcess_EnergySpectrum(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
  return PyModuleDef_Init(&__pyx_moduledef);
}
/* Refuse to (re)load this module into a different interpreter than the
 * one that first imported it: the module keeps C-level global state that
 * cannot be shared across sub-interpreters. On Python >= 3.7 interpreters
 * are compared by PyInterpreterState_GetID, otherwise by pointer identity.
 * Returns 0 if OK, -1 with ImportError set otherwise. */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
    #if PY_VERSION_HEX >= 0x030700A1
    static PY_INT64_T main_interpreter_id = -1;
    PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
    if (main_interpreter_id == -1) {
        /* First load: remember this interpreter. GetID reports failure as -1. */
        main_interpreter_id = current_id;
        return (unlikely(current_id == -1)) ? -1 : 0;
    } else if (unlikely(main_interpreter_id != current_id))
    #else
    static PyInterpreterState *main_interpreter = NULL;
    PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
    if (!main_interpreter) {
        main_interpreter = current_interpreter;
    } else if (unlikely(main_interpreter != current_interpreter))
    #endif
    {
        PyErr_SetString(
            PyExc_ImportError,
            "Interpreter change detected - this module can only be loaded into one interpreter per process.");
        return -1;
    }
    return 0;
}
/* Copy one attribute of the import-machinery ModuleSpec into the new
 * module's __dict__ under a (possibly different) name. A missing
 * attribute is silently ignored; Py_None is skipped unless allow_none.
 * Returns 0 on success (including the skip cases), -1 on error. */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
    PyObject *value = PyObject_GetAttrString(spec, from_name);
    int result = 0;
    if (likely(value)) {
        if (allow_none || value != Py_None) {
            result = PyDict_SetItemString(moddict, to_name, value);
        }
        Py_DECREF(value);
    } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
        /* Attribute simply absent on the spec: not an error. */
        PyErr_Clear();
    } else {
        result = -1;
    }
    return result;
}
/* PEP 489 "create" slot: build the module object from its spec and
 * seed its __dict__ with __loader__/__file__/__package__/__path__
 * copied from the spec. Enforces the single-interpreter restriction
 * and returns the existing module if one was already created.
 * Returns a new reference, or NULL with an exception set. */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
    PyObject *module = NULL, *moddict, *modname;
    if (__Pyx_check_single_interpreter())
        return NULL;
    if (__pyx_m)
        return __Pyx_NewRef(__pyx_m);
    modname = PyObject_GetAttrString(spec, "name");
    if (unlikely(!modname)) goto bad;
    module = PyModule_NewObject(modname);
    Py_DECREF(modname);
    if (unlikely(!module)) goto bad;
    /* moddict is a borrowed reference to the module's __dict__. */
    moddict = PyModule_GetDict(module);
    if (unlikely(!moddict)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
    /* __path__ is only set for packages, so Py_None is not copied here. */
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
    return module;
bad:
    Py_XDECREF(module);
    return NULL;
}
/* PEP 489 "exec" slot (and, without multi-phase init, the body of the
 * module init function itself): creates/adopts the module object,
 * initializes Cython runtime machinery, imports required types, and
 * runs the module-level Python code (here: "import os", "import numpy
 * as np" and the __test__ dict). Return convention depends on the init
 * style, see the trailing #if. */
static CYTHON_SMALL_CODE int __pyx_pymod_exec_PostProcess_EnergySpectrum(PyObject *__pyx_pyinit_module)
#endif
#endif
{
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannyDeclarations
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  /* Guard against a second exec pass on an already-initialized module. */
  if (__pyx_m) {
    if (__pyx_m == __pyx_pyinit_module) return 0;
    PyErr_SetString(PyExc_RuntimeError, "Module 'PostProcess_EnergySpectrum' has already been imported. Re-initialisation is not supported.");
    return -1;
  }
  #elif PY_MAJOR_VERSION >= 3
  if (__pyx_m) return __Pyx_NewRef(__pyx_m);
  #endif
  #if CYTHON_REFNANNY
  /* Optional reference-count debugging hooks; fatal only if the debug
   * build requested refnanny and neither provider module can be found. */
  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
  if (!__Pyx_RefNanny) {
      PyErr_Clear();
      __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
      if (!__Pyx_RefNanny)
          Py_FatalError("failed to import 'refnanny' module");
  }
  #endif
  __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_PostProcess_EnergySpectrum(void)", 0);
  if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #ifdef __Pxy_PyFrame_Initialize_Offsets
  __Pxy_PyFrame_Initialize_Offsets();
  #endif
  /* Shared empty singletons used throughout the generated code. */
  __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
  #ifdef __Pyx_CyFunction_USED
  if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_FusedFunction_USED
  if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_Coroutine_USED
  if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_Generator_USED
  if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_AsyncGen_USED
  if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_StopAsyncIteration_USED
  if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  /*--- Library function declarations ---*/
  /*--- Threads initialization code ---*/
  #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
  #ifdef WITH_THREAD /* Python build with threading support? */
  PyEval_InitThreads();
  #endif
  #endif
  /*--- Module creation code ---*/
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  /* Multi-phase init: the interpreter already created the module. */
  __pyx_m = __pyx_pyinit_module;
  Py_INCREF(__pyx_m);
  #else
  #if PY_MAJOR_VERSION < 3
  __pyx_m = Py_InitModule4("PostProcess_EnergySpectrum", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
  #else
  __pyx_m = PyModule_Create(&__pyx_moduledef);
  #endif
  if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  /* Cache module dict, builtins module, and cython_runtime module. */
  __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_d);
  __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_b);
  __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_cython_runtime);
  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  /*--- Initialize various global constants etc. ---*/
  if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
  if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  if (__pyx_module_is_main_PostProcess_EnergySpectrum) {
    /* Running as a script: expose the module under the name __main__. */
    if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  }
  #if PY_MAJOR_VERSION >= 3
  {
    /* Register in sys.modules early so circular imports during module
     * execution find the partially initialized module. */
    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
    if (!PyDict_GetItemString(modules, "PostProcess_EnergySpectrum")) {
      if (unlikely(PyDict_SetItemString(modules, "PostProcess_EnergySpectrum", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
    }
  }
  #endif
  /*--- Builtin init code ---*/
  if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Constants init code ---*/
  if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Global type/function init code ---*/
  (void)__Pyx_modinit_global_init_code();
  (void)__Pyx_modinit_variable_export_code();
  (void)__Pyx_modinit_function_export_code();
  (void)__Pyx_modinit_type_init_code();
  /* Type import is the only modinit step whose failure aborts loading. */
  if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error;
  (void)__Pyx_modinit_variable_import_code();
  (void)__Pyx_modinit_function_import_code();
  /*--- Execution code ---*/
  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
  if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif

  /* "PostProcess_EnergySpectrum.pyx":1
 * import os             # <<<<<<<<<<<<<<
 * # cimport numpy imports Numpy's C API but doesn't incl. all Numpy functions
 * import numpy as np
 */
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_os, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_os, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "PostProcess_EnergySpectrum.pyx":3
 * import os
 * # cimport numpy imports Numpy's C API but doesn't incl. all Numpy functions
 * import numpy as np             # <<<<<<<<<<<<<<
 * cimport numpy as np
 * #from cpython cimport bool
 */
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "PostProcess_EnergySpectrum.pyx":1
 * import os             # <<<<<<<<<<<<<<
 * # cimport numpy imports Numpy's C API but doesn't incl. all Numpy functions
 * import numpy as np
 */
  /* __test__: dict of doctest-able entries; empty for this module. */
  __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "../../../opt/miniconda3/envs/SOWFA-Postprocess/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1046
 *     raise ImportError("numpy.core.umath failed to import")
 * 
 * cdef inline int import_ufunc() except -1:             # <<<<<<<<<<<<<<
 *     try:
 *         _import_umath()
 */

  /*--- Wrapped vars code ---*/

  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  if (__pyx_m) {
    if (__pyx_d) {
      __Pyx_AddTraceback("init PostProcess_EnergySpectrum", __pyx_clineno, __pyx_lineno, __pyx_filename);
    }
    Py_CLEAR(__pyx_m);
  } else if (!PyErr_Occurred()) {
    /* Ensure an exception is set even when module creation itself failed. */
    PyErr_SetString(PyExc_ImportError, "init PostProcess_EnergySpectrum");
  }
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  /* Return type differs by init style: int for PEP 489, PyObject* for
   * single-phase Py3, void for Py2. */
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  return (__pyx_m != NULL) ? 0 : -1;
  #elif PY_MAJOR_VERSION >= 3
  return __pyx_m;
  #else
  return;
  #endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Fetch the RefNannyAPI struct pointer exposed by the given refnanny
 * debug module. Returns NULL (with or without an exception pending)
 * when the module or attribute is unavailable. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule(modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, "RefNannyAPI");
    if (!p) goto end;
    /* The module publishes the API struct address as a Python int. */
    r = PyLong_AsVoidPtr(p);
end:
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
/* Attribute lookup that calls the type's tp_getattro/tp_getattr slot
 * directly, bypassing PyObject_GetAttr's extra checks when possible.
 * Same contract as PyObject_GetAttr: new reference or NULL + exception. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro))
        return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    /* Py2 legacy slot takes a char* name. */
    if (likely(tp->tp_getattr))
        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* PyDictVersioning */
/* Helpers for CPython's dict version tags, used to cache module-global
 * lookups: a lookup can be skipped when the dict's version is unchanged. */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
/* Version tag of the object's type dict (0 if absent). */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
/* Version tag of the object's instance __dict__ (0 if absent). */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
    PyObject **dictptr = NULL;
    Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
    if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
        /* Negative offsets (variable-size objects) need the slow path. */
        dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
        dictptr = _PyObject_GetDictPtr(obj);
#endif
    }
    return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
/* Non-zero when both cached version tags still match the live dicts. */
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
        return 0;
    return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
/* Resolve a module-global name: first in the module dict (__pyx_d),
 * then among builtins. With dict versioning enabled, the caller's cache
 * slots are refreshed via __PYX_UPDATE_DICT_CACHE. Returns a new
 * reference or NULL with an exception set. */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
    PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
    /* Fast path: reuse the interned unicode's precomputed hash. */
    result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    } else if (unlikely(PyErr_Occurred())) {
        return NULL;
    }
#else
    result = PyDict_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
#endif
#else
    result = PyObject_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
    PyErr_Clear();
#endif
    /* Not a module global: fall back to builtins (raises NameError). */
    return __Pyx_GetBuiltinName(name);
}
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
/* Invoke a built-in function through the METH_FASTCALL calling
 * convention (args as a C array, no tuple allocation). The func_obj
 * must be a PyCFunction whose flags were checked by the caller. */
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
    PyObject *self = PyCFunction_GET_SELF(func);
    int flags = PyCFunction_GET_FLAGS(func);
    assert(PyCFunction_Check(func));
    assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
    assert(nargs >= 0);
    assert(nargs == 0 || args != NULL);
    /* _PyCFunction_FastCallDict() must not be called with an exception set,
       because it may clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!PyErr_Occurred());
    if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
        /* Keyword-capable signature takes a trailing kwnames argument. */
        return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
    } else {
        return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
    }
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
/* Call a pure-Python function whose code object needs no closure,
 * free variables or keyword handling by building the frame directly
 * and evaluating it, skipping the generic call machinery. */
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
                                               PyObject *globals) {
    PyFrameObject *f;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject **fastlocals;
    Py_ssize_t i;
    PyObject *result;
    assert(globals != NULL);
    /* XXX Perhaps we should create a specialized
       PyFrame_New() that doesn't take locals, but does
       take builtins without sanity checking them.
       */
    assert(tstate != NULL);
    f = PyFrame_New(tstate, co, globals, NULL);
    if (f == NULL) {
        return NULL;
    }
    /* Copy positional args into the frame's fastlocals slots. */
    fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
    for (i = 0; i < na; i++) {
        Py_INCREF(*args);
        fastlocals[i] = *args++;
    }
    result = PyEval_EvalFrameEx(f,0);
    /* Bump recursion depth so the frame dealloc's recursion check
     * balances out (mirrors CPython's _PyFunction_FastCall). */
    ++tstate->recursion_depth;
    Py_DECREF(f);
    --tstate->recursion_depth;
    return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
/* General fast call for Python functions: takes positional args as a C
 * array plus an optional kwargs dict. Uses the no-keyword frame path
 * when possible, otherwise falls back to PyEval_EvalCodeEx. Returns a
 * new reference or NULL with an exception set. */
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject *closure;
#if PY_MAJOR_VERSION >= 3
    PyObject *kwdefs;
#endif
    PyObject *kwtuple, **k;
    PyObject **d;
    Py_ssize_t nd;
    Py_ssize_t nk;
    PyObject *result;
    assert(kwargs == NULL || PyDict_Check(kwargs));
    nk = kwargs ? PyDict_Size(kwargs) : 0;
    if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
        return NULL;
    }
    /* Fast path: plain function (no cell vars, no free vars, no kw-only
     * args) called with either exactly-matching positionals or purely
     * default arguments. */
    if (
#if PY_MAJOR_VERSION >= 3
            co->co_kwonlyargcount == 0 &&
#endif
            likely(kwargs == NULL || nk == 0) &&
            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        if (argdefs == NULL && co->co_argcount == nargs) {
            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
            goto done;
        }
        else if (nargs == 0 && argdefs != NULL
                 && co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use default values as arguments .*/
            args = &PyTuple_GET_ITEM(argdefs, 0);
            result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
            goto done;
        }
    }
    if (kwargs != NULL) {
        Py_ssize_t pos, i;
        /* Flatten the kwargs dict into a (key, value, key, value, ...)
         * tuple as expected by PyEval_EvalCodeEx. */
        kwtuple = PyTuple_New(2 * nk);
        if (kwtuple == NULL) {
            result = NULL;
            goto done;
        }
        k = &PyTuple_GET_ITEM(kwtuple, 0);
        pos = i = 0;
        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
            Py_INCREF(k[i]);
            Py_INCREF(k[i+1]);
            i += 2;
        }
        nk = i / 2;
    }
    else {
        kwtuple = NULL;
        k = NULL;
    }
    closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
    kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    }
    else {
        d = NULL;
        nd = 0;
    }
#if PY_MAJOR_VERSION >= 3
    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
                               args, nargs,
                               k, (int)nk,
                               d, (int)nd, kwdefs, closure);
#else
    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
                               args, nargs,
                               k, (int)nk,
                               d, (int)nd, closure);
#endif
    Py_XDECREF(kwtuple);
done:
    Py_LeaveRecursiveCall();
    return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
/* Drop-in for PyObject_Call that invokes tp_call directly, with the
 * same recursion guard and "NULL without error" SystemError check
 * CPython performs. Returns a new reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        /* A callable returning NULL without setting an exception is a bug
         * in the callable; surface it instead of propagating silently. */
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCall2Args */
/* Call `function(arg1, arg2)`, choosing the fastest available calling
 * convention (fast call for Python functions, METH_FASTCALL for
 * builtins, otherwise a 2-tuple). Borrows arg1/arg2 from the caller;
 * returns a new reference or NULL with an exception set. */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
    PyObject *args, *result = NULL;
    #if CYTHON_FAST_PYCALL
    if (PyFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyFunction_FastCall(function, args, 2);
    }
    #endif
    #if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyCFunction_FastCall(function, args, 2);
    }
    #endif
    /* Generic fallback: pack both args into a tuple. */
    args = PyTuple_New(2);
    if (unlikely(!args)) goto done;
    Py_INCREF(arg1);
    PyTuple_SET_ITEM(args, 0, arg1);
    Py_INCREF(arg2);
    PyTuple_SET_ITEM(args, 1, arg2);
    Py_INCREF(function);
    result = __Pyx_PyObject_Call(function, args, NULL);
    Py_DECREF(args);
    Py_DECREF(function);
done:
    return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call a METH_O builtin with a single argument, bypassing tuple
 * packing. Mirrors CPython's recursion guard and the SystemError
 * check for NULL-without-exception results. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallOneArg */
/* Call `func(arg)` using the cheapest convention available:
 * fast call for Python functions, METH_O / METH_FASTCALL for
 * builtins, otherwise a 1-tuple call. Returns a new reference
 * or NULL with an exception set. */
#if CYTHON_COMPILING_IN_CPYTHON
/* Slow path: pack the single argument into a tuple. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
    if (likely(PyCFunction_Check(func))) {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython build: always use the generic tuple-based call. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_Pack(1, arg);
    if (unlikely(!args)) return NULL;
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
#endif
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the thread's exception-info stack (Py >= 3.7) down to the
 * innermost frame that actually holds an exception; returns a
 * borrowed pointer into the thread state. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Snapshot the currently *handled* exception (sys.exc_info state, not
 * the pending one) into type/value/tb, taking new references. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
    #else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    #endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Restore a previously saved handled-exception state, stealing the
 * references to type/value/tb and releasing the replaced objects.
 * Old values are dropped only after the new ones are installed. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    #endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Return non-zero if exc_type matches any entry of a tuple of
 * exception classes. The identity-only first pass (Py3) avoids the
 * subclass check for the common exact-match case. */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
    }
    return 0;
}
/* PyErr_ExceptionMatches against the *pending* exception of the given
 * thread state; 0 when no exception is pending. */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
    if (unlikely(PyTuple_Check(err)))
        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
/* PyErr_Restore equivalent operating on an explicit thread state:
 * steals references to type/value/tb and releases whatever pending
 * exception was there before (new values installed first). */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* PyErr_Fetch equivalent: moves the pending exception out of the
 * thread state (ownership transfers to the caller) and clears it. */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* GetItemInt */
/* o[i] for C integer indices, with fast paths for exact lists and
 * tuples that use the borrowed-ref GET_ITEM macros. "wraparound"
 * enables Python-style negative indexing, "boundscheck" the range
 * check; both are compile-time flags from the Cython directives. */

/* Generic fallback: o[j] via PyObject_GetItem; consumes the reference
 * to the boxed index j (NULL j propagates an allocation failure). */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *r;
    if (!j) return NULL;
    r = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return r;
}
/* List-only fast path (caller guarantees o is a list). */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    /* Out of range: let the generic path raise IndexError. */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Tuple-only fast path (caller guarantees o is a tuple). */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Type-dispatching entry point: list/tuple fast paths, then the
 * sq_item slot (with optional negative-index fixup via sq_length),
 * then the fully generic protocol. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* sq_length failed: only OverflowError is tolerable
                     * (huge sequence); anything else propagates. */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ExtTypeTest */
/* Runtime check that obj is an instance of the extension type `type`.
 * Returns 1 on match; 0 with TypeError (or SystemError when the type
 * object was never imported) otherwise. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* IsLittleEndian */
/* Detect host byte order at runtime by writing a known 32-bit pattern
 * through a union and inspecting its lowest-addressed byte.
 * Returns 1 on little-endian hosts, 0 otherwise. */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
  union {
    uint32_t whole;
    uint8_t bytes[4];
  } probe;
  probe.whole = 0x01020304;
  /* Little-endian machines store the least significant byte (0x04) first. */
  return probe.bytes[0] == 4;
}
/* BufferFormatCheck */
/* Initialize a buffer-format parsing context for checking a Python
 * buffer's format string against the expected Cython dtype `type`.
 * `stack` is caller-provided storage for the nesting of struct fields;
 * stack[0] is the root. All scanner state fields start zeroed. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
  stack[0].field = &ctx->root;
  stack[0].parent_offset = 0;
  ctx->root.type = type;
  ctx->root.name = "buffer dtype";
  ctx->root.offset = 0;
  ctx->head = stack;
  ctx->head->field = &ctx->root;
  ctx->fmt_offset = 0;
  ctx->head->parent_offset = 0;
  /* '@' = native byte order / native alignment (struct-module notation). */
  ctx->new_packmode = '@';
  ctx->enc_packmode = '@';
  ctx->new_count = 1;
  ctx->enc_count = 0;
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  ctx->is_valid_array = 0;
  ctx->struct_alignment = 0;
  /* Descend through leading struct ('S') wrappers to the first scalar
   * field, pushing one stack element per nesting level. */
  while (type->typegroup == 'S') {
    ++ctx->head;
    ctx->head->field = type->fields;
    ctx->head->parent_offset = 0;
    type = type->fields->type;
  }
}
/* Parse a non-negative decimal integer at *ts (e.g. a repeat count in
 * a buffer format string). On success, advances *ts past the digits
 * and returns the value; if the first character is not a digit, leaves
 * *ts untouched and returns -1. No overflow guard, matching the
 * struct-module convention of small counts. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    const char* cursor = *ts;
    int value = 0;
    if (*cursor < '0' || *cursor > '9')
        return -1;
    do {
        value = value * 10 + (*cursor - '0');
        ++cursor;
    } while (*cursor >= '0' && *cursor <= '9');
    *ts = cursor;
    return value;
}
/* Parse a required decimal number at *ts; on failure (-1) raise a
 * ValueError naming the offending character. Returns the number,
 * or -1 with the exception set. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Raise ValueError for a format character the parser cannot handle. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
  PyErr_Format(PyExc_ValueError,
               "Unexpected format string character: '%c'", ch);
}
/* Map a buffer-format type character to a human-readable C type name
 * for use in dtype-mismatch error messages. The floating codes
 * 'f'/'d'/'g' resolve to their complex variants when is_complex is
 * non-zero; a NUL character means "end of format string". */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
  /* Handle the complex-capable codes first; everything else ignores
   * is_complex. */
  if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
  if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
  if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
  switch (ch) {
    case 0:   return "end";
    case 'c': return "'char'";
    case 'b': return "'signed char'";
    case 'B': return "'unsigned char'";
    case 'h': return "'short'";
    case 'H': return "'unsigned short'";
    case 'i': return "'int'";
    case 'I': return "'unsigned int'";
    case 'l': return "'long'";
    case 'L': return "'unsigned long'";
    case 'q': return "'long long'";
    case 'Q': return "'unsigned long long'";
    case 'T': return "a struct";
    case 'O': return "Python object";
    case 'P': return "a pointer";
    case 's':
    case 'p': return "a string";
    default:  return "unparseable format string";
  }
}
/* Size in bytes of a format character under *standard* (fixed-size,
 * struct-module '<'/'>'/'=') packing. Complex variants are twice the
 * scalar size. Returns 0 with a ValueError set for 'g' (no standard
 * size defined) or an unknown character. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ? 8 : 4);
    case 'd': return (is_complex ? 16 : 8);
    case 'g': {
      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
      return 0;
    }
    case 'O': case 'P': return sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* Size in bytes of a format character under *native* ('@') packing,
 * i.e. the host compiler's sizeof. Complex variants are twice the
 * scalar size. Returns 0 with a ValueError set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
  switch (ch) {
    case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
    case 'O': case 'P': return sizeof(void*);
    default: {
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
  }
}
/* Probe structs: sizeof(__Pyx_st_T) - sizeof(T) yields the compiler's
 * alignment requirement for T (padding inserted after the leading char). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment (in bytes) of the C type denoted by a format
 * character, derived from the probe structs above. Returns 0 with a
 * ValueError set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
   but we don't have any guarantees.
 */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing padding (in bytes) a struct ending in the type denoted by
 * the format character would receive, derived from the __Pyx_pad_*
 * probe structs. Returns 0 with a ValueError set for unknown chars. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* Classify a struct-module format character into Cython's type-group code:
   'H' bytes/char, 'I' signed integer, 'U' unsigned integer, 'R' real float,
   'C' complex float, 'O' Python object, 'P' pointer.  On an unknown
   character, raises via __Pyx_BufFmt_RaiseUnexpectedChar and returns 0. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
  if (ch == 'c')
    return 'H';
  if (ch == 'b' || ch == 'h' || ch == 'i' ||
      ch == 'l' || ch == 'q' || ch == 's' || ch == 'p')
    return 'I';
  if (ch == 'B' || ch == 'H' || ch == 'I' || ch == 'L' || ch == 'Q')
    return 'U';
  if (ch == 'f' || ch == 'd' || ch == 'g')
    return (is_complex ? 'C' : 'R');
  if (ch == 'O')
    return 'O';
  if (ch == 'P')
    return 'P';
  __Pyx_BufFmt_RaiseUnexpectedChar(ch);
  return 0;
}
/* Set a ValueError describing a mismatch between the buffer's format string
   and the expected dtype.  At top level (or past the end) it names the
   expected type or "end"; inside a nested struct it also names the
   offending "parent.field". */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
    const char* expected;
    const char* quote;
    if (ctx->head == NULL) {
      /* everything expected was already consumed */
      expected = "end";
      quote = "";
    } else {
      expected = ctx->head->field->type->name;
      quote = "'";
    }
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected %s%s%s but got %s",
                 quote, expected, quote,
                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
  } else {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_StructField* parent = (ctx->head - 1)->field;
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                 parent->type->name, field->name);
  }
}
/* Flush the currently accumulated run of identical format characters
   (ctx->enc_type repeated ctx->enc_count times) against the expected C
   struct layout, advancing ctx->head and ctx->fmt_offset field by field.
   Returns 0 on success, -1 with a Python exception set on mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
  char group;
  size_t size, offset, arraysize = 1;
  if (ctx->enc_type == 0) return 0;  /* nothing pending */
  if (ctx->head->field->type->arraysize[0]) {
    int i, ndim = 0;
    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
      /* 's'/'p' encode their length in enc_count: treat as a 1-D array */
      ctx->is_valid_array = ctx->head->field->type->ndim == 1;
      ndim = 1;
      if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
        PyErr_Format(PyExc_ValueError,
                     "Expected a dimension of size %zu, got %zu",
                     ctx->head->field->type->arraysize[0], ctx->enc_count);
        return -1;
      }
    }
    if (!ctx->is_valid_array) {
      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                   ctx->head->field->type->ndim, ndim);
      return -1;
    }
    for (i = 0; i < ctx->head->field->type->ndim; i++) {
      arraysize *= ctx->head->field->type->arraysize[i];
    }
    ctx->is_valid_array = 0;
    ctx->enc_count = 1;
  }
  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
  do {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_TypeInfo* type = field->type;
    /* '@'/'^' pack modes use native sizes; all others use standard sizes */
    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
    } else {
      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
    }
    if (ctx->enc_packmode == '@') {
      /* native mode additionally honours native alignment */
      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
      size_t align_mod_offset;
      if (align_at == 0) return -1;
      align_mod_offset = ctx->fmt_offset % align_at;
      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
      if (ctx->struct_alignment == 0)
          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                 ctx->is_complex);
    }
    if (type->size != size || type->typegroup != group) {
      if (type->typegroup == 'C' && type->fields != NULL) {
        /* expected a complex type: descend into its component fields */
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = parent_offset;
        continue;
      }
      if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
        /* byte/char group matches on size alone */
      } else {
        __Pyx_BufFmt_RaiseExpected(ctx);
        return -1;
      }
    }
    offset = ctx->head->parent_offset + field->offset;
    if (ctx->fmt_offset != offset) {
      PyErr_Format(PyExc_ValueError,
                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
      return -1;
    }
    ctx->fmt_offset += size;
    if (arraysize)
      ctx->fmt_offset += (arraysize - 1) * size;
    --ctx->enc_count;
    /* advance to the next struct field, popping and pushing nesting levels */
    while (1) {
      if (field == &ctx->root) {
        ctx->head = NULL;
        if (ctx->enc_count != 0) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
        }
        break;
      }
      ctx->head->field = ++field;
      if (field->type == NULL) {
        /* end of this struct's field list: pop one nesting level */
        --ctx->head;
        field = ctx->head->field;
        continue;
      } else if (field->type->typegroup == 'S') {
        /* nested struct: push one nesting level (skip empty structs) */
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        if (field->type->fields->type == NULL) continue;
        field = field->type->fields;
        ++ctx->head;
        ctx->head->field = field;
        ctx->head->parent_offset = parent_offset;
        break;
      } else {
        break;
      }
    }
  } while (ctx->enc_count);
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  return 0;
}
/* Parse a "(d1,d2,...)" array-dimension suffix of a buffer format string.
   *tsp points at the '('; on success it is advanced past the closing ')'
   and Py_None (borrowed) is returned.  Returns NULL with a Python
   exception set on malformed input or a dimension mismatch. */
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* consume '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* BUGFIX: skip whitespace by advancing the cursor.  The original
               bare `continue` re-tested the same character forever, turning
               any whitespace inside the parentheses into an infinite loop
               (compare the whitespace handling in __Pyx_BufFmt_CheckString,
               which increments before looping). */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  ++ts; continue;
            default:  break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;  /* consume ')' */
    return Py_None;
}
/* Walk a PEP-3118 / struct-module format string `ts`, validating it against
   the expected type layout held in `ctx`.  Handles byte-order/pack-mode
   prefixes, 'T{...}' nested structs, 'x' padding, 'Z' complex prefixes,
   repeat counts, ':name:' annotations and '(...)' array shapes.  Returns
   the position after the parsed portion, or NULL with an exception set. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0;  /* saw a 'Z' (complex) prefix for the following f/d/g */
  while (1) {
    switch(*ts) {
      case 0:
        /* end of string: everything pending must match, and the expected
           field stack must be exhausted */
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case '\r':
      case '\n':
        ++ts;
        break;
      case '<':
        /* little-endian marker only accepted on little-endian hosts */
        if (!__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        /* big-endian marker only accepted on big-endian hosts */
        if (__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '=':
      case '@':
      case '^':
        ctx->new_packmode = *ts++;
        break;
      case 'T':
        /* 'T{...}': nested struct, possibly with a repeat count */
        {
          const char* ts_after_sub;
          size_t i, struct_count = ctx->new_count;
          size_t struct_alignment = ctx->struct_alignment;
          ctx->new_count = 1;
          ++ts;
          if (*ts != '{') {
            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
            return NULL;
          }
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          ctx->enc_count = 0;
          ctx->struct_alignment = 0;
          ++ts;
          ts_after_sub = ts;
          /* re-parse the sub-struct once per repetition */
          for (i = 0; i != struct_count; ++i) {
            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
            if (!ts_after_sub) return NULL;
          }
          ts = ts_after_sub;
          if (struct_alignment) ctx->struct_alignment = struct_alignment;
        }
        break;
      case '}':
        /* end of a nested struct: flush, then pad to the struct alignment */
        {
          size_t alignment = ctx->struct_alignment;
          ++ts;
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          if (alignment && ctx->fmt_offset % alignment) {
            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
          }
        }
        return ts;
      case 'x':
        /* pad bytes: advance the offset without matching a field */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->fmt_offset += ctx->new_count;
        ctx->new_count = 1;
        ctx->enc_count = 0;
        ctx->enc_type = 0;
        ctx->enc_packmode = ctx->new_packmode;
        ++ts;
        break;
      case 'Z':
        /* complex prefix: must be followed by a float type character */
        got_Z = 1;
        ++ts;
        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
          return NULL;
        }
        CYTHON_FALLTHROUGH;
      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
      case 'l': case 'L': case 'q': case 'Q':
      case 'f': case 'd': case 'g':
      case 'O': case 'p':
        /* same type as the pending run: just extend the count */
        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
            ctx->enc_packmode == ctx->new_packmode) {
          ctx->enc_count += ctx->new_count;
          ctx->new_count = 1;
          got_Z = 0;
          ++ts;
          break;
        }
        CYTHON_FALLTHROUGH;
      case 's':
        /* new type: flush the old run and start a fresh one */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->enc_count = ctx->new_count;
        ctx->enc_packmode = ctx->new_packmode;
        ctx->enc_type = *ts;
        ctx->is_complex = got_Z;
        ++ts;
        ctx->new_count = 1;
        got_Z = 0;
        break;
      case ':':
        /* ':name:' field annotation — skipped entirely */
        ++ts;
        while(*ts != ':') ++ts;
        ++ts;
        break;
      case '(':
        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
        break;
      default:
        /* anything else must be a repeat count for the next type char */
        {
          int number = __Pyx_BufFmt_ExpectNumber(&ts);
          if (number == -1) return NULL;
          ctx->new_count = (size_t)number;
        }
    }
  }
}
/* BufferGetAndValidate */
/* Release a Py_buffer only if it was actually acquired (buf != NULL),
   first normalizing the shared __Pyx_minusones sentinel back to NULL so
   the interpreter never frees it. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
  if (unlikely(info->buf == NULL))
    return;
  if (info->suboffsets == __Pyx_minusones)
    info->suboffsets = NULL;
  __Pyx_ReleaseBuffer(info);
}
/* Reset a Py_buffer to a safe "empty" state: no data, no owner, and the
   shared zero/minus-one sentinel arrays for shape, strides and suboffsets
   so that indexing code can run without NULL checks. */
static void __Pyx_ZeroBuffer(Py_buffer* buf) {
  buf->obj = NULL;
  buf->buf = NULL;
  buf->shape = __Pyx_zeros;
  buf->strides = __Pyx_zeros;
  buf->suboffsets = __Pyx_minusones;
}
/* Acquire a buffer from `obj` and validate it against the expected dtype:
   dimensionality must equal `nd`, the format string must match `dtype`
   (unless `cast` suppresses the check), and the itemsize must agree.
   Returns 0 on success; on failure releases/zeros the buffer, sets an
   exception and returns -1. */
static int __Pyx__GetBufferAndValidate(
        Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
  buf->buf = NULL;
  if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) {
    __Pyx_ZeroBuffer(buf);
    return -1;
  }
  if (unlikely(buf->ndim != nd)) {
    PyErr_Format(PyExc_ValueError,
                 "Buffer has wrong number of dimensions (expected %d, got %d)",
                 nd, buf->ndim);
    goto fail;
  }
  if (!cast) {
    /* full format-string validation against the expected type layout */
    __Pyx_BufFmt_Context ctx;
    __Pyx_BufFmt_Init(&ctx, stack, dtype);
    if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
  }
  if (unlikely((size_t)buf->itemsize != dtype->size)) {
    PyErr_Format(PyExc_ValueError,
      "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
      buf->itemsize, (buf->itemsize > 1) ? "s" : "",
      dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
    goto fail;
  }
  /* guarantee a non-NULL suboffsets pointer for downstream indexing code */
  if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
  return 0;
fail:;
  __Pyx_SafeReleaseBuffer(buf);
  return -1;
}
/* BufferFallbackError */
/* Raise the ValueError used when re-acquiring the previous buffer after a
   failed buffer assignment also fails. */
static void __Pyx_RaiseBufferFallbackError(void) {
  PyErr_SetString(PyExc_ValueError,
    "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
/* Fallback for obj[index] when the object has no mapping subscript:
   convert `index` to Py_ssize_t and use the sequence protocol.
   Overflow of the index is reported as IndexError, matching CPython. */
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
    PyObject *runerr;
    Py_ssize_t key_value;
    PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
    if (unlikely(!(m && m->sq_item))) {
        PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
        return NULL;
    }
    key_value = __Pyx_PyIndex_AsSsize_t(index);
    if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
        return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
    }
    /* conversion overflowed: translate OverflowError into IndexError */
    if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
        PyErr_Clear();
        PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
    }
    return NULL;
}
/* obj[key]: prefer the mapping protocol, fall back to sequence indexing. */
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
    PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
    if (likely(m && m->mp_subscript)) {
        return m->mp_subscript(obj, key);
    }
    return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* None */
/* Floor division for npy_intp, matching Python's // semantics: C's `/`
   truncates toward zero, so when the remainder is non-zero and the
   operands have opposite signs the quotient is decremented by one. */
static CYTHON_INLINE npy_intp __Pyx_div_npy_intp(npy_intp a, npy_intp b) {
    npy_intp quot = a / b;
    npy_intp rem = a - quot * b;
    if (rem != 0 && ((rem < 0) != (b < 0)))
        quot -= 1;
    return quot;
}
/* RaiseDoubleKeywords */
/* Raise the TypeError used when a keyword argument duplicates a positional
   one; '%U' formats the unicode keyword directly on Python 3. */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
        #if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
        #else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
        #endif
}
/* ParseKeywords */
/* Distribute the entries of `kwds` over the `values` slots named by
   `argnames`.  Matching is first tried by pointer identity (interned
   names), then by string comparison.  Keywords that collide with an
   already-filled positional slot raise; unknown keywords go into `kwds2`
   when given, otherwise raise.  Returns 0 on success, -1 on error. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        /* fast path: interned-name pointer identity */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        name = first_kw_arg;
        #if PY_MAJOR_VERSION < 3
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            /* slow path: compare bytes-string contents */
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* not a keyword-only slot: check for positional collision */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                             && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
        #endif
        if (likely(PyUnicode_Check(key))) {
            /* slow path: compare unicode contents (size check first) */
            while (*name) {
                int cmp = (**name == key) ? 0 :
                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
                #endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* check for collision with an already-bound positional */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
                    #endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            /* function takes **kwargs: pass the unknown keyword through */
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
    #if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
    #else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
    #endif
bad:
    return -1;
}
/* RaiseArgTupleInvalid */
/* Raise the TypeError for a wrong number of positional arguments, wording
   the message as "exactly"/"at least"/"at most" like CPython does. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* ArgTypeTest */
/* Check that argument `obj` is an instance of `type` (exact match when
   `exact`, subclass check otherwise).  Returns 1 on success; sets a
   TypeError naming the parameter and returns 0 on mismatch. */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    else if (exact) {
        /* on Python 2, str and unicode both satisfy an exact basestring test */
        #if PY_MAJOR_VERSION == 2
        if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
        #endif
    }
    else {
        if (likely(__Pyx_TypeCheck(obj, type))) return 1;
    }
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
    return 0;
}
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call `func` with no arguments, taking the fastest available path:
   the fast-call protocol for plain Python functions, the METH_NOARGS
   shortcut for C functions, otherwise a generic call with an empty tuple. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, NULL, 0);
    }
#endif
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
    if (likely(PyCFunction_Check(func)))
#endif
    {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
            return __Pyx_PyObject_CallMethO(func, NULL);
        }
    }
    /* generic fallback */
    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* RaiseTooManyValuesToUnpack */
/* Raise the ValueError for unpacking more values than expected. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Raise the ValueError for unpacking fewer values than expected; `index`
   is the number of values successfully unpacked so far. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* IterFinish */
/* Called after an iterator returns NULL: swallow a pending StopIteration
   (normal exhaustion, return 0) but propagate any other exception
   (return -1).  The fast path manipulates the thread state directly. */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject* exc_type = tstate->curexc_type;
    if (unlikely(exc_type)) {
        if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
            /* clear the StopIteration in place, without PyErr_Fetch */
            PyObject *exc_value, *exc_tb;
            exc_value = tstate->curexc_value;
            exc_tb = tstate->curexc_traceback;
            tstate->curexc_type = 0;
            tstate->curexc_value = 0;
            tstate->curexc_traceback = 0;
            Py_DECREF(exc_type);
            Py_XDECREF(exc_value);
            Py_XDECREF(exc_tb);
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#else
    if (unlikely(PyErr_Occurred())) {
        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
            PyErr_Clear();
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#endif
}
/* UnpackItemEndCheck */
/* After unpacking `expected` items, verify the iterator is exhausted.
   A non-NULL `retval` means the iterator yielded an extra item: discard it
   and raise "too many values to unpack".  Otherwise delegate to
   __Pyx_IterFinish() to distinguish normal exhaustion from a real error.
   (The original trailing `return 0;` was unreachable — both branches
   return — and has been removed.) */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
    if (unlikely(retval)) {
        Py_DECREF(retval);
        __Pyx_RaiseTooManyValuesError(expected);
        return -1;
    }
    return __Pyx_IterFinish();
}
/* RaiseException */
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of `raise type, value, tb`: validates the
   traceback, normalizes class exceptions, and restores the triple into the
   thread state.  All references are consumed/released locally; on error
   the partially-owned references are dropped and the new exception stands. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
        /* raising a class: normalize into an instance */
#if CYTHON_COMPILING_IN_PYPY
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* raising an instance: its class becomes the type */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Python 3 implementation of `raise type(value) from cause with tb`:
   instantiates class exceptions when needed, attaches the cause, sets the
   exception, and splices in an explicit traceback.  Borrowed arguments are
   never stolen; only `owned_instance` is locally owned. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* raising an instance: its class becomes the type */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            /* `raise Class, instance`: accept if instance is of a subclass */
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            /* instantiate the class with `value` as argument(s) */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
    if (cause) {
        /* `raise ... from cause`: attach the (possibly instantiated) cause */
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* install the explicit traceback into the current exception */
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        PyThreadState *tstate = __Pyx_PyThreadState_Current;
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* DictGetItem */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
/* d[key] with CPython dict semantics: returns a NEW reference, and on a
   missing key raises KeyError(key) — wrapping tuple keys in a 1-tuple so
   the KeyError argument is the key itself rather than unpacked. */
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
    PyObject *value;
    value = PyDict_GetItemWithError(d, key);
    if (unlikely(!value)) {
        if (!PyErr_Occurred()) {
            if (unlikely(PyTuple_Check(key))) {
                PyObject* args = PyTuple_Pack(1, key);
                if (likely(args)) {
                    PyErr_SetObject(PyExc_KeyError, args);
                    Py_DECREF(args);
                }
            } else {
                PyErr_SetObject(PyExc_KeyError, key);
            }
        }
        return NULL;
    }
    /* PyDict_GetItemWithError returns a borrowed reference */
    Py_INCREF(value);
    return value;
}
#endif
/* RaiseNoneIterError */
/* Raise the TypeError used when None is unpacked/iterated. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* GetException */
#if CYTHON_FAST_THREAD_STATE
/* sys.exc_info()-style fetch: move the pending exception into
   (*type, *value, *tb) as NEW references, normalize it, and also install
   it as the "current" exception in the thread state (so bare `raise`
   re-raises it).  Returns 0 on success, -1 on failure with outputs zeroed. */
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    /* take ownership of the pending exception directly from the thread state */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;  /* normalization itself raised */
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* extra references for the caller's output parameters */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
    #if CYTHON_USE_EXC_INFO_STACK
    {
        /* swap into the exception-info stack, releasing the previous entry */
        _PyErr_StackItem *exc_info = tstate->exc_info;
        tmp_type = exc_info->exc_type;
        tmp_value = exc_info->exc_value;
        tmp_tb = exc_info->exc_traceback;
        exc_info->exc_type = local_type;
        exc_info->exc_value = local_value;
        exc_info->exc_traceback = local_tb;
    }
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    #endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import `module_name.class_name` and verify it is a type object whose
   tp_basicsize is compatible with the `size` this module was compiled
   against.  A smaller runtime size is always an error; a mismatch is an
   error or a warning depending on `check_size`.  Returns a new reference
   to the type, or NULL with an exception set. */
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
    size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
    PyObject *result = 0;
    char warning[200];
    Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
    PyObject *py_basicsize;
#endif
    result = PyObject_GetAttrString(module, class_name);
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%.200s.%.200s is not a type object",
            module_name, class_name);
        goto bad;
    }
#ifndef Py_LIMITED_API
    basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
    /* limited API: read __basicsize__ attribute instead of the struct field */
    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
    if (!py_basicsize)
        goto bad;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = 0;
    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
#endif
    if ((size_t)basicsize < size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd from PyObject",
            module_name, class_name, size, basicsize);
        goto bad;
    }
    if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd from PyObject",
            module_name, class_name, size, basicsize);
        goto bad;
    }
    else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
        /* larger-than-expected type is tolerated with a warning */
        PyOS_snprintf(warning, sizeof(warning),
            "%s.%s size changed, may indicate binary incompatibility. "
            "Expected %zd from C header, got %zd from PyObject",
            module_name, class_name, size, basicsize);
        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(result);
    return NULL;
}
#endif
/* Import */
/* __import__(name, fromlist=from_list, level=level) for this module.
   level == -1 first tries a package-relative import (only meaningful when
   this module lives in a package) and falls back to an absolute one.
   Returns a new reference to the module, or NULL with an exception set. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
    #if PY_MAJOR_VERSION < 3
    /* Python 2 routes through the (possibly user-replaced) builtin __import__ */
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
    #endif
    if (from_list)
        list = from_list;
    else {
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
        #if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            /* try a relative import first when we are inside a package */
            if (strchr(__Pyx_MODULE_NAME, '.')) {
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();
                }
            }
            level = 0;  /* fall back to absolute import */
        }
        #endif
        if (!module) {
            #if PY_MAJOR_VERSION < 3
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
            Py_DECREF(py_level);
            #else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
            #endif
        }
    }
bad:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(py_import);
    #endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
/* Decide whether the generated-C line number should appear in tracebacks,
   controlled by cython_runtime.cline_in_traceback.  Returns `c_line`
   unchanged when enabled, 0 to suppress it.  Preserves any pending
   exception across the attribute lookup. */
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
    PyObject *use_cline;
    PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict;
#endif
    if (unlikely(!__pyx_cython_runtime)) {
        return c_line;
    }
    /* stash the in-flight exception; restored before returning */
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, *cython_runtime_dict,
            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
    } else
#endif
    {
      PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
      if (use_cline_obj) {
        use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
        Py_DECREF(use_cline_obj);
      } else {
        PyErr_Clear();
        use_cline = NULL;
      }
    }
    if (!use_cline) {
        /* attribute missing: default to off and create it as False */
        c_line = 0;
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    }
    else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif
/* CodeObjectCache */
/* Binary search over `entries` (sorted by code_line ascending): returns the
   index of the matching entry, or the insertion index that keeps the array
   sorted when no entry matches.  Returns `count` when code_line is larger
   than every stored line. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
             start = mid + 1;
        } else {
            return mid;
        }
    }
    /* start == end: resolve against the final probe position */
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up a cached PyCodeObject for `code_line`.  Returns a NEW reference,
   or NULL when the line is 0 or not cached. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) the cache entry for `code_line`, keeping the entry
   array sorted.  The cache grows in chunks of 64; allocation failures are
   silently ignored (caching is best-effort).  Takes its own reference to
   `code_object`. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* first insertion: allocate the initial 64-entry array */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* line already cached: swap in the new code object */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* shift the tail up by one to open the insertion slot */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal PyCodeObject carrying only the filename, function name
   and line number, for synthesizing traceback frames.  When a C line is
   given it is appended to the displayed function name.  Returns a new
   reference or NULL on failure. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
    #if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
    #else
    py_srcfile = PyUnicode_FromString(filename);
    #endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        /* show the generated-C location alongside the Python name */
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #endif
    }
    else {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
        #else
        py_funcname = PyUnicode_FromString(funcname);
        #endif
    }
    if (!py_funcname) goto bad;
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,   /*PyObject *filename,*/
        py_funcname,  /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a synthetic frame for (funcname, filename, line) to the traceback of
 * the currently pending exception.  Code objects are cached per line number
 * (negative keys encode C source lines, positive keys Python lines) so that
 * repeated raises from the same location reuse the same PyCodeObject.
 * Errors while building the frame are silently discarded. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
/* -c_line vs. py_line keeps the two keyspaces disjoint in the cache */
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
/* Python 2 only: acquire a Py_buffer from obj.  Uses the new-style buffer
 * protocol when available, otherwise falls back to the numpy.ndarray
 * __getbuffer__ slot generated by Cython; raises TypeError for anything else. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
/* Python 2 only: release a buffer acquired by __Pyx_GetBuffer, dispatching to
 * the matching release path and dropping the owner reference. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view);
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* CIntToPy */
/* Convert a C long to a Python integer using the narrowest exact CPython API;
 * types wider than (unsigned) long long fall back to _PyLong_FromByteArray.
 * This is a generated template instantiated for 'long', so some sizeof
 * comparisons below are constant-folded dead branches. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
/* compile-time signedness probe of the source type */
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
/* dead for this instantiation: sizeof(long) == sizeof(long) */
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
/* generic fallback: hand the raw bytes to CPython with detected endianness */
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
/* Convert a C int to a Python integer; same generated template as
 * __Pyx_PyInt_From_long, instantiated for 'int'. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
/* compile-time signedness probe of the source type */
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
/* generic fallback via raw bytes with detected endianness */
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntToPy */
/* Convert a C Py_intptr_t to a Python integer; same generated template as
 * __Pyx_PyInt_From_long, instantiated for 'Py_intptr_t'. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
const Py_intptr_t neg_one = (Py_intptr_t) ((Py_intptr_t) 0 - (Py_intptr_t) 1), const_zero = (Py_intptr_t) 0;
/* compile-time signedness probe of the source type */
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(Py_intptr_t) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(Py_intptr_t) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
/* generic fallback via raw bytes with detected endianness */
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t),
little, !is_unsigned);
}
}
/* Print */
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
/* Fetch sys.stdout (borrowed reference); sets RuntimeError and returns NULL
 * if it is missing. */
static PyObject *__Pyx_GetStdout(void) {
PyObject *f = PySys_GetObject((char *)"stdout");
if (!f) {
PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
}
return f;
}
/* Python 2 emulation of the print statement: write each item of arg_tuple to
 * file f (sys.stdout when f is NULL), maintaining the classic softspace
 * behaviour, and append a newline when requested.  Returns 0 / -1. */
static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) {
int i;
if (!f) {
if (!(f = __Pyx_GetStdout()))
return -1;
}
Py_INCREF(f);
for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) {
PyObject* v;
/* softspace set by the previous write means a separating blank is due */
if (PyFile_SoftSpace(f, 1)) {
if (PyFile_WriteString(" ", f) < 0)
goto error;
}
v = PyTuple_GET_ITEM(arg_tuple, i);
if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0)
goto error;
if (PyString_Check(v)) {
char *s = PyString_AsString(v);
Py_ssize_t len = PyString_Size(v);
if (len > 0) {
/* mimic CPython: a string ending in whitespace suppresses the next softspace */
switch (s[len-1]) {
case ' ': break;
case '\f': case '\r': case '\n': case '\t': case '\v':
PyFile_SoftSpace(f, 0);
break;
default: break;
}
}
}
}
if (newline) {
if (PyFile_WriteString("\n", f) < 0)
goto error;
PyFile_SoftSpace(f, 0);
}
Py_DECREF(f);
return 0;
error:
Py_DECREF(f);
return -1;
}
#else
/* Python 3 path: delegate to the builtin print().  A 'file=stream' kwarg is
 * added when a stream is given, and end=' ' when no trailing newline is
 * wanted; the no-stream/no-newline kwargs dict is built once and cached in
 * __pyx_print_kwargs.  Returns 0 on success, -1 on error. */
static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) {
PyObject* kwargs = 0;
PyObject* result = 0;
PyObject* end_string;
if (unlikely(!__pyx_print)) {
/* lazily look up and cache the builtin print function */
__pyx_print = PyObject_GetAttr(__pyx_b, __pyx_n_s_print);
if (!__pyx_print)
return -1;
}
if (stream) {
kwargs = PyDict_New();
if (unlikely(!kwargs))
return -1;
if (unlikely(PyDict_SetItem(kwargs, __pyx_n_s_file, stream) < 0))
goto bad;
if (!newline) {
end_string = PyUnicode_FromStringAndSize(" ", 1);
if (unlikely(!end_string))
goto bad;
if (PyDict_SetItem(kwargs, __pyx_n_s_end, end_string) < 0) {
Py_DECREF(end_string);
goto bad;
}
Py_DECREF(end_string);
}
} else if (!newline) {
if (unlikely(!__pyx_print_kwargs)) {
__pyx_print_kwargs = PyDict_New();
if (unlikely(!__pyx_print_kwargs))
return -1;
end_string = PyUnicode_FromStringAndSize(" ", 1);
if (unlikely(!end_string))
return -1;
if (PyDict_SetItem(__pyx_print_kwargs, __pyx_n_s_end, end_string) < 0) {
Py_DECREF(end_string);
return -1;
}
Py_DECREF(end_string);
}
kwargs = __pyx_print_kwargs;
}
result = PyObject_Call(__pyx_print, arg_tuple, kwargs);
/* only per-call kwargs dicts are owned here; the cached one is kept alive */
if (unlikely(kwargs) && (kwargs != __pyx_print_kwargs))
Py_DECREF(kwargs);
if (!result)
return -1;
Py_DECREF(result);
return 0;
bad:
if (kwargs != __pyx_print_kwargs)
Py_XDECREF(kwargs);
return -1;
}
#endif
/* FromPy */
/* Convert a Python object to a double-precision complex value, reading the
 * cval field directly for exact PyComplex instances on CPython and going
 * through PyComplex_AsCComplex otherwise (errors surface via that API's
 * (-1, 0) + PyErr_Occurred() convention). */
static __pyx_t_double_complex __Pyx_PyComplex_As___pyx_t_double_complex(PyObject* o) {
Py_complex cval;
#if !CYTHON_COMPILING_IN_PYPY
if (PyComplex_CheckExact(o))
cval = ((PyComplexObject *)o)->cval;
else
#endif
cval = PyComplex_AsCComplex(o);
return __pyx_t_double_complex_from_parts(
(double)cval.real,
(double)cval.imag);
}
/* Declarations */
/* Construct a float complex from real/imaginary parts.  Three variants:
 * C++ std::complex, C99 _Complex, or the plain-struct software fallback. */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
/* struct fallback when no native complex type is available */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
/* Software float-complex arithmetic, compiled only when no native C complex
 * type is available (CYTHON_CCOMPLEX unset). */
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
/* Division: branches on operand magnitudes and scales by a ratio before
 * dividing, rather than using the naive denominator a la the #else variant. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
/* deliberate division by zero: propagates inf/nan per IEEE semantics */
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = (float)(1.0) / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = (float)(1.0) / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
/* a ** b.  Small integer exponents (0..4, and their negatives via a prior
 * reciprocal) are expanded with multiplications; everything else goes through
 * the polar form exp(b * log(a)). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
/* a**-n == (1/a)**n */
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
/* fixed: the product was computed twice (dead store to z, then a
 * second __Pyx_c_prod_float call in the return); compute it once */
z = __Pyx_c_prod_float(a, a);
return z;
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
/* negative real base: magnitude -a.real, angle pi */
r = -a.real;
theta = atan2f(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
/* Construct a double complex from real/imaginary parts.  Three variants:
 * C++ std::complex, C99 _Complex, or the plain-struct software fallback. */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
/* struct fallback when no native complex type is available */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
/* Software double-complex arithmetic, compiled only when no native C complex
 * type is available (CYTHON_CCOMPLEX unset). */
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
/* Division: branches on operand magnitudes and scales by a ratio before
 * dividing, rather than using the naive denominator a la the #else variant. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
/* deliberate division by zero: propagates inf/nan per IEEE semantics */
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = (double)(1.0) / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = (double)(1.0) / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
/* a ** b.  Small integer exponents (0..4, and their negatives via a prior
 * reciprocal) are expanded with multiplications; everything else goes through
 * the polar form exp(b * log(a)). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
/* a**-n == (1/a)**n */
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
/* fixed: the product was computed twice (dead store to z, then a
 * second __Pyx_c_prod_double call in the return); compute it once */
z = __Pyx_c_prod_double(a, a);
return z;
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
/* negative real base: magnitude -a.real, angle pi */
r = -a.real;
theta = atan2(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* CIntToPy */
/* Convert a C 'enum NPY_TYPES' value to a Python integer; same generated
 * template as __Pyx_PyInt_From_long, instantiated for the enum type (its
 * underlying signedness is probed at compile time, as it is
 * implementation-defined). */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(enum NPY_TYPES) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
/* generic fallback via raw bytes with detected endianness */
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
little, !is_unsigned);
}
}
/* CIntFromPy */
/* Convert a Python object to a C int with full overflow checking.
 * Raises OverflowError when the value does not fit; on any error the return
 * value is (int)-1 with an exception set, so a legitimate result of -1 is
 * only distinguishable via PyErr_Occurred().  Non-integer objects are
 * coerced through __Pyx_PyNumber_IntOrLong and converted recursively. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
/* Python 2 plain-int fast path */
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
/* CPython-only fast path: read up to 4 of the PyLong's internal
 * 15/30-bit digits directly, avoiding an API call */
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
/* reject negative values before unsigned conversion */
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
/* signed target: same digit-level fast path, including negative sizes */
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
/* last resort for very wide targets: copy the raw bytes out of the PyLong */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
/* not an int at all: coerce via __index__/__int__ and convert the result */
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* PrintOne */
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
/* Python 2: print a single object followed by a newline to f (sys.stdout
 * when f is NULL), clearing any pending softspace first.  Returns 0 / -1. */
static int __Pyx_PrintOne(PyObject* f, PyObject *o) {
if (!f) {
if (!(f = __Pyx_GetStdout()))
return -1;
}
Py_INCREF(f);
if (PyFile_SoftSpace(f, 0)) {
if (PyFile_WriteString(" ", f) < 0)
goto error;
}
if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0)
goto error;
if (PyFile_WriteString("\n", f) < 0)
goto error;
Py_DECREF(f);
return 0;
error:
Py_DECREF(f);
return -1;
/* the line below is just to avoid C compiler
 * warnings about unused functions */
return __Pyx_Print(f, NULL, 0);
}
#else
/* Python 3: wrap the object in a 1-tuple and delegate to __Pyx_Print. */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o) {
int res;
PyObject* arg_tuple = PyTuple_Pack(1, o);
if (unlikely(!arg_tuple))
return -1;
res = __Pyx_Print(stream, arg_tuple, 1);
Py_DECREF(arg_tuple);
return res;
}
#endif
/* CIntFromPy */
/* Convert a Python object to a C long with full overflow checking; same
 * generated template as __Pyx_PyInt_As_int, instantiated for 'long'.
 * Raises OverflowError when the value does not fit; on any error the return
 * value is (long)-1 with an exception set (use PyErr_Occurred() to tell it
 * apart from a legitimate -1). */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
/* Python 2 plain-int fast path */
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
/* CPython-only fast path: read up to 4 of the PyLong's internal
 * 15/30-bit digits directly, avoiding an API call */
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
/* reject negative values before unsigned conversion */
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
/* signed target: same digit-level fast path, including negative sizes */
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
/* last resort for very wide targets: copy the raw bytes out of the PyLong */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
/* not an int at all: coerce via __index__/__int__ and convert the result */
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
/* Report whether 'b' occurs among the strict tp_base ancestors of 'a'.
 * The initial 'a' itself is deliberately not compared (the caller handles
 * the a == b case).  As a fallback, every type implicitly derives from
 * 'object', so b == PyBaseObject_Type always matches. */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
    PyTypeObject *cur = a;
    while (cur != NULL) {
        cur = cur->tp_base;
        if (cur == b)
            return 1;
    }
    return b == &PyBaseObject_Type;
}
/* Fast issubclass() check for heap/static types without calling back into
 * Python.  Prefers the precomputed MRO tuple when present; falls back to
 * walking tp_base chains (__Pyx_InBases) for types whose MRO is not set
 * (e.g. partially initialized types).  Returns 1 if 'a' is 'b' or a
 * subtype of 'b', else 0.  Borrowed references only; never raises. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
/* the MRO of 'a' contains every base of 'a', so a linear scan suffices */
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
/* no MRO available: fall back to the tp_base chain */
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
/* Python 2 variant: does 'err' match exc_type1 or exc_type2?
 * PyObject_IsSubclass may execute arbitrary Python code (__subclasscheck__),
 * which could clobber a pending exception, so the current error indicator is
 * saved with ErrFetch before the checks and restored with ErrRestore after.
 * A failing subclass check is reported as unraisable and treated as
 * "no match" rather than propagated. */
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
/* put the originally pending exception (if any) back in place */
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
/* Python 3 variant: exception classes are plain types here, so matching is a
 * pure tp_base/MRO walk via __Pyx_IsSubtype — no Python code can run and no
 * error state needs saving.  exc_type1 may be NULL, in which case only
 * exc_type2 is tested. */
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    PyTypeObject *err_type = (PyTypeObject *)err;
    if (exc_type1 && __Pyx_IsSubtype(err_type, (PyTypeObject *)exc_type1))
        return 1;
    return __Pyx_IsSubtype(err_type, (PyTypeObject *)exc_type2);
}
#endif
/* Match a single exception class 'exc_type' against a tuple of candidates.
 * Two passes: a cheap identity scan first (hoisted into its own loop on
 * Python 3, inlined into the main loop on Python 2), then a subclass check
 * per element.  Non-class tuple entries are silently ignored, mirroring
 * what PyErr_GivenExceptionMatches does. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
/* fast path: exact identity with any tuple element */
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
/* non-class entries are ignored, not an error */
}
}
return 0;
}
/* Drop-in fast path for PyErr_GivenExceptionMatches(err, exc_type).
 * Handles the common cases (identity, class-vs-class, class-vs-tuple)
 * inline and defers anything unusual (non-class err, odd exc_type) to the
 * CPython implementation so the semantics stay identical. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
/* fall through to CPython for exotic exc_type objects */
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Match 'err' against two exception classes at once (used for generated
 * "except (A, B)" clauses).  Both exc_type1 and exc_type2 must be exception
 * classes (asserted); a non-class 'err' falls back to two separate CPython
 * calls. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
/* Warn (via PyErr_WarnEx, category NULL => RuntimeWarning) when the Python
 * version this module was compiled against differs from the interpreter
 * loading it, comparing only major and first minor digit.
 * NOTE(review): the 4-byte ctversion/rtversion buffers hold "M.m\0" and so
 * truncate two-digit minor versions (e.g. "3.10" -> "3.1"); presumably fine
 * for the Pythons this generator targeted — confirm against newer runtimes.
 * Returns 0 on success/match, or the PyErr_WarnEx result (-1 if the warning
 * was raised as an error). */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
/* ctversion[0]/rtversion[0] = major digit, [2] = first minor digit */
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
/* Materialize the module's interned-string table.  Each entry describes one
 * constant (raw bytes s, length n including the terminator, plus flags) and
 * receives the created object through *t->p.  The table is terminated by an
 * entry with p == NULL.  Every object is hashed once up front so later
 * dict lookups cannot fail on hashing.  Returns 0 on success, -1 on error
 * (with the Python error indicator set by the failing constructor). */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
/* Py2: str constants stay bytes; unicode flag selects UTF-8 decode */
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
/* Py3: str constants become unicode; only non-str stay bytes */
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
/* pre-compute and cache the hash; -1 signals a hashing error */
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
/* Build a unicode object from a NUL-terminated C string (length via strlen).
 * Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
/* Length-discarding convenience wrapper around AsStringAndSize.
 * The returned pointer borrows storage owned by 'o' (or its cached default
 * encoding); it is only valid while 'o' is alive. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
/* Pre-PEP-393 build: expose a unicode object's bytes via its cached
 * default-encoded representation.  The returned buffer is owned by that
 * cached bytes object, so it stays valid as long as 'o' does.
 * Under ASCII-only default encoding, any byte >= 128 is rejected by
 * re-running PyUnicode_AsASCIIString purely to set the proper
 * UnicodeEncodeError before returning NULL. */
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
/* non-ASCII byte: raise the canonical encode error, then fail */
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
/* PEP-393 build: ensure the compact representation is ready, then hand out
 * the interpreter-cached UTF-8 buffer.  With ASCII default encoding,
 * non-ASCII strings are rejected (PyUnicode_AsASCIIString is called only to
 * set the expected exception).  The buffer is owned by 'o'. */
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
/* for ASCII strings the UTF-8 form is the identity encoding */
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
/* Extract a (pointer, length) byte view from a string-like object:
 * unicode (only when a default-encoding policy is compiled in), bytearray
 * (where the C API exposes its buffer), or bytes via
 * PyBytes_AsStringAndSize.  The pointer borrows the object's storage;
 * returns NULL with an exception set on failure. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
/* on Py2 this path is only safe if sys.getdefaultencoding() is ascii */
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
/* Truth test with an inline fast path for the three singletons True/False/
 * None; everything else defers to PyObject_IsTrue (which may run __bool__
 * and can return -1 on error). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
/* bitwise | intentionally avoids short-circuit branches */
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
/* Truth-test 'x' and consume the caller's reference to it.
 * A NULL argument (a propagated error) yields -1 without touching
 * refcounts; otherwise the result of __Pyx_PyObject_IsTrue (0/1, or -1 if
 * __bool__ raised) is returned after the DECREF. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
    if (unlikely(!x))
        return -1;
    {
        const int truth = __Pyx_PyObject_IsTrue(x);
        Py_DECREF(x);
        return truth;
    }
}
/* Called when __int__/__long__ returned something other than an exact int.
 * On Python 3, a strict int *subclass* is still accepted but triggers a
 * DeprecationWarning (mirroring CPython); if warnings are configured as
 * errors, 'result' is released and NULL returned.  Any other type is a
 * TypeError.  Consumes the reference to 'result' on every failure path. */
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
/* Coerce 'x' to an exact int (Py3) / int-or-long (Py2), returning a new
 * reference.  Exact ints are returned as-is (new ref).  Otherwise the
 * nb_int/nb_long slot is invoked directly when type slots are available;
 * on slot-less builds PyNumber_Int is used, with bytes/str excluded so
 * string parsing does not silently succeed.  A non-int slot result is
 * routed through __Pyx_PyNumber_IntOrLongWrongResultType.  Returns NULL
 * with TypeError when no conversion exists. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
/* 'name' records which slot produced the value, for the error message */
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
/* slot returned a non-int: warn/raise and possibly discard it */
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
/* Convert an index-like object to Py_ssize_t, like PyNumber_AsSsize_t but
 * with inline fast paths.  Exact ints read the PyLong digit array directly
 * (2-4 digit magnitudes reassembled by shifting) when digit internals are
 * available; anything else goes through PyNumber_Index.  Returns -1 on
 * error with an exception set — callers must disambiguate -1 via
 * PyErr_Occurred(). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
/* ob_size encodes both sign and digit count */
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
/* multi-digit values: only take the fast path when the result is
   guaranteed to fit in Py_ssize_t for that digit count */
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
/* generic path: use the object's __index__ protocol */
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
/* Map a C truth value to a new reference to Py_True / Py_False. */
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
/* Build a Python int from an unsigned size_t (new reference, NULL on
 * allocation failure). */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
potentials.h | #pragma once
#include "base.h"
#include "geometries.h"
/*!
* \addtogroup Potentials
* @{
*/
/*! The potentials namespace contains the various potentials. Each potential has a corresponding
* force function which calculates the force by differentiating the potential function
*/
namespace potentials{
/*!
* \addtogroup Harmonic
* @{
*/
/*! One dimensional harmonic potential
*/
/*! One dimensional harmonic potential acting on atom 0 only.
 *
 * NOTE(review): energy() and forces() are not a consistent gradient pair —
 * energy uses 0.5*k*|pos| (the norm, not squared, centered at the origin)
 * while the force is k*(25 - x) along +x (centered at x = 25 nm).  Looks
 * like one of the two was edited without the other; confirm intended
 * equilibrium before relying on energy conservation here.
 */
struct harmonic_1D{
private:
static constexpr double springConstant = 1.0; // [kJ * nm^(-2) * mol^(-1)]
public:
// Potential energy of atom 0; see the consistency note above.
inline static double energy(Particles& particles){
double energy = 0;
energy = 0.5 * springConstant * particles.atoms[0]->pos.norm(); // [kJ/mol]
return energy;
}
// Adds a restoring force on atom 0 along the x axis toward x = 25.
inline static void forces(Particles& particles){
/*!
* One dimensional harmonic potential
*/
double force = 0;
Eigen::Vector3d forceDir;
forceDir << 1, 0, 0;
force = springConstant * (25 - particles.atoms[0]->pos[0]); // [(kJ / (nm * mol)] = [dalton * nm/ps^2]
particles.atoms[0]->force += force * forceDir.normalized(); // [(kJ / (nm * mol)] = [dalton * nm/ps^2]
}
};
/*! Harmonic bond potential U = k*(r - r0)^2 over every bond of every
 * particle, with the equilibrium length r0 = 0.5 nm hard-coded below.
 * forces() applies F = -dU/dr = -2k(r - r0) along the bond axis, so the
 * two functions are a consistent gradient pair. */
struct harmonic{
private:
static constexpr double springConstant = 100.0; // [kJ * nm^(-2) * mol^(-1)]
public:
// Sum of k*(dist - 0.5)^2 over all bonds; geometry supplies the metric
// (e.g. minimum-image distance under PBC).
inline static double energy(Particles& particles, Geometry* geometry){
double energy = 0;
for(int i = 0; i < particles.numOfParticles; i++){
for(auto bond : particles[i]->bonds){
double dist = geometry->dist(particles[i]->atoms[bond[0]]->pos, particles[i]->atoms[bond[1]]->pos);
energy += springConstant * std::pow((dist - 0.5), 2); // [kJ/mol]
}
}
return energy;
}
inline static void forces(Particles& particles, Geometry* geometry) {
/*!
* Harmonic potential
*/
for(int i = 0; i < particles.numOfParticles; i++){
for(auto bond : particles[i]->bonds){
Eigen::Vector3d disp = geometry->disp(particles[i]->atoms[bond[0]]->pos, particles[i]->atoms[bond[1]]->pos);
// equal and opposite forces on the two bonded atoms (Newton's third law)
Eigen::Vector3d a_force = -2.0 * springConstant * disp.normalized() * (disp.norm() - 0.5);
particles[i]->atoms[bond[0]]->force += a_force;
particles[i]->atoms[bond[1]]->force += -a_force;
}
}
}
};
/*! Harmonic angle-bending potential U = k*(theta - theta0)^2 for each
 * a-b-c angle triple, with theta0 hard-coded as 3.14 (an approximation of
 * pi — i.e. the straight-chain configuration).
 * NOTE(review): consider using a proper pi constant; 3.14 introduces a
 * small systematic offset in the equilibrium angle. */
struct angular_harmonic{
private:
static constexpr double k = 1000.0;
public:
// Angle energy: theta is recovered from the dot product of the two
// bond vectors b->a and b->c.
inline static double energy(Particles& particles, Geometry* geometry){
double energy = 0;
for(int i = 0; i < particles.numOfParticles; i++) {
for (auto angle : particles[i]->angles) {
Eigen::Vector3d ba_disp = geometry->disp(particles[i]->atoms[angle[0]]->pos,
particles[i]->atoms[angle[1]]->pos);
Eigen::Vector3d bc_disp = geometry->disp(particles[i]->atoms[angle[2]]->pos,
particles[i]->atoms[angle[1]]->pos);
double ba_dist = ba_disp.norm();
double bc_dist = bc_disp.norm();
double theta = std::acos(ba_disp.dot(bc_disp) / (ba_dist * bc_dist));
energy += k * std::pow((theta - 3.14), 2);
}
}
return energy;
}
// Forces on the two end atoms are perpendicular to their bond vectors
// (constructed via double cross products); the center atom takes the
// negative sum so the triple's net force is zero.
inline static void forces(Particles& particles, Geometry* geometry) {
for(int i = 0; i < particles.numOfParticles; i++){
for(auto angle : particles[i]->angles){
Eigen::Vector3d ab_disp = geometry->disp(particles[i]->atoms[angle[1]]->pos,
particles[i]->atoms[angle[0]]->pos);
Eigen::Vector3d bc_disp = geometry->disp(particles[i]->atoms[angle[2]]->pos,
particles[i]->atoms[angle[1]]->pos);
Eigen::Vector3d ba_disp = -1.0 * ab_disp;
Eigen::Vector3d cb_disp = -1.0 * bc_disp;
double ab_dist = ab_disp.norm();
double bc_dist = bc_disp.norm();
double ba_dist = ab_dist;
double cb_dist = bc_dist;
double theta = std::acos(ba_disp.dot(bc_disp) / (ba_dist * bc_dist));
Eigen::Vector3d a_force = -2 * k * (theta - 3.14) / ab_dist *
(ba_disp.cross(ba_disp.cross(bc_disp))).normalized();
Eigen::Vector3d c_force = -2 * k * (theta - 3.14) / bc_dist *
(cb_disp.cross(ba_disp.cross(bc_disp))).normalized();
particles[i]->atoms[angle[0]]->force += a_force;
particles[i]->atoms[angle[2]]->force += c_force;
particles[i]->atoms[angle[1]]->force += - a_force - c_force;
//particles[i]->atoms[angle[1]]->force += -1.0 * (particles[i]->atoms[angle[0]]->force +
// particles[i]->atoms[angle[2]]->force);
}
}
}
};
/*!
* \addtogroup Coulomb
* @{
*/
/*! Coulomb potential
*/
struct coulomb{
private:
static constexpr double cFactor = 1000.0; //[kJ * nm * mol^-1]
public:
inline static double energy(Particles& particles, Geometry* geometry){
double energy = 0;
for(int i = 0; i < particles.atoms.numOfAtoms; i++){
for(int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
energy += particles.atoms[i]->q * particles.atoms[j]->q /
geometry->dist(particles.atoms[i]->pos, particles.atoms[j]->pos);
}
}
return energy * cFactor;
}
inline static void forces(Particles& particles, Geometry* geometry){
double magnitude = 0;
double distance = 0;
Eigen::Vector3d disp;
for(int i = 0; i < particles.atoms.numOfAtoms; i++){
for(int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
disp = geometry->disp(particles.atoms[i]->pos, particles.atoms[j]->pos);
distance = disp.norm();
magnitude = cFactor * particles.atoms[i]->q * particles.atoms[j]->q / (distance * distance);
disp.normalize();
particles.atoms[i]->force += magnitude * disp;
particles.atoms[j]->force -= magnitude * disp;
}
}
}
};
/*!
* \addtogroup LJRep
* @{
*/
/*! Repulsive part of the Lennard Jones potential
*/
/*! Purely repulsive r^-12 part of the Lennard-Jones potential:
 * U = 4*eps*(sigma/r)^12, F = 48*eps*(sigma/r)^12 / r along the pair axis.
 * energy() and forces() form a consistent gradient pair. */
struct LJRep{
private:
static constexpr double epsilon = 1.5; //![kJ/mol] LJ parameter epsilon
static constexpr double sigma = 1.0; //![nm] LJ parameter sigma
public:
inline static void forces(Particles& particles, Geometry* geometry) {
/*!
* Calculate the forces using a Lennard-Jones potential
*/
Eigen::Vector3d dr;
for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
for (int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
dr = geometry->disp(particles.atoms[i]->pos, particles.atoms[j]->pos); // [nm]
double r2 = dr.dot(dr); // [nm^2]
double fr2 = sigma * sigma / r2; // unitless
double fr6 = fr2 * fr2 * fr2; // unitless
// fr is |F|/r, so fr*dr gives the force vector directly
double fr = 48 * epsilon * fr6 * fr6 / r2; // [kJ/(nm^2*mol)]
particles.atoms[i]->force += fr * dr; //[(kJ/(nm*mol)] = [dalton * nm/ps^2]
particles.atoms[j]->force -= fr * dr; //[(kJ/(nm*mol)] = [dalton * nm/ps^2]
//particles.atoms.forceMatrix(i, j) = (fr * dr).norm();
}
}
}
inline static double energy(Particles& particles, Geometry* geometry) {
/*!
* Calculate the energy using a Lennard-Jones potential
*/
double distance;
double energy = 0;
Eigen::Vector3d dr;
for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
for (int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
dr = geometry->disp(particles.atoms[i]->pos, particles.atoms[j]->pos); // [nm]
distance = dr.norm(); // [nm]
double fr = sigma / distance; // unitless
double fr2 = fr * fr; // unitless
double fr6 = fr2 * fr2 * fr2; // unitless
energy += fr6 * fr6; // (sigma/r)^12, unitless
}
}
return 4 * epsilon * energy; // [kJ/mol]
}
};
/*!
* \addtogroup Lennard-Jones
* @{
*/
/*! The Lennard-Jones (LJ) potential
*/
/*! The full Lennard-Jones (LJ) potential (repulsion + attraction). */
struct LJ {
private:
    static constexpr double epsilon = 1.5; //[kJ/mol] LJ parameter epsilon
    static constexpr double sigma = 1.0;   //[nm] LJ parameter sigma
public:
    /*!
     * Accumulate the pairwise LJ force on every atom.
     * Each thread sums into its own per-atom buffer (private_forces) and the
     * buffers are merged under a critical section, so no two threads ever
     * write the same atom->force concurrently.
     */
    inline static void forces(Particles &particles, Geometry *geometry) {
        // FIX: 'geometry' is referenced inside the region, so under
        // default(none) it must be listed explicitly in shared(); the
        // original pragma omitted it, which is a compile error per the
        // OpenMP data-sharing rules.
#pragma omp parallel default(none) shared(particles, geometry) if(particles.atoms.numOfAtoms >= 400)
        {
            double r2;   // squared pair distance [nm^2]
            double fr2;  // (sigma/r)^2, unitless
            double fr6;  // (sigma/r)^6, unitless
            double fr;   // |F|/r [kJ/(nm^2*mol)], so fr*dr is the force vector
            Eigen::Vector3d dr;
            std::vector<Eigen::Vector3d> private_forces(particles.atoms.numOfAtoms);
            for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
                private_forces[i].setZero();
            }
            // dynamic schedule: inner loop length shrinks with i, so static
            // chunks would be badly imbalanced
#pragma omp for schedule(dynamic, 50)
            for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
                for (int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
                    dr = geometry->disp(particles.atoms[i]->pos, particles.atoms[j]->pos); // [nm]
                    r2 = dr.dot(dr);          // [nm^2]
                    fr2 = sigma * sigma / r2; // unitless
                    fr6 = fr2 * fr2 * fr2;    // unitless
                    fr = 48 * epsilon * fr6 * (fr6 - 0.5) / r2; // [kJ/(nm^2*mol)]
                    private_forces[i] += fr * dr;
                    private_forces[j] -= fr * dr;
                }
            }
            // merge the thread-local accumulators into the shared atom forces
#pragma omp critical
            {
                for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
                    particles.atoms[i]->force += private_forces[i];
                }
            }
        }
    }
    /*! Calculate the total energy using the Lennard-Jones potential
        \f[
        U_{ij}^{LJ} = 4 \epsilon \left( \left( \frac{\sigma}{r_{ij}} \right)^{12} - \left( \frac{\sigma}{r_{ij}} \right)^6\right)
        \f]
        (doc fix: the formula previously showed a spurious factor of \f$\pi\f$
        that the code never had).
    */
    inline static double energy(Particles& particles, Geometry* geometry) {
        double distance;
        double energy = 0;
        Eigen::Vector3d dr;
#pragma omp parallel for reduction(+:energy) schedule(dynamic, 50) private(distance, dr) shared(particles) if(particles.atoms.numOfAtoms >= 400)
        for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
            for (int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
                dr = geometry->disp(particles.atoms[i]->pos, particles.atoms[j]->pos); // [nm]
                distance = dr.norm(); // [nm]
                double fr = sigma / distance; // unitless
                double fr2 = fr * fr;         // unitless
                double fr6 = fr2 * fr2 * fr2; // unitless
                energy += fr6 * (fr6 - 1);    // unitless
            }
        }
        return 4 * epsilon * energy; // [kJ/mol]
    }
};
/*!
* \addtogroup magnetic
* @{
*/
/*! Magnetic potential
*/
/*! Dipole-dipole ("magnetic") r^-3 repulsion between all atom pairs, plus
 * r^-4 wall repulsion in x and y.  dipoleC is the dipole-dipole product
 * over the vacuum permittivity.
 * NOTE(review): wall_force() uses its own local magneticConstant (100.0),
 * unrelated to dipoleC used by wall_potential(); presumably intentional
 * tuning, but confirm the force is meant to be the gradient of the
 * potential. */
struct magnetic {
private:
static constexpr double dipoleC = 50.0;//8.3145; // [kJ*nm^3*mol^(-1)] (example of what is used in Faunus at 300 Kelvin) //!Dipole dipole product over the vacuum permittivity
public:
// Pair forces F = 3*C/r^5 * dr (gradient of C/r^3), plus the wall force
// on each atom.  Uses raw position differences, i.e. no periodic imaging.
inline static void forces(Particles& particles) {
Eigen::Vector3d dr;
/*for (int i = 0; i < atoms.numOfAtoms; i++) {
atoms[i]->force.setZero();
}*/
for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
for (int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
dr = particles.atoms[i]->pos - particles.atoms[j]->pos; // [nm]
double r = dr.norm(); // [nm]
double fr = 3.0 * dipoleC / (r * r * r * r * r); // [(kJ/(nm^2*mol)]
particles.atoms[i]->force += fr * dr; // [(kJ/(nm*mol)] = [dalton * nm/ps^2]
particles.atoms[j]->force -= fr * dr; // [(kJ/(nm*mol)] = [dalton * nm/ps^2]
particles.atoms.forceMatrix(i, j) = (fr * dr).norm();
}
particles.atoms[i]->force += wall_force(particles.atoms[i]);
}
}
// Pair energy sum of C/r^3 over i<j (wall terms are NOT included here).
inline static double energy(Particles& particles) {
double distance;
double energy = 0;
Eigen::Vector3d dr;
for(int i = 0; i < particles.atoms.numOfAtoms; i++) {
for (int j = i + 1; j < particles.atoms.numOfAtoms; j++) {
dr = particles.atoms[i]->pos - particles.atoms[j]->pos; // [nm]
distance = dr.norm(); // [nm]
energy += dipoleC / (distance * distance * distance); // [kJ/mol]
}
}
return energy;
}
/*! Magnetic repulsion from walls, only works for two dimensions
*
*/
inline static double wall_potential(Atom *atom){
double energy = 0;
double b = Base::boxDim/2;
// x, y: position relative to the box, shifted by boxDim
double x = atom->pos[0] - Base::boxDim;
double y = atom->pos[1] - Base::boxDim;
double diffX = b + x;
double diffY = b + y;
//Bottom wall
energy += 2.0 * b / (diffY * diffY * std::sqrt(b * b + 2 * b * y + b * b + y * y));
//Left wall
energy += 2.0 * b / (diffX * diffX * std::sqrt(b * b + 2 * b * x + b * b + x * x));
diffX = b - x;
diffY = b - y;
//Top wall
energy += 2.0 * b / (diffY * diffY * std::sqrt(b * b + 2 * b * y + b * b + y * y));
//Right wall
energy += 2.0 * b / (diffX * diffX * std::sqrt(b * b + 2 * b * x + b * b + x * x));
return dipoleC * energy;
}
// r^-4 repulsion from the four box walls (x and y only); returns the
// force vector scaled by the local magneticConstant.
inline static Eigen::Vector3d wall_force(Atom *atom){
double magneticConstant = 100.0; //magnetic potential per nm^2
Eigen::Vector3d force;
force.setZero();
// Left and right walls
force[0] += 1.0 / (atom->pos[0] * atom->pos[0] * atom->pos[0] * atom->pos[0]) -
1.0 / ((Base::boxDim - atom->pos[0]) * (Base::boxDim - atom->pos[0]) * (Base::boxDim - atom->pos[0]) * (Base::boxDim - atom->pos[0]));
// Top and Bottom walls
force[1] += 1.0 / (atom->pos[1] * atom->pos[1] * atom->pos[1] * atom->pos[1]) -
1.0 / ((Base::boxDim - atom->pos[1]) * (Base::boxDim - atom->pos[1]) * (Base::boxDim - atom->pos[1]) * (Base::boxDim - atom->pos[1]));
return force * magneticConstant;
}
};
/*! Ewald summation for the electrostatic energy/forces of a periodic system:
 *  real-space sum (erfc-screened), reciprocal-space sum over k-vectors, and
 *  a self-interaction correction.  initialize() must be called before
 *  energy()/forces(); reset() releases the k-space work arrays.
 *
 *  NOTE(review): the splitting parameter alpha is used inconsistently —
 *  energy() screens with erfc(alpha*r) while real_force() uses
 *  erfc(sqrt(alpha)*r) and exp(-alpha*r^2), and resFac uses
 *  exp(-k^2/(4*alpha^2)) while reciprocal_force() uses exp(-k^2/(4*alpha)).
 *  These cannot all be the same convention; confirm which one is intended
 *  before trusting force/energy consistency.
 */
struct ewald{
public:
    static int kNumMax;                           // capacity of resFac
    static double selfTerm;                       // precomputed self-energy correction
    static std::vector< Eigen::Vector3d > kVec;   // reciprocal lattice vectors
    static std::complex<double> *rkVec;           // structure factors rho(k)
    static double *kNorm;                         // |kVec[i]|
    static double *resFac;                        // per-k prefactor exp(-k^2/4a^2)/k^2 (x2 for kx>0)
    static double alpha;                          // Ewald splitting parameter
    static int kNum;                              // number of k-vectors actually stored

    // 3-component dot product for any indexable vector types.
    template<typename T, typename G>
    static double dot(T vec1, G vec2){
        return vec1[0]*vec2[0] + vec1[1]*vec2[1] + vec1[2]*vec2[2];
    }

    // Euclidean norm of a 3-component indexable vector.
    template<typename T>
    static double norm(T x){
        double norm = 0;
        norm = x[0]*x[0] + x[1]*x[1] + x[2]*x[2];
        return sqrt(norm);
    }

    // Abramowitz & Stegun 7.1.26 polynomial approximation of erfc(x),
    // extended to negative arguments via erfc(-x) = 2 - erfc(x).
    template<typename T>
    static inline T erfc_x( T x )
    {
        //static_assert(std::is_floating_point<T>::value, "type must be floating point");
        if(x < 0){
            return ( 2.0 - erfc_x(-x) );
        }
        T t = 1.0 / (1.0 + 0.3275911 * x);
        const T a1 =  0.254829592;
        const T a2 = -0.284496736;
        const T a3 =  1.421413741;
        const T a4 = -1.453152027;
        const T a5 =  1.061405429;
        return t * (a1 + t * (a2 + t * (a3 + t * (a4 + t * a5)))) * exp(-x * x);
    }

    // erf(x) via the complementary function above.
    template<typename T>
    static inline T erf_x( T x ) {
        return (1 - erfc_x(x));
    }

    // Release all k-space work arrays (safe to call before a re-initialize).
    static inline void reset(){
        kVec.clear();
        free(resFac);
        free(kNorm);
        free(rkVec);
    }

    // Build the k-vector list, the per-k prefactors, the structure factors
    // rho(k) for the current configuration, and the self-energy term.
    static inline void initialize(Particles& particles, Geometry* geometry){
        int i = 0;
        double r = 0;
        double qq = 0;
        double k2 = 0;
        kNumMax = 1000000;
        kNum = 0;
        resFac = (double*) malloc(kNumMax * sizeof(double));
        int kMax = 4; //half of the third root of number of reciprocal vectors
        double factor = 1;
        Eigen::Vector3d vec;
        alpha = 5.0 / geometry->box[0];
        // Only kx >= 0 is enumerated; the kx > 0 half-space is weighted by 2
        // to account for the (-kx) mirror images.
        for(int kx = 0; kx <= kMax; kx++){
            for(int ky = -kMax; ky <= kMax; ky++){
                for(int kz = -kMax; kz <= kMax; kz++){
                    factor = 1.0;
                    if(kx > 0){
                        factor *= 2;
                    }
                    vec[0] = (2.0 * constants::PI * kx / geometry->box[0]);
                    vec[1] = (2.0 * constants::PI * ky / geometry->box[1]);
                    vec[2] = (2.0 * constants::PI * kz / geometry->box[2]);
                    k2 = dot(vec, vec);
                    if(fabs(k2) > 1e-5){  // skip the k = 0 term
                        kVec.push_back(vec);
                        resFac[kNum] = factor * exp(-k2/(4.0 * alpha * alpha))/k2;
                        kNum++;
                    }
                }
            }
        }
        kNorm = (double*) malloc(kNum * sizeof(double));
        for(i = 0; i < kNum; i++){
            kNorm[i] = norm(kVec[i]);
        }
        // structure factors rho(k) = sum_i q_i * exp(i k . r_i)
        rkVec = (std::complex<double>*) malloc(kNum * sizeof(std::complex<double>));
        std::complex<double> rho;
        std::complex<double> rk;
        std::complex<double> charge;
        for(int k = 0; k < kNum; k++){
            rho = 0;
            for(int i = 0; i < particles.atoms.numOfAtoms; i++){
                rk.imag(std::sin(particles.atoms[i]->pos.dot(kVec[k])));
                rk.real(std::cos(particles.atoms[i]->pos.dot(kVec[k])));
                charge = particles.atoms[i]->q;
                rk = rk * charge;
                rho += rk;
            }
            rkVec[k] = rho;
        }
        // self-interaction correction: (alpha/sqrt(pi)) * sum_i q_i^2
        selfTerm = 0;
        for(int i = 0; i < particles.atoms.numOfAtoms; i++){
            selfTerm += get_self_correction(particles.atoms[i]);
        }
        selfTerm = alpha/sqrt(constants::PI) * selfTerm;
    }

    // Reciprocal-space energy sum |rho(k)|^2 * resFac(k) (volume prefactor
    // is applied by the caller).
    static inline double get_reciprocal(){
        double energy = 0;
        for(int k = 0; k < kNum; k++){
            energy += std::norm(rkVec[k]) * resFac[k];
        }
        return energy;
    }

    // q^2 contribution of one atom to the self-term.
    static inline double get_self_correction(Atom *p){
        double self = p->q * p->q;
        return self;
    }

    // Total Ewald energy: screened real-space pair sum (cut off at r <= 25)
    // plus the reciprocal sum, minus the self term.  The final 0.1 is a
    // unit-conversion factor; tinfoil (conducting) boundary conditions,
    // i.e. no surface dipole term.
    static inline double energy(Particles& particles, Geometry* geometry){
        double real = 0;
        double reciprocal = 0;
        double distance = 0;
        double energy = 0;
        reciprocal = get_reciprocal();
        for(int i = 0; i < particles.atoms.numOfAtoms; i++){
            for(int j = i + 1; j < particles.atoms.numOfAtoms; j++){
                distance = geometry->dist(particles.atoms[i]->pos, particles.atoms[j]->pos);
                if(distance <= 25){
                    energy = erfc_x(distance * alpha) / distance;
                    real += particles.atoms[i]->q * particles.atoms[j]->q * energy;
                }
            }
        }
        reciprocal = 2.0 * constants::PI/(geometry->box[0] * geometry->box[1] * geometry->box[2]) * reciprocal;
        return ((real + reciprocal) - selfTerm) * 0.1; //Tinfoil boundary conditions
    }

    // Total Ewald force on every atom (real + reciprocal part, same 0.1
    // unit factor as energy()).
    // NOTE(review): loops over particles.numOfParticles but indexes
    // particles.atoms[i]; every other routine here uses
    // particles.atoms.numOfAtoms — confirm the two counts coincide.
    static inline void forces(Particles& particles, Geometry* geometry){
        for(int i = 0; i < particles.numOfParticles; i++){
            particles.atoms[i]->force += ((real_force(particles.atoms[i], particles, geometry) + reciprocal_force(particles.atoms[i], particles, geometry))) * 0.1;
        }
    }

    // Reciprocal-space force on atom 'a'.
    static inline Eigen::Vector3d reciprocal_force(Atom* a, Particles& particles, Geometry* geometry){
        Eigen::Vector3d force;
        force.setZero();
        for(int i = 0; i < particles.atoms.numOfAtoms; i++){
            Eigen::Vector3d disp = geometry->disp(a->pos, particles.atoms[i]->pos);
            Eigen::Vector3d contribution;
            contribution.setZero();
            for(int k = 0; k < kNum; k++){
                contribution += 4 * constants::PI * kVec[k] / (kNorm[k] * kNorm[k]) * std::exp(-1.0 * kNorm[k] * kNorm[k] / (4 * alpha)) * std::sin(kVec[k].dot(disp));
            }
            // FIX: scale only this atom's contribution by q_i/V.  The
            // original applied "force *= q_i/V" inside the i-loop, which
            // rescaled the whole accumulated sum on every iteration.
            force += contribution * (particles.atoms[i]->q / geometry->volume);
        }
        force *= a->q;
        return force;
    }

    // Real-space (erfc-screened) force on atom 'a' from all other atoms.
    static inline Eigen::Vector3d real_force(Atom* a, Particles& particles, Geometry* geometry){
        Eigen::Vector3d force;
        force.setZero();
        for(int i = 0; i < particles.atoms.numOfAtoms; i++){
            if(a->index != i) {
                Eigen::Vector3d disp = geometry->disp(a->pos, particles.atoms[i]->pos);
                force += particles.atoms[i]->q * (2.0 * std::sqrt(alpha / constants::PI) *
                                                  std::exp(-1.0 * alpha * disp.norm() * disp.norm()) +
                                                  1.0 / disp.norm() * erfc_x(std::sqrt(alpha) * disp.norm())) *
                         disp / (disp.norm() * disp.norm());
            }
        }
        force *= a->q;
        return force;
    }
};

// Definitions of the static members declared in struct ewald.
int ewald::kNumMax;
double ewald::selfTerm;
std::vector< Eigen::Vector3d > ewald::kVec;
std::complex<double> *ewald::rkVec;
double *ewald::kNorm;
double *ewald::resFac;
double ewald::alpha;
int ewald::kNum;
} |
GB_unop__lnot_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_uint8_uint8)
// op(A') function: GB (_unop_tran__lnot_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = lnot (Ax [p]) for uint8 input and output: each entry becomes
 * 1 if it was zero and 0 otherwise.  Cx and Ax may alias (pure elementwise
 * write).  When Ab is non-NULL, A is bitmap and only positions with
 * Ab [p] != 0 carry entries; other positions are skipped (C->b was already
 * copied from A->b by the caller).  Work is split across 'nthreads' with a
 * static OpenMP schedule.  Returns GrB_NO_VALUE when this operator/type
 * combination is compiled out via GB_DISABLE. */
GrB_Info GB (_unop_apply__lnot_uint8_uint8)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* dense case: every position 0..anz-1 holds an entry */
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot (A'): transpose A while applying the logical-not operator to
 * each uint8 entry.  The actual bucket-transpose loop lives in the shared
 * template GB_unop_transpose.c, which expands using the GB_* macros defined
 * above (GB_CAST_OP etc.).  Workspaces/A_slice partition A's vectors across
 * 'nthreads'; returns GrB_NO_VALUE when compiled out via GB_DISABLE. */
GrB_Info GB (_unop_tran__lnot_uint8_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB085-threadprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races.
*/
#include <stdio.h>
#include <assert.h>
#include <omp.h>
int sum0 = 0; // file-scope accumulator written by foo(); shared state across calls
int sum1 = 0; // reference accumulator used by the OpenMP reduction loop in main()
void foo(int i)
{
    // Accumulate i into the file-scope counter sum0.
    sum0 += i;
}
/* Accumulate 0..len-1 twice (serially via foo(), and via an OpenMP
 * reduction) and assert both totals agree. */
int main()
{
int len = 1000;
int i;
int sum = 0;
/* serial accumulation into the file-scope sum0 via foo() */
for (i = 0; i <= len - 1; i += 1) {
foo(i);
}
sum = sum + sum0;
/* reference calculation: same total computed with a race-free reduction */
#pragma omp parallel for private (i) reduction (+:sum1) firstprivate (len)
for (i = 0; i <= len - 1; i += 1) {
sum1 = sum1 + i;
}
printf("sum=%d; sum1=%d\n",sum,sum1);
/* ROSE-expanded form of assert(sum == sum1); left verbatim (generated code) */
(((void )(sizeof(((sum == sum1?1 : 0))))) , ((
{
if (sum == sum1)
;
else
__assert_fail("sum==sum1","DRB085-threadprivate-orig-no.c",75,__PRETTY_FUNCTION__);
})));
return 0;
}
|
displacement_lagrangemultiplier_frictional_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "utilities/color_utilities.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "custom_utilities/active_set_utilities.h"
#include "utilities/constraint_utilities.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierFrictionalContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierFrictionalContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierFrictionalContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierFrictionalContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
/// The epsilon tolerance definition
static constexpr double Tolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/// Constructor.
/**
* @param DispRatioTolerance Relative tolerance for displacement error
* @param DispAbsTolerance Absolute tolerance for displacement error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier error
* @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged
* @param EnsureContact To check if the contact is lost
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria(
const TDataType DispRatioTolerance,
const TDataType DispAbsTolerance,
const TDataType LMNormalRatioTolerance,
const TDataType LMNormalAbsTolerance,
const TDataType LMTangentStickRatioTolerance,
const TDataType LMTangentStickAbsTolerance,
const TDataType LMTangentSlipRatioTolerance,
const TDataType LMTangentSlipAbsTolerance,
const TDataType NormalTangentRatio,
const bool EnsureContact = false,
const bool PureSlip = false,
const bool PrintingOutput = false
)
: BaseType()
{
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, PureSlip);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
// The displacement solution
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
// The normal contact solution
mLMNormalRatioTolerance = LMNormalRatioTolerance;
mLMNormalAbsTolerance = LMNormalAbsTolerance;
// The tangent contact solution
mLMTangentStickRatioTolerance = LMTangentStickRatioTolerance;
mLMTangentStickAbsTolerance = LMTangentStickAbsTolerance;
mLMTangentStickRatioTolerance = LMTangentSlipRatioTolerance;
mLMTangentStickAbsTolerance = LMTangentSlipAbsTolerance;
// We get the ratio between the normal and tangent that will accepted as converged
mNormalTangentRatio = NormalTangentRatio;
}
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
: BaseType()
{
// The default parameters (any key missing from ThisParameters falls back to these)
Parameters default_parameters = Parameters(R"(
{
"ensure_contact" : false,
"pure_slip" : false,
"print_convergence_criterion" : false,
"displacement_relative_tolerance" : 1.0e-4,
"displacement_absolute_tolerance" : 1.0e-9,
"contact_displacement_relative_tolerance" : 1.0e-4,
"contact_displacement_absolute_tolerance" : 1.0e-9,
"frictional_stick_contact_displacement_relative_tolerance" : 1.0e-4,
"frictional_stick_contact_displacement_absolute_tolerance" : 1.0e-9,
"frictional_slip_contact_displacement_relative_tolerance" : 1.0e-4,
"frictional_slip_contact_displacement_absolute_tolerance" : 1.0e-9,
"ratio_normal_tangent_threshold" : 1.0e-4
})" );
// Validation must run before any key is read below
ThisParameters.ValidateAndAssignDefaults(default_parameters);
// The displacement solution
mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();
// The normal contact solution
mLMNormalRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
mLMNormalAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();
// The tangent contact solution (stick and slip tolerances are independent)
mLMTangentStickRatioTolerance = ThisParameters["frictional_stick_contact_displacement_relative_tolerance"].GetDouble();
mLMTangentStickAbsTolerance = ThisParameters["frictional_stick_contact_displacement_absolute_tolerance"].GetDouble();
mLMTangentSlipRatioTolerance = ThisParameters["frictional_slip_contact_displacement_relative_tolerance"].GetDouble();
mLMTangentSlipAbsTolerance = ThisParameters["frictional_slip_contact_displacement_absolute_tolerance"].GetDouble();
// We get the ratio between the normal and tangent that will be accepted as converged
mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble();
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool());
}
//* Copy constructor: member-wise copy of flags and all tolerance values.
DisplacementLagrangeMultiplierFrictionalContactCriteria( DisplacementLagrangeMultiplierFrictionalContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance)
,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance)
,mLMTangentStickRatioTolerance(rOther.mLMTangentStickRatioTolerance)
,mLMTangentStickAbsTolerance(rOther.mLMTangentStickAbsTolerance)
,mLMTangentSlipRatioTolerance(rOther.mLMTangentSlipRatioTolerance)
,mLMTangentSlipAbsTolerance(rOther.mLMTangentSlipAbsTolerance)
,mNormalTangentRatio(rOther.mNormalTangentRatio)
{
}
/// Destructor.
~DisplacementLagrangeMultiplierFrictionalContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
// Getting process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Initialize the squared-norm accumulators (solution and increment) for each
// DoF family: displacement, normal LM, tangent-stick LM, tangent-slip LM
TDataType disp_solution_norm = 0.0, normal_lm_solution_norm = 0.0, tangent_lm_stick_solution_norm = 0.0, tangent_lm_slip_solution_norm = 0.0, disp_increase_norm = 0.0, normal_lm_increase_norm = 0.0, tangent_lm_stick_increase_norm = 0.0, tangent_lm_slip_increase_norm = 0.0;
IndexType disp_dof_num(0), lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0);
// First iterator
const auto it_dof_begin = rDofSet.begin();
// The nodes array
auto& r_nodes_array = rModelPart.Nodes();
// Auxiliar values
std::size_t dof_id = 0;
TDataType dof_value = 0.0, dof_incr = 0.0;
// The number of active dofs
const std::size_t number_active_dofs = rb.size();
// Loop over Dofs; all accumulators are in the reduction clause, the scratch
// variables are firstprivate, so the loop is race-free
#pragma omp parallel for firstprivate(dof_id, dof_value, dof_incr) reduction(+:disp_solution_norm, normal_lm_solution_norm, tangent_lm_slip_solution_norm, tangent_lm_stick_solution_norm, disp_increase_norm, normal_lm_increase_norm, tangent_lm_slip_increase_norm, tangent_lm_stick_increase_norm, disp_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
dof_id = it_dof->EquationId();
// Check dof id is solved
// NOTE(review): assumes mActiveDofs has at least number_active_dofs entries
// (filled in InitializeSolutionStep) — confirm against the builder-and-solver
if (dof_id < number_active_dofs) {
if (mActiveDofs[dof_id] == 1) {
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
const auto& r_curr_var = it_dof->GetVariable();
if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) {
// The normal of the node (TODO: how to solve this without accessing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
// Frictionless node (mu == 0): the whole LM component counts as normal
if (mu < std::numeric_limits<double>::epsilon()) {
normal_lm_solution_norm += std::pow(dof_value, 2);
normal_lm_increase_norm += std::pow(dof_incr, 2);
} else {
// Split the component into its normal projection and the tangential remainder
const double normal_x = it_node->FastGetSolutionStepValue(NORMAL_X);
const TDataType normal_dof_value = dof_value * normal_x;
const TDataType normal_dof_incr = dof_incr * normal_x;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
}
++lm_dof_num;
} else if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) {
// Same treatment as the X component, projected on NORMAL_Y
const auto it_node = r_nodes_array.find(it_dof->Id());
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
if (mu < std::numeric_limits<double>::epsilon()) {
normal_lm_solution_norm += std::pow(dof_value, 2);
normal_lm_increase_norm += std::pow(dof_incr, 2);
} else {
const double normal_y = it_node->FastGetSolutionStepValue(NORMAL_Y);
const TDataType normal_dof_value = dof_value * normal_y;
const TDataType normal_dof_incr = dof_incr * normal_y;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
}
++lm_dof_num;
} else if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) {
// Same treatment as the X component, projected on NORMAL_Z
const auto it_node = r_nodes_array.find(it_dof->Id());
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
if (mu < std::numeric_limits<double>::epsilon()) {
normal_lm_solution_norm += std::pow(dof_value, 2);
normal_lm_increase_norm += std::pow(dof_incr, 2);
} else {
const double normal_z = it_node->FastGetSolutionStepValue(NORMAL_Z);
const TDataType normal_dof_value = dof_value * normal_z;
const TDataType normal_dof_incr = dof_incr * normal_z;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
}
++lm_dof_num;
} else { // We will assume is displacement dof
disp_solution_norm += dof_value * dof_value;
disp_increase_norm += dof_incr * dof_incr;
++disp_dof_num;
}
}
}
}
// Guard against division by ~zero below: empty increments count as converged ratios
if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0;
if(normal_lm_increase_norm < Tolerance) normal_lm_increase_norm = 1.0;
if(tangent_lm_stick_increase_norm < Tolerance) tangent_lm_stick_increase_norm = 1.0;
if(tangent_lm_slip_increase_norm < Tolerance) tangent_lm_slip_increase_norm = 1.0;
if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0;
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
// Relative (ratio) and absolute (per-DoF RMS increment) error measures
const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
const TDataType normal_lm_ratio = normal_lm_solution_norm > Tolerance ? std::sqrt(normal_lm_increase_norm/normal_lm_solution_norm) : 0.0;
const TDataType tangent_lm_stick_ratio = tangent_lm_stick_solution_norm > Tolerance ? std::sqrt(tangent_lm_stick_increase_norm/tangent_lm_stick_solution_norm) : 0.0;
const TDataType tangent_lm_slip_ratio = tangent_lm_slip_solution_norm > Tolerance ? std::sqrt(tangent_lm_slip_increase_norm/tangent_lm_slip_solution_norm) : 0.0;
const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num);
const TDataType normal_lm_abs = std::sqrt(normal_lm_increase_norm)/ static_cast<TDataType>(lm_dof_num);
const TDataType tangent_lm_stick_abs = lm_stick_dof_num > 0 ? std::sqrt(tangent_lm_stick_increase_norm)/ static_cast<TDataType>(lm_stick_dof_num) : 0.0;
const TDataType tangent_lm_slip_abs = lm_slip_dof_num > 0 ? std::sqrt(tangent_lm_slip_increase_norm)/ static_cast<TDataType>(lm_slip_dof_num) : 0.0;
const TDataType normal_tangent_stick_ratio = tangent_lm_stick_abs/normal_lm_abs;
const TDataType normal_tangent_slip_ratio = tangent_lm_slip_abs/normal_lm_abs;
// We print the results // TODO: Replace for the new log
// NOTE(review): "DoF ONVERGENCE CHECK" in the strings below is a typo for
// "CONVERGENCE"; it is runtime output text and deliberately left unchanged here
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << tangent_lm_stick_abs << mLMTangentStickAbsTolerance << tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << tangent_lm_slip_abs << mLMTangentSlipAbsTolerance;
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" NORMAL LAGRANGE MUL:\tRATIO = ") << normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" STICK LAGRANGE MUL:\tRATIO = ") << tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentStickRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentStickAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" SLIP LAGRANGE MUL:\tRATIO = ") << tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentSlipRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentSlipAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " NORMAL LAGRANGE MUL:\tRATIO = " << normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << normal_lm_abs << " EXP.ABS = " << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " STICK LAGRANGE MUL:\tRATIO = " << tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentStickRatioTolerance << " ABS = " << tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentStickAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " SLIP LAGRANGE MUL:\tRATIO = " << tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentSlipRatioTolerance << " ABS = " << tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentSlipAbsTolerance << std::endl;
}
}
}
// We check if converged: each family passes on either its relative or absolute
// tolerance; tangent families may also pass via the normal/tangent ratio
const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) ? true : (normal_lm_ratio <= mLMNormalRatioTolerance || normal_lm_abs <= mLMNormalAbsTolerance) && (tangent_lm_stick_ratio <= mLMTangentStickRatioTolerance || tangent_lm_stick_abs <= mLMTangentStickAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (tangent_lm_slip_ratio <= mLMTangentSlipRatioTolerance || tangent_lm_slip_abs <= mLMTangentSlipAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio);
if (disp_converged && lm_converged) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FGRN("       Achieved"));
else
r_table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED("   Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
}
}
return false;
}
}
else // In this case all the displacements are imposed!
return true;
}
/**
* This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart ) override
{
    BaseType::mConvergenceCriteriaIsInitialized = true;

    // Register the output columns on the table utility, once per criteria instance
    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED)) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();

        // Appends the standard four-column group (ratio/expected/abs/expected) for one field
        const auto add_group = [&r_table](const char* pRatioLabel) {
            r_table.AddColumn(pRatioLabel, 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
        };

        add_group("DP RATIO");       // displacement
        add_group("N.LM RATIO");     // normal Lagrange multiplier
        if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
            add_group("STI. RATIO"); // stick tangent LM (omitted in pure-slip mode)
        }
        add_group("SLIP RATIO");     // slip tangent LM
        r_table.AddColumn("CONVERGENCE", 15);

        mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Filling mActiveDofs when MPC exist; PostCriteria later reads this vector
// to decide which equation ids participate in the norms
ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}
/**
* @brief This function finalizes the non-linear iteration
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
void FinalizeNonLinearIteration(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Calling base criteria
BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);
// The current process info: mark the active set as stale so it is recomputed
// on the next iteration
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement
TDataType mLMNormalRatioTolerance; /// The ratio threshold for the norm of the LM (normal)
TDataType mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the LM (normal)
TDataType mLMTangentStickRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-stick)
TDataType mLMTangentStickAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent-stick)
TDataType mLMTangentSlipRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-slip)
TDataType mLMTangentSlipAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent-slip)
TDataType mNormalTangentRatio; /// The ratio to accept a non converged tangent component in case
std::vector<int> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierFrictionalContactCriteria
///@name Local flags creation
///@{
/// Local Flags
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(3));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H */
|
convolution_winograd_transform_pack4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6, 3x3) input transform, fp16 storage/arithmetic, pack4 NEON layout.
// Each 8x8 input tile (6x6 stride with 2-pixel overlap) is multiplied by the
// transform matrix B^T (shown below) along rows first (into tmp), then along
// columns, and the 64 resulting coefficients are scattered into bottom_blob_tm.
static void conv3x3s1_winograd63_transform_input_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
// tile grid: one 8x8 input tile per 6x6 block of output positions
const int w_tiles = (w - 2) / 6;
const int h_tiles = (h - 2) / 6;
const int tiles = w_tiles * h_tiles;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// channels are transformed independently -> parallelize over input channels
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// staging buffer for the row-transformed tile; indexed
// [row coefficient][original row][pack4 lane]
__fp16 tmp[8][8][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// top-left pixel of this 8x8 tile (pack4: 4 fp16 lanes per pixel)
const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 4;
// pass 1: apply the transform to each of the 8 tile rows
for (int m = 0; m < 8; m++)
{
float16x4_t _r00 = vld1_f16(r0);
float16x4_t _r01 = vld1_f16(r0 + 4);
float16x4_t _r02 = vld1_f16(r0 + 8);
float16x4_t _r03 = vld1_f16(r0 + 12);
float16x4_t _r04 = vld1_f16(r0 + 16);
float16x4_t _r05 = vld1_f16(r0 + 20);
float16x4_t _r06 = vld1_f16(r0 + 24);
float16x4_t _r07 = vld1_f16(r0 + 28);
float16x4_t _tmp0m = vfma_n_f16(vsub_f16(_r00, _r06), vsub_f16(_r04, _r02), 5.25f);
float16x4_t _tmp7m = vfma_n_f16(vsub_f16(_r07, _r01), vsub_f16(_r03, _r05), 5.25f);
vst1_f16(tmp[0][m], _tmp0m);
vst1_f16(tmp[7][m], _tmp7m);
float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_r02, _r06), _r04, 4.25f);
float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_r01, _r05), _r03, 4.25f);
float16x4_t _tmp1m = vadd_f16(_tmp12a, _tmp12b);
float16x4_t _tmp2m = vsub_f16(_tmp12a, _tmp12b);
vst1_f16(tmp[1][m], _tmp1m);
vst1_f16(tmp[2][m], _tmp2m);
float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
float16x4_t _tmp3m = vadd_f16(_tmp34a, _tmp34b);
float16x4_t _tmp4m = vsub_f16(_tmp34a, _tmp34b);
vst1_f16(tmp[3][m], _tmp3m);
vst1_f16(tmp[4][m], _tmp4m);
float16x4_t _tmp56a = vfma_n_f16(_r06, vfms_n_f16(_r02, _r04, 1.25f), 4.f);
float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
float16x4_t _tmp5m = vadd_f16(_tmp56a, _tmp56b);
float16x4_t _tmp6m = vsub_f16(_tmp56a, _tmp56b);
vst1_f16(tmp[5][m], _tmp5m);
vst1_f16(tmp[6][m], _tmp6m);
// next input row of the tile
r0 += w * 4;
}
// destination layout: 64 coefficient planes, each plane holding
// one pack4 value per tile; planes are `tiles * 4` fp16 apart
__fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 4;
__fp16* r0_tm_1 = r0_tm_0 + tiles * 4;
__fp16* r0_tm_2 = r0_tm_0 + tiles * 8;
__fp16* r0_tm_3 = r0_tm_0 + tiles * 12;
__fp16* r0_tm_4 = r0_tm_0 + tiles * 16;
__fp16* r0_tm_5 = r0_tm_0 + tiles * 20;
__fp16* r0_tm_6 = r0_tm_0 + tiles * 24;
__fp16* r0_tm_7 = r0_tm_0 + tiles * 28;
// pass 2: same transform applied down the columns of tmp
for (int m = 0; m < 8; m++)
{
float16x4_t _tmp00 = vld1_f16(tmp[m][0]);
float16x4_t _tmp01 = vld1_f16(tmp[m][1]);
float16x4_t _tmp02 = vld1_f16(tmp[m][2]);
float16x4_t _tmp03 = vld1_f16(tmp[m][3]);
float16x4_t _tmp04 = vld1_f16(tmp[m][4]);
float16x4_t _tmp05 = vld1_f16(tmp[m][5]);
float16x4_t _tmp06 = vld1_f16(tmp[m][6]);
float16x4_t _tmp07 = vld1_f16(tmp[m][7]);
float16x4_t _r0tm0 = vfma_n_f16(vsub_f16(_tmp00, _tmp06), vsub_f16(_tmp04, _tmp02), 5.25f);
float16x4_t _r0tm7 = vfma_n_f16(vsub_f16(_tmp07, _tmp01), vsub_f16(_tmp03, _tmp05), 5.25f);
float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_tmp02, _tmp06), _tmp04, 4.25f);
float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_tmp01, _tmp05), _tmp03, 4.25f);
float16x4_t _r0tm1 = vadd_f16(_tmp12a, _tmp12b);
float16x4_t _r0tm2 = vsub_f16(_tmp12a, _tmp12b);
float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
float16x4_t _r0tm3 = vadd_f16(_tmp34a, _tmp34b);
float16x4_t _r0tm4 = vsub_f16(_tmp34a, _tmp34b);
float16x4_t _tmp56a = vfma_n_f16(_tmp06, vfms_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
float16x4_t _r0tm5 = vadd_f16(_tmp56a, _tmp56b);
float16x4_t _r0tm6 = vsub_f16(_tmp56a, _tmp56b);
vst1_f16(r0_tm_0, _r0tm0);
vst1_f16(r0_tm_1, _r0tm1);
vst1_f16(r0_tm_2, _r0tm2);
vst1_f16(r0_tm_3, _r0tm3);
vst1_f16(r0_tm_4, _r0tm4);
vst1_f16(r0_tm_5, _r0tm5);
vst1_f16(r0_tm_6, _r0tm6);
vst1_f16(r0_tm_7, _r0tm7);
// advance to the next group of 8 coefficient planes
r0_tm_0 += tiles * 32;
r0_tm_1 += tiles * 32;
r0_tm_2 += tiles * 32;
r0_tm_3 += tiles * 32;
r0_tm_4 += tiles * 32;
r0_tm_5 += tiles * 32;
r0_tm_6 += tiles * 32;
r0_tm_7 += tiles * 32;
}
}
}
}
}
// Winograd F(6x6, 3x3) output transform, fp16 storage/arithmetic, pack4 NEON layout.
// Folds each tile's 8x8 transformed accumulation back into a 6x6 spatial block
// using the matrix A^T (shown below): rows first (into tmp), then columns,
// adding the per-channel bias in the final pass.
static void conv3x3s1_winograd63_transform_output_pack4_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 6;
const int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
const __fp16* biasptr = bias;
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
// output channels are independent -> parallelize over them
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
// bias may be absent (bias-free convolution) -> use zeros
float16x4_t _bias0 = biasptr ? vld1_f16(biasptr + p * 4) : vdup_n_f16(0.f);
// staging buffer: 6 output rows x 8 pending columns x pack4 lanes
__fp16 tmp[6][8][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// source: 64 coefficient planes, `tiles * 4` fp16 apart (see input transform)
const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 4;
const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4;
const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8;
const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12;
const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16;
const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20;
const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24;
const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28;
// destination: top-left of the 6x6 spatial block in the output image
__fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4;
// pass 1: reduce 8 coefficient rows to 6 rows (no bias yet)
for (int m = 0; m < 8; m++)
{
float16x4_t _out0tm0 = vld1_f16(output0_tm_0);
float16x4_t _out0tm1 = vld1_f16(output0_tm_1);
float16x4_t _out0tm2 = vld1_f16(output0_tm_2);
float16x4_t _out0tm3 = vld1_f16(output0_tm_3);
float16x4_t _out0tm4 = vld1_f16(output0_tm_4);
float16x4_t _out0tm5 = vld1_f16(output0_tm_5);
float16x4_t _out0tm6 = vld1_f16(output0_tm_6);
float16x4_t _out0tm7 = vld1_f16(output0_tm_7);
float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2);
float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2);
float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4);
float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4);
float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6);
float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6);
float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f));
float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1_f16(tmp[0][m], _tmp0m);
vst1_f16(tmp[2][m], _tmp2m);
vst1_f16(tmp[4][m], _tmp4m);
float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f));
vst1_f16(tmp[1][m], _tmp1m);
vst1_f16(tmp[3][m], _tmp3m);
vst1_f16(tmp[5][m], _tmp5m);
// advance to the next group of 8 coefficient planes
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
// pass 2: reduce 8 columns to 6, add bias, write spatial pixels
for (int m = 0; m < 6; m++)
{
float16x4_t _tmp00 = vld1_f16(tmp[m][0]);
float16x4_t _tmp01 = vld1_f16(tmp[m][1]);
float16x4_t _tmp02 = vld1_f16(tmp[m][2]);
float16x4_t _tmp03 = vld1_f16(tmp[m][3]);
float16x4_t _tmp04 = vld1_f16(tmp[m][4]);
float16x4_t _tmp05 = vld1_f16(tmp[m][5]);
float16x4_t _tmp06 = vld1_f16(tmp[m][6]);
float16x4_t _tmp07 = vld1_f16(tmp[m][7]);
float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02);
float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02);
float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04);
float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04);
float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06);
float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06);
float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)));
float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1_f16(output0, _out00);
vst1_f16(output0 + 8, _out02);
vst1_f16(output0 + 16, _out04);
float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)));
vst1_f16(output0 + 4, _out01);
vst1_f16(output0 + 12, _out03);
vst1_f16(output0 + 20, _out05);
// next output row
output0 += outw * 4;
}
}
}
}
}
|
opencl_keyring_fmt_plug.c | /*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>,
* Copyright (c) 2012 Dhiru Kholia <dhiru at openwall.com> and
* Copyright (c) 2012-2014 magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keyring;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keyring);
#else
#include <string.h>
#include <openssl/aes.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "options.h"
#include "sha2.h"
#include "md5.h"
#include "stdint.h"
#define FORMAT_LABEL "keyring-opencl"
#define FORMAT_NAME "GNOME Keyring"
#define ALGORITHM_NAME "SHA256 OpenCL AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PLAINTEXT_LENGTH (55-8)
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define SALTLEN 8
typedef unsigned char guchar; /* How many aliases do we need?! */
typedef unsigned int guint;
typedef int gint;
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} keyring_password;
typedef struct {
uint8_t key[16];
uint8_t iv[16];
} keyring_hash;
typedef struct {
uint32_t length;
uint32_t iterations;
uint8_t salt[SALTLEN];
} keyring_salt;
static int *cracked;
static int any_cracked;
static struct custom_salt {
unsigned int iterations;
unsigned char salt[SALTLEN];
unsigned int crypto_size;
unsigned int inlined;
unsigned char ct[LINE_BUFFER_SIZE / 2]; /* after hex conversion */
} *cur_salt;
static struct fmt_tests keyring_tests[] = {
{"$keyring$db1b562e453a0764*3221*16*0*02b5c084e4802369c42507300f2e5e56", "openwall"},
//{"$keyring$4f3f1557a7da17f5*2439*144*0*12215fabcff6782aa23605ab2cd843f7be9477b172b615eaa9130836f189d32ffda2e666747378f09c6e76ad817154daae83a36c0a0a35f991d40bcfcba3b7807ef57a0ce4c7f835bf34c6e358f0d66aa048d73dacaaaf6d7fa4b3510add6b88cc237000ff13cb4dbd132db33be3ea113bedeba80606f86662cc226af0dad789c703a7df5ad8700542e0f7a5e1f10cf0", "password"},
{NULL}
};
static keyring_password *inbuffer;
static keyring_hash *outbuffer;
static keyring_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
#define insize (sizeof(keyring_password) * global_work_size)
#define outsize (sizeof(keyring_hash) * global_work_size)
#define settingsize (sizeof(keyring_salt))
#define cracked_size (sizeof(*cracked) * global_work_size)
#define OCL_CONFIG "keyring"
#define STEP 0
#define SEED 256
static const char * warn[] = {
"xfer: " , ", crypt: " , ", xfer: "
};
//This file contains auto-tuning routine(s). It has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Upper bound on the local work-group size this kernel can use on the
 * current device, as reported by the shared auto-tune helper. */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/*
 * Maximum global work size for this format.
 * Zero means "no format-imposed limit" -- the auto-tuner decides.
 */
static size_t get_task_max_size()
{
	const size_t no_limit = 0;

	return no_limit;
}
/*
 * Default local work-group size before auto-tuning: GPUs get a
 * wavefront-friendly 64; CPU devices prefer tiny groups (8 on Intel's
 * CPU runtime, 1 otherwise).
 */
static size_t get_default_workgroup()
{
	if (!cpu(device_info[gpu_id]))
		return 64;

	return get_platform_vendor_id(platform_id) == DEV_INTEL ? 8 : 1;
}
/* Allocate host staging buffers and device buffers sized for
 * global_work_size, then bind the device buffers to the kernel's three
 * arguments (in, out, settings). Paired with release_clobj(). */
static void create_clobj(size_t global_work_size, struct fmt_main *self)
{
cl_int cl_error;
/* host-side buffers; inbuffer/cracked zeroed via mem_calloc */
inbuffer = (keyring_password*) mem_calloc(insize);
outbuffer = (keyring_hash*) mem_alloc(outsize);
cracked = mem_calloc(cracked_size);
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
/* bind buffers to kernel arguments once; contents change per batch */
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Release the device buffers and host staging memory created by
 * create_clobj(). MEM_FREE also NULLs the pointers. */
static void release_clobj(void)
{
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(cracked);
}
/* Format teardown: free buffers first, then the kernel and program. */
static void done(void)
{
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
/* One-time OpenCL setup: compile the kernel with format constants baked
 * in as -D defines, then run the shared auto-tuner to pick work sizes
 * (which in turn calls create_clobj). */
static void init(struct fmt_main *self)
{
char build_opts[64];
cl_int cl_error;
snprintf(build_opts, sizeof(build_opts),
"-DPLAINTEXT_LENGTH=%d -DSALTLEN=%d",
PLAINTEXT_LENGTH, SALTLEN);
opencl_init("$JOHN/kernels/keyring_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "keyring", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj,
release_clobj, sizeof(keyring_password), 0);
//Auto tune execution from shared/included code.
/* duration target in ns: CPU devices get a smaller budget */
autotune_run(self, 1, 0, cpu(device_info[gpu_id]) ?
500000000ULL : 1000000000ULL);
}
/*
 * Accept only strings of decimal digits at most 9 characters long, so a
 * later atoi() cannot overflow (avoids atoi's undefined behavior).
 * Returns 1 when the string qualifies, 0 otherwise.
 */
static int looks_like_nice_int(char *p)
{
	size_t n;

	for (n = 0; p[n]; n++)
		if (p[n] < '0' || p[n] > '9')
			return 0;

	return n <= 9;
}
/*
 * Validate a "$keyring$<salt hex>*<iterations>*<ctlen>*<inlined>*<ct hex>"
 * input line. Returns 1 if well formed, 0 otherwise.
 * NOTE: uses strtok() on a private copy of the line (consistent with the
 * rest of this format's parsing code; not re-entrant).
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int ctlen;

	if (strncmp(ciphertext, "$keyring$", 9) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	if (keeptr == NULL)
		goto err;
	ctcopy += 9;
	if ((p = strtok(ctcopy, "*")) == NULL)	/* salt */
		goto err;
	if (strlen(p) != SALTLEN * 2)
		goto err;
	while (*p)
		if (atoi16[ARCH_INDEX(*p++)] == 0x7f)
			goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* crypto size */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	ctlen = atoi(p);
	if ((p = strtok(NULL, "*")) == NULL)	/* inlined - unused? TODO */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	/* cur_salt->ct holds only LINE_BUFFER_SIZE / 2 bytes after hex
	 * decoding; the previous "> LINE_BUFFER_SIZE" bound allowed twice
	 * that and let get_salt() / crypt_all() overflow the ct buffer. */
	if (ctlen > LINE_BUFFER_SIZE / 2)
		goto err;
	if (strlen(p) != ctlen * 2)	/* hex string must match declared size */
		goto err;
	if (strlen(p) < 32)	/* this shouldn't happen for valid hashes */
		goto err;
	while (*p)
		if (atoi16[ARCH_INDEX(*p++)] == 0x7f)
			goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a ciphertext line into a (static) custom_salt and return it.
 * NOTE(review): assumes valid() accepted the line first -- no NULL or
 * bounds checks are done on the tokens here; confirm callers always run
 * valid() before get_salt(). */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
/* NOTE(review): this allocation is never used for the returned salt (a
 * pointer to the static cs is returned below); it only makes cur_salt
 * non-NULL. Looks vestigial -- confirm before removing. */
if (!cur_salt)
cur_salt = mem_alloc_tiny(sizeof(struct custom_salt),
MEM_ALIGN_WORD);
ctcopy += 9; /* skip over "$keyring$" */
p = strtok(ctcopy, "*");
/* SALTLEN bytes of salt, hex-decoded */
for (i = 0; i < SALTLEN; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtok(NULL, "*");
cs.iterations = atoi(p);
p = strtok(NULL, "*");
cs.crypto_size = atoi(p);
p = strtok(NULL, "*");
cs.inlined = atoi(p);
p = strtok(NULL, "*");
/* encrypted blob, hex-decoded; crypto_size was bounded by valid() */
for (i = 0; i < cs.crypto_size; i++)
cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
/*
 * Make `salt` the active salt: stage it into the GPU-side settings
 * structure and enqueue the transfer (non-blocking; crypt_all()
 * synchronizes with clFinish before the results are used).
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, SALTLEN);
	currentsalt.length = SALTLEN;
	currentsalt.iterations = cur_salt->iterations;

	/* Fixed mojibake: "&currentsalt" had been corrupted to "¤tsalt"
	 * (HTML-entity damage to "&curren"), which does not compile. */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize,
		&currentsalt, 0, NULL, NULL),
		"Copy setting to gpu");
}
/*
 * Store candidate `key` in the GPU transfer buffer slot `index`,
 * truncated to PLAINTEXT_LENGTH (the kernel's limit). Not NUL-terminated
 * in the buffer; the length field carries the size.
 */
static void keyring_set_key(char *key, int index)
{
	size_t n = strlen(key);

	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	memcpy(inbuffer[index].v, key, n);
	inbuffer[index].length = n;
}
/*
 * Reconstruct the NUL-terminated candidate for slot `index` from the
 * transfer buffer. Returns a pointer to static storage (overwritten by
 * the next call -- standard for this API).
 */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	uint32_t n = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, n);
	out[n] = '\0';
	return out;
}
/*
 * A decrypted keyring blob starts with the MD5 digest of the remaining
 * payload. Recompute the digest over buffer[16..len) and compare it with
 * the stored one. Returns 1 on match (successful decryption), 0 otherwise.
 */
static int verify_decrypted_buffer(unsigned char *buffer, int len)
{
	MD5_CTX ctx;
	unsigned char digest[16];

	MD5_Init(&ctx);
	MD5_Update(&ctx, buffer + 16, len - 16);
	MD5_Final(digest, &ctx);

	return !memcmp(digest, buffer, 16);
}
/*
 * Run one batch: the SHA256-based KDF executes on the GPU (producing an
 * AES key + IV per candidate); the host then AES-CBC-decrypts the salt's
 * blob and verifies the embedded MD5. Results land in cracked[] /
 * any_cracked for the cmp_* callbacks.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	/* round global work size up to a multiple of the local work size */
	global_work_size = local_work_size ? (count + local_work_size - 1) / local_work_size * local_work_size : count;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");

	/// Run kernel
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]),
		"Run kernel");
	HANDLE_CLERROR(clFinish(queue[gpu_id]), "clFinish");

	/// Read the result back
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_FALSE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	/// Await completion of all the above
	HANDLE_CLERROR(clFinish(queue[gpu_id]), "clFinish");

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		/* Must hold the whole encrypted blob: crypto_size can be as
		 * large as sizeof(cur_salt->ct). The previous fixed 32-byte
		 * buffer overflowed the stack for larger (non-inlined)
		 * hashes accepted by valid(). */
		unsigned char buffer[sizeof(cur_salt->ct)];
		unsigned char iv[16];
		AES_KEY akey;
		unsigned char *p = outbuffer[index].iv;

		//dump_stuff_msg(inbuffer[index].length, outbuffer[index].key, 16);
		// on GPU now!
		// symkey_generate_simple(password, n_password, salt, 8, iterations, key, iv);
		memcpy(iv, p, 16);
		memcpy(buffer, cur_salt->ct, cur_salt->crypto_size);
		memset(&akey, 0, sizeof(AES_KEY));
		if (AES_set_decrypt_key(outbuffer[index].key, 128, &akey) < 0) {
			fprintf(stderr, "AES_set_decrypt_key failed!\n");
		}
		AES_cbc_encrypt(buffer, buffer, cur_salt->crypto_size, &akey, iv, AES_DECRYPT);
		if (verify_decrypted_buffer(buffer, cur_salt->crypto_size))
		{
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* crypt_all() fully verified every candidate already; any_cracked is
 * non-zero iff at least one index in the batch decrypted correctly. */
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
/* Per-candidate verdict recorded by crypt_all(). */
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
/*
 * Nothing further to check: crypt_all() already verified the MD5 of the
 * decrypted blob, so a cracked[] hit is exact.
 */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
#if FMT_MAIN_VERSION > 11
/* Expose the salt's KDF iteration count as a tunable-cost statistic. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
#endif
/* Format descriptor: registers the OpenCL GNOME Keyring implementation
 * with John's format framework (field meanings per formats.h). This
 * format has no binary -- verification happens inside crypt_all(). */
struct fmt_main fmt_opencl_keyring = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{
"iteration count",
},
#endif
keyring_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
/* no binary: defaults below are no-ops for this format */
fmt_default_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{
iteration_count,
},
#endif
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
set_salt,
keyring_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
Toolbox.h | #ifndef TOOLBOX_H_
#define TOOLBOX_H_
#include "BaseObject.h"
#include "Buffer.h"
#include "DataStructures/NCSList.h"
#include "DataStructures/FastNCSList.h"
#include "DataStructures/FastKTuple.h"
#include "DataStructures/Fifo.h"
#include "Threading/Thread.h"
#include <string>
#include <time.h>
#include <vector>
#include <assert.h>
#include <stdio.h>
#include <sstream>
#include <type_traits>
namespace Lazarus
{
class Toolbox : public BaseObject {
public:
static const char* ERRNO_LOOKUP[125];
Toolbox();
virtual ~Toolbox();
/**
* This method will return true if u is an instance of a
* class which is a subclass of T. Otherwise false is being returned.
* Works only for pointer types.
* */
template<class T, class U>
static bool isSubClass(const U* u)
{
if( dynamic_cast<T*>(u) == NULL )
return false;
return true;
}
/**
* This method will return true if u is an instance of a
* class which is a subclass of T. Otherwise false is being returned.
* Works only for references.
* */
template<class T, class U>
static bool isSubClass(const U& u)
{
if( dynamic_cast<T&>(u) == NULL )
return false;
return true;
}
/**
* Prints a readable error message for errno-codes.
* */
static void decodeErrno(int error);
/**
* Replaces any existing '//' with '/' and ensures that the output end with a single /.
* */
static std::string ensureValidPath(const std::string& s);
//endianness methods
static short int swapShortInt(short int i);
static short unsigned int swapShortInt(short unsigned int i);
static int swapInt(int i);
static unsigned int swapUInt(unsigned int i);
static long int swapLongInt(long int i);
static unsigned long int swapULongInt(unsigned long int i);
static long long int swapLongLongInt(long long int i);
static unsigned long long int swapULongLongInt(unsigned long long int i);
static double swapDouble(double i);
static float swapFloat(float i);
//c-string conversion methods
static char* floatToCString(float value, unsigned int digits = std::numeric_limits< float >::max_digits10);
static char* doubleToCString(double value, unsigned int digits = std::numeric_limits< double >::max_digits10);
static char* longDoubleToCString(long double value, unsigned int digits = std::numeric_limits< long double >::max_digits10);
static char* intToCString(int value);
static char* longToCString(long value);
static double cStringToDouble(char* s);
static long double cStringToLongDouble(char* s);
static int cStringToInt(char* s);
static long int cStringToLongInt(char* s);
static long long int cStringToLongLongInt(char* s);
//std::string conversion methods
static std::string doubleToString(double d, unsigned int digits = std::numeric_limits< double >::max_digits10);
static std::string longDoubleToString(long double d, unsigned int digits = std::numeric_limits< long double >::max_digits10);
static std::string floatToString(float d, unsigned int digits = std::numeric_limits< float >::max_digits10);
static std::string uintToString(unsigned int d);
static std::string ushortToString(unsigned short d);
static std::string longToString(long d);
static std::string longLongToString(long long d);
static std::string ulongToString(unsigned long d);
static std::string ulongLongToString(unsigned long long d);
static std::string intToString(int d);
static std::string shortToString(short d);
static std::string size_tToString(size_t d);
static long double stringToLongDouble(const std::string& s);
static double stringToDouble(const std::string& s);
static float stringToFloat(const std::string& s);
static unsigned int stringToUInt(const std::string& s);
static unsigned short stringToUShort(const std::string& s);
static long stringToLong(const std::string& s);
static long long stringToLongLong(const std::string& s);
static unsigned long stringToULong(const std::string& s);
static unsigned long long stringToULongLong(const std::string& s);
static int stringToInt(const std::string& s);
static short stringToShort(const std::string& s);
static size_t stringTosize_t(const std::string& s);
//will add padding zeros in front of the integer number
template<typename T>
static std::string numToString(T d, unsigned int digits = 0)
{
if(isInteger(d) == false)
{
printf("ERROR: can not add digits to non-integer value\n");
return "";
}
//get the amount of digits
unsigned int digs = countDigits(d);
std::ostringstream convert;
//if there are less digits than requested
if(digs < digits)
for(unsigned int i=0;i<digits-digs;++i)
convert << (T)0;
//add the number
convert << d;
return convert.str();
}
static unsigned char stringToUCharB(const std::string& s, int base);
static char stringToCharB(const std::string& s, int base);
static unsigned int stringToUIntB(const std::string& s, int base);
static long stringToLongB(const std::string& s, int base);
static long long stringToLongLongB(const std::string& s, int base);
static unsigned long stringToULongB(const std::string& s, int base);
static unsigned long long stringToULongLongB(const std::string& s, int base);
static int stringToIntB(const std::string& s, int base);
static size_t stringTosize_tB(const std::string& s, int base);
static std::string ucharToStringB(unsigned char d, int base);
static std::string charToStringB(char d, int base);
static std::string uintToStringB(unsigned int d, int base);
static std::string longToStringB(long d, int base);
static std::string longLongToStringB(long long d, int base);
static std::string ulongToStringB(unsigned long d, int base);
static std::string ulongLongToStringB(unsigned long long d, int base);
static std::string intToStringB(int d, int base);
static std::string size_tToStringB(size_t d, int base);
//std::string methods
//counts the number of 'a'-occurrences in s
static unsigned int countChar(const std::string& s, char a);
//counts the number of 'a'-occurrences in s
static unsigned int countLineBreaks(const std::string& s);
//splits the string with delimiter 'a'
static Lazarus::FastNCSList<std::string>* splitStringF(const std::string&s, char a);
static Lazarus::NCSList<std::string>* splitString(const std::string&s, char a);
/**
* splits the string with delimiter 'a' but also keeps segments i.e. <"....."> together.
* In a nutshell, this method will split the string along 'a' but ignore any occurence of 'a'
* in a segment <"...">. '"' inside a segment can be escaped via '\"', yet this will not
* remove the '\"' from the segment.
*/
static Lazarus::FastNCSList<std::string>* splitStringFSegment(const std::string&s, char a);
static Lazarus::NCSList<std::string>* splitStringSegment(const std::string&s, char a);
//deletes all occurrences of 'a' in the string
static void deleteChar(std::string& s, char a);
/**
* Returns the number of a occurrences in a file, otherwise -1;
* */
static int countCharF(FILE** f, char a);
/**
* Returns the number of linebreaks in a file, otherwise -1;
* */
static int countLineBreaksF(FILE** f);
/**
* returns a '/'-terminated copy of the given string. If the string is already /-terminated a string copy is returned.
*/
static std::string slash_terminate_path(const std::string& path);
/**
* Creates an empty file (i.e. filled with NULL). Returns 0 in case of success, -1 otherwise.
* Unit may be the count in bytes or e.g. 1M, 1G etc.
* */
static int createEmptyFile(const std::string& file, unsigned long long count, const std::string unit = std::string("1024"));
/**
* Creates a file filled with random data. Returns 0 in case of success, -1 otherwise.
* Unit may be the count in bytes or e.g. 1M, 1G etc.
* */
static int createRandomFile(const std::string& file, unsigned long long count, const std::string unit = std::string("1024"));
/**
* Returns the directory of the executable (i.e. not including the executable and not trailing slash).
* */
static std::string getCurrentDirectory();
/**
* Returns true if the specified file exists, otherwise false
* */
static bool checkFileExistence(const std::string& file);
/**
* -1 in case of errors, filesize otherwise.
*/
static long long int getFileSize(const std::string& filename);
/**
* -1 in case of errors, filesize otherwise. The file stream must be opened with 'fopen64' before calling this function.
*/
static long long int getFileSize(FILE* file);
static void printSystemConfig();
static void exitThread(void* data);
static void dummyThreadTask(Thread*, void* data);
static void sleep_s(unsigned long s);
static void sleep_ms(unsigned long ms);
static void sleep_us(unsigned long us);
static void sleep_ns(unsigned long ns);
static std::string readFileToString(const std::string& filename);
/**
* In case of e.g. tmpfs file systems the determined size might be block aligned e.g. multiples of
* 4kb while the file content returned by fread might be smaller.
* In other words, the returned char array might be larger than the actual content.
* */
static char* readFileToCString(const std::string& filename);
static Buffer* readFileToBuffer(const std::string& filename);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static unsigned int getExpMetricUInt(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static int getExpMetricInt(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static unsigned long long getExpMetricULongLong(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static long long getExpMetricLongLong(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static unsigned long getExpMetricULong(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static long getExpMetricLong(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static unsigned short getExpMetricUShort(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static short getExpMetricShort(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static float getExpMetricFloat(const std::string& s);
/**
* This function will return the integer value 24000 from strings like e.g. 24k,
* i.e. the suffix will be parsed and the value expanded.
* Accepted strings are of form NUM|[k|K|m|M|g|G|t|T|p|P|e|E], depending on the functions
* return type overflows might occur with respect to the suffix.
* */
static double getExpMetricDouble(const std::string& s);
/**
* This function reads a CSV token (i.e. value) from the current position within the open file.
* It is assumed that the file contains the following format:
* VAL<SEP>VAL<SEP>VAL<TER><LBREAK>
* The value is inserted into the given string.
* The function returns 0 in case of a complete value without hitting a TER symbol,
* 1 in case of hitting a TER symbol, 2 in case of hitting EOF, -1 else
* */
static int readCSVValue(FILE** pFile, std::string& s,char sep=',',char ter=';', bool linebreaks_after_terminator=true);
/**
* Returns 0 on success, -1 otherwise.
*/
static int readLinesToTuple(const std::string& filename, FastKTuple<std::string>& data);
/**
* The array must be preallocated and be large enough. 0 on success, -1 otherwise.
*/
static int readFileToArray(const std::string& filename, unsigned char* buffer);
/**
* The array must be preallocated and be large enough. 0 on success, -1 otherwise.
*/
static int readTextFileToArray(const std::string& filename, unsigned char* buffer);
/**
* Will overwrite any previous file content.
*/
static int writeArrayToFile(const std::string& filename, unsigned char* buffer, unsigned long long size);
/**
* Will overwrite any previous file content.
*/
static int writeArrayToTextFile(const std::string& filename, unsigned char* buffer, unsigned long long size);
/**
* Will append to the file.
*/
static int appendArrayToFile(const std::string& filename, unsigned char* buffer, unsigned long long size);
/**
* Will append to the file.
*/
static int appendArrayToTextFile(const std::string& filename, unsigned char* buffer, unsigned long long size);
/**
* The array must be preallocated and be large enough. 0 on success, -1 otherwise.
* The offset is the initial starting point within the file.
*/
static int readFileToArray(FILE* file, unsigned char* buffer, unsigned long long bytes, unsigned long long offset = 0);
/**
* This function will write the data to the file depending on how the file handle was obtained.
*/
static int writeArrayToFile(FILE* file, unsigned char* buffer, unsigned long long size);
static int char_peek(FILE** fp);
/**
* Use only true c-strings for the CString-methods!!
*/
static void writeStringToFile(const std::string& s, const std::string& filename);
static void writeCStringToFile(char* s,const std::string& filename);
static void appendStringToFile(const std::string& s, const std::string& filename);
static void appendCStringToFile(char* s,const std::string& filename);
static void writeStringVectorToFile(Fifo<std::string>& sv, const std::string& filename);
static void writeStringVectorToFile(std::vector<std::string>& sv, const std::string& filename);
static void writeCStringVectorToFile(std::vector<char*>& sv,const std::string& filename);
static void appendStringVectorToFile(std::vector<std::string>& sv, const std::string& filename);
static void appendCStringVectorToFile(std::vector<char*>& sv,const std::string& filename);
static void writeStringVectorToFile(FastNCSList<std::string>& sv, const std::string& filename);
static void writeCStringVectorToFile(FastNCSList<char*>& sv,const std::string& filename);
static void appendStringVectorToFile(FastNCSList<std::string>& sv, const std::string& filename);
static void appendCStringVectorToFile(FastNCSList<char*>& sv,const std::string& filename);
static void assertInteger(double val);
static void assertInteger(float val);
//asserts that b|a
static void assertDivisible(unsigned int a, unsigned int b);
//alignment helpers
/**
* Will return the first CPUs level 0 cache size.
* */
static unsigned long long getCPUCache0Size();
/**
* Will return the first CPUs level 0 cache line size.
* */
static unsigned long long getCPUCache0LineSize();
/**
* Will return the first CPUs level 0 cache line partition unit in bytes.
* */
static unsigned long long getCPUCache0LinePartition();
/**
* Will return a a size aligned to the CPUs level 0 cache line size.
* */
static unsigned long long alignToCPUCache0(unsigned long long size);
/**
* Returns the number of CPUs in the system.
* */
static unsigned long long getCPUCount();
/**
 * Compile-time check: does T belong to the supported set of built-in
 * integer types (including size_t)? Floating point types yield false.
 * The value parameter is only used for template argument deduction.
 */
template<typename T>
static bool isInteger(T val)
{
    const bool integral =
        std::is_same<T,char>::value          || std::is_same<T,unsigned char>::value      ||
        std::is_same<T,short>::value         || std::is_same<T,unsigned short>::value     ||
        std::is_same<T,int>::value           || std::is_same<T,unsigned int>::value       ||
        std::is_same<T,long int>::value      || std::is_same<T,unsigned long>::value      ||
        std::is_same<T,long long int>::value || std::is_same<T,unsigned long long>::value ||
        std::is_same<T,size_t>::value;
    return integral;
}
/**
 * Count the number of decimal digits of an integral value
 * (e.g. 0 -> 1, 42 -> 2, -123 -> 3; the sign is not counted).
 * Returns 0 and prints an error for non-integer types.
 *
 * Fix: the previous implementation iterated an 'unsigned int' divisor
 * (10, 100, 1000, ...) which wrapped around for values >= ~4e9 and
 * produced wrong digit counts for 64 bit types. Repeatedly dividing
 * the value itself cannot overflow and also handles negative values.
 */
template<typename T>
static unsigned int countDigits(T val)
{
    // only works for integer values
    if(isInteger(val) == false)
    {
        printf("ERROR: can not count digits for non-integer values\n");
        return 0;
    }
    unsigned int digits = 1;
    // strip one decimal digit per iteration; terminates for any T
    while(val / 10 != 0)
    {
        val = val / 10;
        ++digits;
    }
    return digits;
}
/**
 * Compile-time check: is T one of the supported primitive arithmetic
 * types (integers and floating point)? Pointers, classes and bool
 * are not in the set and yield false.
 */
template<typename T>
static bool isPrimitive()
{
    return std::is_same<T,char>::value          || std::is_same<T,unsigned char>::value      ||
           std::is_same<T,short>::value         || std::is_same<T,unsigned short>::value     ||
           std::is_same<T,int>::value           || std::is_same<T,unsigned int>::value       ||
           std::is_same<T,long int>::value      || std::is_same<T,unsigned long>::value      ||
           std::is_same<T,long long int>::value || std::is_same<T,unsigned long long>::value ||
           std::is_same<T,float>::value         || std::is_same<T,double>::value             ||
           std::is_same<T,long double>::value;
}
/**
 * Will fill a >large< array with the given byte value faster than a
 * naive per-byte loop by replicating a pre-filled block of 'quantum'
 * bytes via memcpy. The quantum MUST divide the array size.
 *
 * Fix: 'iterations' was declared 'unsigned char', silently truncating
 * any block count above 255 (e.g. array_size=2048, quantum=4 gives
 * 512 blocks -> truncated to 0) and leaving most of the array untouched.
 * */
static void setArrayVal(unsigned char val, unsigned char* array, unsigned int array_size, unsigned int quantum)
{
    // number of memcpy blocks; must be a full-width integer
    unsigned int iterations = array_size / quantum;
    // pattern block that gets replicated across the target array
    unsigned char zeropad[quantum];
    for(unsigned int i=0;i<quantum;i++)
    {
        zeropad[i] = val;
    }
    for(unsigned int i=0;i<iterations;i++)
    {
        memcpy(array+i*quantum, zeropad, quantum);
    }
}
/**
 * Parallel variant of setArrayVal: fills a >large< array with the given
 * byte value, distributing the memcpy blocks across OpenMP threads.
 * The quantum MUST divide the array size.
 *
 * Fix: 'iterations' was declared 'unsigned char', silently truncating
 * any block count above 255 and leaving most of the array untouched.
 * */
static void setArrayValParallel(unsigned char val, unsigned char* array, unsigned int array_size, unsigned int quantum)
{
    // number of memcpy blocks; must be a full-width integer
    unsigned int iterations = array_size / quantum;
    // pattern block that gets replicated across the target array
    unsigned char zeropad[quantum];
    for(unsigned int i=0;i<quantum;i++)
    {
        zeropad[i] = val;
    }
    // blocks are disjoint, so the copies are safe to run concurrently
    #pragma omp parallel for
    for(unsigned int i=0;i<iterations;i++)
    {
        memcpy(array+i*quantum, zeropad, quantum);
    }
}
static const unsigned int m_buffer_size = 100;
CLASS_RTTI(Lazarus::Toolbox)
};
// Small helper class used with the Thread facility: stores an id and
// exposes a callback method.
class Testcallee
{
public:
    // i: identifier stored in m_i
    Testcallee(unsigned int i);
    // Callback invoked with the calling thread and an opaque user
    // parameter; presumably a thread entry point -- confirm at call site.
    void p(Thread* t,void* param);
private:
    unsigned int m_i;  // identifier passed at construction
};
}
#endif /* TOOLBOX_H_ */
|
TBBHashmap.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include <tbb/concurrent_unordered_map.h>
#include <limits>
#include <unordered_map>
#include "open3d/core/hashmap/CPU/CPUHashmapBufferAccessor.hpp"
#include "open3d/core/hashmap/DeviceHashmap.h"
namespace open3d {
namespace core {
template <typename Key, typename Hash>
// CPU hashmap backed by tbb::concurrent_unordered_map. Keys map to
// addr_t slot addresses inside a flat key/value buffer that is managed
// by buffer_ctx_.
class TBBHashmap : public DeviceHashmap {
public:
    TBBHashmap(int64_t init_capacity,
               int64_t dsize_key,
               int64_t dsize_value,
               const Device& device);

    ~TBBHashmap();

    // Rebuild the map with the given bucket count, preserving entries.
    void Rehash(int64_t buckets) override;

    // Insert count keys/values. output_masks[i] tells whether key i was
    // newly inserted; output_addrs[i] receives its buffer slot (0 if not).
    void Insert(const void* input_keys,
                const void* input_values,
                addr_t* output_addrs,
                bool* output_masks,
                int64_t count) override;

    // Insert keys only; values are zero-filled (see InsertImpl).
    void Activate(const void* input_keys,
                  addr_t* output_addrs,
                  bool* output_masks,
                  int64_t count) override;

    // Look up count keys; masks mark hits, addrs receive slots (0 on miss).
    void Find(const void* input_keys,
              addr_t* output_addrs,
              bool* output_masks,
              int64_t count) override;

    // Erase count keys and release their buffer slots.
    void Erase(const void* input_keys,
               bool* output_masks,
               int64_t count) override;

    // Write all active buffer slots into output_indices; returns the count.
    int64_t GetActiveIndices(addr_t* output_indices) override;

    int64_t Size() const override;
    int64_t GetBucketCount() const override;
    std::vector<int64_t> BucketSizes() const override;
    float LoadFactor() const override;

    std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> GetImpl()
            const {
        return impl_;
    }

protected:
    // Underlying concurrent map: key -> slot address in the flat buffer.
    std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> impl_;
    // Allocator/accessor for the flat key/value buffer.
    std::shared_ptr<CPUHashmapBufferAccessor> buffer_ctx_;

    // Shared insertion core used by Insert/Activate/Rehash.
    void InsertImpl(const void* input_keys,
                    const void* input_values,
                    addr_t* output_addrs,
                    bool* output_masks,
                    int64_t count);

    // (Re)allocate buffers for the given capacity and reset the map.
    void Allocate(int64_t capacity);
};
// Construct and immediately allocate the backing buffer and map.
template <typename Key, typename Hash>
TBBHashmap<Key, Hash>::TBBHashmap(int64_t init_capacity,
                                  int64_t dsize_key,
                                  int64_t dsize_value,
                                  const Device& device)
    : DeviceHashmap(init_capacity, dsize_key, dsize_value, device) {
    Allocate(init_capacity);
}

// Members are shared_ptrs; their destructors release everything.
template <typename Key, typename Hash>
TBBHashmap<Key, Hash>::~TBBHashmap() {}

// Number of key/value pairs currently stored.
template <typename Key, typename Hash>
int64_t TBBHashmap<Key, Hash>::Size() const {
    return impl_->size();
}
// Insert count keys/values, growing the map first if the insertion
// could exceed the current capacity.
template <typename Key, typename Hash>
void TBBHashmap<Key, Hash>::Insert(const void* input_keys,
                                   const void* input_values,
                                   addr_t* output_addrs,
                                   bool* output_masks,
                                   int64_t count) {
    int64_t new_size = Size() + count;
    if (new_size > this->capacity_) {
        int64_t bucket_count = GetBucketCount();
        // Preserve the current elements-per-bucket ratio when growing.
        float avg_capacity_per_bucket =
                float(this->capacity_) / float(bucket_count);
        // At least double the buckets, or enough to hold new_size at the
        // current ratio, whichever is larger.
        int64_t expected_buckets = std::max(
                bucket_count * 2,
                int64_t(std::ceil(new_size / avg_capacity_per_bucket)));
        Rehash(expected_buckets);
    }
    InsertImpl(input_keys, input_values, output_addrs, output_masks, count);
}
// Insert keys without values; InsertImpl zero-fills the value slots
// when input_values is nullptr.
template <typename Key, typename Hash>
void TBBHashmap<Key, Hash>::Activate(const void* input_keys,
                                     addr_t* output_addrs,
                                     bool* output_masks,
                                     int64_t count) {
    Insert(input_keys, nullptr, output_addrs, output_masks, count);
}
// Parallel lookup of count keys. Each iteration writes only its own
// output slots, so the loop is safe to run across OpenMP threads.
template <typename Key, typename Hash>
void TBBHashmap<Key, Hash>::Find(const void* input_keys,
                                 addr_t* output_addrs,
                                 bool* output_masks,
                                 int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);
#pragma omp parallel for
    for (int64_t i = 0; i < count; ++i) {
        const Key& key = input_keys_templated[i];

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        // 0 is the sentinel slot for "not found".
        output_addrs[i] = flag ? iter->second : 0;
    }
}
// Erase count keys, freeing their buffer slots. Runs serially:
// unsafe_erase is not safe to call concurrently on a
// tbb::concurrent_unordered_map.
template <typename Key, typename Hash>
void TBBHashmap<Key, Hash>::Erase(const void* input_keys,
                                  bool* output_masks,
                                  int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);

    for (int64_t i = 0; i < count; ++i) {
        const Key& key = input_keys_templated[i];

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        if (flag) {
            // Return the slot to the buffer allocator, then drop the entry.
            buffer_ctx_->DeviceFree(iter->second);
            impl_->unsafe_erase(iter);
        }
    }
}
// Copy the buffer slot address of every stored entry into
// output_indices (caller-allocated, must hold Size() elements).
// Returns the number of entries written.
template <typename Key, typename Hash>
int64_t TBBHashmap<Key, Hash>::GetActiveIndices(addr_t* output_indices) {
    int64_t count = impl_->size();
    int64_t i = 0;
    for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) {
        output_indices[i] = static_cast<int64_t>(iter->second);
    }

    return count;
}
// Rebuild the hashmap with the requested bucket count: snapshot all
// active entries, reallocate the backing buffer, re-insert, then rehash
// the underlying map.
template <typename Key, typename Hash>
void TBBHashmap<Key, Hash>::Rehash(int64_t buckets) {
    int64_t iterator_count = Size();

    // Snapshot active key/value pairs before the buffers are replaced.
    Tensor active_keys;
    Tensor active_values;

    if (iterator_count > 0) {
        Tensor active_addrs({iterator_count}, Dtype::Int32, this->device_);
        GetActiveIndices(static_cast<addr_t*>(active_addrs.GetDataPtr()));

        Tensor active_indices = active_addrs.To(Dtype::Int64);
        active_keys = this->GetKeyBuffer().IndexGet({active_indices});
        active_values = this->GetValueBuffer().IndexGet({active_indices});
    }

    // Scale capacity so the elements-per-bucket ratio stays constant.
    float avg_capacity_per_bucket =
            float(this->capacity_) / float(GetBucketCount());

    int64_t new_capacity =
            int64_t(std::ceil(buckets * avg_capacity_per_bucket));

    // Allocate() resets both the flat buffer and the underlying map.
    Allocate(new_capacity);

    // Re-insert the snapshot into the fresh map/buffer.
    if (iterator_count > 0) {
        Tensor output_addrs({iterator_count}, Dtype::Int32, this->device_);
        Tensor output_masks({iterator_count}, Dtype::Bool, this->device_);

        InsertImpl(active_keys.GetDataPtr(), active_values.GetDataPtr(),
                   static_cast<addr_t*>(output_addrs.GetDataPtr()),
                   output_masks.GetDataPtr<bool>(), iterator_count);
    }

    impl_->rehash(buckets);
}
// Bucket count of the underlying concurrent map.
template <typename Key, typename Hash>
int64_t TBBHashmap<Key, Hash>::GetBucketCount() const {
    return impl_->unsafe_bucket_count();
}

// Per-bucket element counts; uses the 'unsafe' TBB accessors, so this
// must not race with concurrent modifications.
template <typename Key, typename Hash>
std::vector<int64_t> TBBHashmap<Key, Hash>::BucketSizes() const {
    int64_t bucket_count = impl_->unsafe_bucket_count();
    std::vector<int64_t> ret;
    for (int64_t i = 0; i < bucket_count; ++i) {
        ret.push_back(impl_->unsafe_bucket_size(i));
    }
    return ret;
}

// Average elements per bucket, as reported by the underlying map.
template <typename Key, typename Hash>
float TBBHashmap<Key, Hash>::LoadFactor() const {
    return impl_->load_factor();
}
// Insertion core: concurrently insert keys into the map, then lazily
// allocate a buffer slot and copy the key/value payload only for keys
// that were actually new. input_values == nullptr means "zero-fill".
template <typename Key, typename Hash>
void TBBHashmap<Key, Hash>::InsertImpl(const void* input_keys,
                                       const void* input_values,
                                       addr_t* output_addrs,
                                       bool* output_masks,
                                       int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);

#pragma omp parallel for
    for (int64_t i = 0; i < count; ++i) {
        output_addrs[i] = 0;
        output_masks[i] = false;

        const Key& key = input_keys_templated[i];

        // Try to insert a dummy address.
        auto res = impl_->insert({key, 0});

        // Lazy copy key value pair to buffer only if succeeded
        if (res.second) {
            addr_t dst_kv_addr = buffer_ctx_->DeviceAllocate();
            auto dst_kv_iter = buffer_ctx_->ExtractIterator(dst_kv_addr);

            // Copy templated key to buffer
            *static_cast<Key*>(dst_kv_iter.first) = key;

            // Copy/reset non-templated value in buffer
            uint8_t* dst_value = static_cast<uint8_t*>(dst_kv_iter.second);
            if (input_values != nullptr) {
                const uint8_t* src_value =
                        static_cast<const uint8_t*>(input_values) +
                        this->dsize_value_ * i;
                std::memcpy(dst_value, src_value, this->dsize_value_);
            } else {
                std::memset(dst_value, 0, this->dsize_value_);
            }

            // Update from dummy 0
            res.first->second = dst_kv_addr;

            // Write to return variables
            output_addrs[i] = dst_kv_addr;
            output_masks[i] = true;
        }
    }
}
// Replace the backing storage: fresh HashmapBuffer, fresh buffer
// accessor (reset to an empty free-list), and a fresh concurrent map.
// Any previous contents are discarded; Rehash() re-inserts them.
template <typename Key, typename Hash>
void TBBHashmap<Key, Hash>::Allocate(int64_t capacity) {
    this->capacity_ = capacity;

    this->buffer_ =
            std::make_shared<HashmapBuffer>(this->capacity_, this->dsize_key_,
                                            this->dsize_value_, this->device_);

    buffer_ctx_ = std::make_shared<CPUHashmapBufferAccessor>(
            this->capacity_, this->dsize_key_, this->dsize_value_,
            this->buffer_->GetKeyBuffer(), this->buffer_->GetValueBuffer(),
            this->buffer_->GetHeap());
    buffer_ctx_->Reset();

    impl_ = std::make_shared<tbb::concurrent_unordered_map<Key, addr_t, Hash>>(
            capacity, Hash());
}
} // namespace core
} // namespace open3d
|
parfor.h | // Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PARFOR_H_
#define PARFOR_H_
#include <omp.h>
#include <cstdint>
#include <utility>
#include <vector>
namespace qsim {
/**
* Helper struct for executing for-loops in parallel across multiple threads.
*/
template <uint64_t MIN_SIZE>
struct ParallelForT {
  // num_threads: how many OpenMP threads to use for large loops.
  explicit ParallelForT(unsigned num_threads) : num_threads(num_threads) {}

  // GetIndex0 and GetIndex1 are useful when we need to know how work was
  // divided between threads, for instance, for reusing partial sums obtained
  // by RunReduceP.
  // Thread thread_id owns the half-open range [GetIndex0, GetIndex1).
  // Loops smaller than MIN_SIZE are not split: thread 0 gets everything.
  uint64_t GetIndex0(uint64_t size, unsigned thread_id) const {
    return size >= MIN_SIZE ? size * thread_id / num_threads : 0;
  }

  uint64_t GetIndex1(uint64_t size, unsigned thread_id) const {
    return size >= MIN_SIZE ? size * (thread_id + 1) / num_threads : size;
  }

  // Run func(num_threads, thread_id, i, args...) for i in [0, size),
  // splitting the index range across threads when size >= MIN_SIZE and
  // more than one thread is configured; otherwise runs serially.
  template <typename Function, typename... Args>
  void Run(uint64_t size, Function&& func, Args&&... args) const {
    if (num_threads > 1 && size >= MIN_SIZE) {
#pragma omp parallel num_threads(num_threads)
      {
        unsigned n = omp_get_num_threads();
        unsigned m = omp_get_thread_num();

        uint64_t i0 = GetIndex0(size, m);
        uint64_t i1 = GetIndex1(size, m);

        for (uint64_t i = i0; i < i1; ++i) {
          func(n, m, i, args...);
        }
      }
    } else {
      for (uint64_t i = 0; i < size; ++i) {
        func(1, 0, i, args...);
      }
    }
  }

  // Like Run, but folds each thread's func results with op and returns
  // the per-thread partial results (one entry per thread, or a single
  // entry for the serial path). Combine them with op to get the total.
  template <typename Function, typename Op, typename... Args>
  std::vector<typename Op::result_type> RunReduceP(
      uint64_t size, Function&& func, Op&& op, Args&&... args) const {
    std::vector<typename Op::result_type> partial_results;

    if (num_threads > 1 && size >= MIN_SIZE) {
      partial_results.resize(num_threads, 0);

#pragma omp parallel num_threads(num_threads)
      {
        unsigned n = omp_get_num_threads();
        unsigned m = omp_get_thread_num();

        uint64_t i0 = GetIndex0(size, m);
        uint64_t i1 = GetIndex1(size, m);

        typename Op::result_type partial_result = 0;

        for (uint64_t i = i0; i < i1; ++i) {
          partial_result = op(partial_result, func(n, m, i, args...));
        }

        // Each thread writes only its own slot; no synchronization needed.
        partial_results[m] = partial_result;
      }
    } else if (num_threads > 0) {
      typename Op::result_type result = 0;

      for (uint64_t i = 0; i < size; ++i) {
        result = op(result, func(1, 0, i, args...));
      }

      partial_results.resize(1, result);
    }

    return partial_results;
  }

  // Full reduction: RunReduceP followed by an op-fold of the partials.
  template <typename Function, typename Op, typename... Args>
  typename Op::result_type RunReduce(uint64_t size, Function&& func,
                                     Op&& op, Args&&... args) const {
    auto partial_results = RunReduceP(size, func, std::move(op), args...);

    typename Op::result_type result = 0;

    for (auto partial_result : partial_results) {
      result = op(result, partial_result);
    }

    return result;
  }

  unsigned num_threads;  // thread count chosen at construction
};
using ParallelFor = ParallelForT<1024>;
} // namespace qsim
#endif // PARFOR_H_
|
mpi-omp-mat-infnorm-blkstp.c | #ifdef _CIVL
#include <civlc.cvh>
#endif
/*************************************************************************
C-DAC Tech Workshop : HeGaPa-2012
July 16-20,2012
Example 3 : Mpi-Omp_MatInf_blkstp.c
Objective : Write parallel program using MPI and OPENMP to compute norm
of a square matrix.
Input : Read file (infndata.inp) for Matrix
Output : Process with rank 0 prints the value of Infinity Norm
Created : MAY-2012
************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include <omp.h>
#ifdef _CIVL
$input int NUM_ROWS_BOUND = 4;
$input int NUM_COLS_BOUND = 4;
#endif
/* Main Program */
/*
 * Compute the infinity norm (maximum absolute row sum) of a square
 * matrix read from ./data/infndata.inp, distributing whole rows across
 * MPI ranks and summing each local row with OpenMP threads.
 * Rank 0 prints the parallel result next to a serial check.
 *
 * Fixes vs. the original:
 *  - MPI_Reduce used MPI_DOUBLE on float buffers (type mismatch / UB);
 *    it must be MPI_FLOAT.
 *  - InputMatrix row arrays were leaked (only the pointer array was freed).
 *  - Buffer/InputMatrix were indeterminate on non-root ranks; they are
 *    now NULL-initialized.
 *  - main() now returns 0.
 */
int main(int argc, char **argv)
{
    int Numprocs, MyRank;
    int NoofCols, NoofRows, ScatterSize;
    int index, irow, icol;
    int Root = 0;
    float **InputMatrix = NULL, *Buffer = NULL, *MyBuffer = NULL;
    float max = 0, sum = 0, Inf_norm = 0;
    FILE *fp;
    int MatrixFileStatus = 1;

    /* ........MPI Initialisation ....... */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
    MPI_Comm_size(MPI_COMM_WORLD, &Numprocs);

    if (MyRank == 0) {
        /* .......Read The Matrix Input File ...... */
        if ((fp = fopen("./data/infndata.inp", "r")) == NULL) {
            MatrixFileStatus = 0;
        }
        if (MatrixFileStatus != 0) {
            fscanf(fp, "%d %d\n", &NoofRows, &NoofCols);
#ifdef _CIVL
            $assume(NoofRows <= NUM_ROWS_BOUND);
            $assume(NoofRows <= NoofCols);
#endif
            /* .......Allocate Memory For Matrix ..... */
            InputMatrix = (float **) malloc(NoofRows * sizeof(float *));
            for (irow = 0; irow < NoofRows; irow++)
                InputMatrix[irow] = (float *) malloc(NoofCols * sizeof(float));
            /* .......Read Data For Matrix ..... */
            for (irow = 0; irow < NoofRows; irow++) {
                for (icol = 0; icol < NoofCols; icol++)
                    fscanf(fp, "%f", &InputMatrix[irow][icol]);
            }
            fclose(fp);
            /* Flatten the 2-D matrix into a contiguous row-major 1-D
               send buffer for MPI_Scatter. */
            Buffer = (float *) malloc(NoofRows * NoofCols * sizeof(float));
            index = 0;
            for (irow = 0; irow < NoofRows; irow++) {
                for (icol = 0; icol < NoofCols; icol++) {
                    Buffer[index] = InputMatrix[irow][icol];
                    index++;
                }
            }
        }
    }  /* MyRank == 0 */

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Bcast(&MatrixFileStatus, 1, MPI_INT, Root, MPI_COMM_WORLD);
    if (MatrixFileStatus == 0) {
        if (MyRank == Root)
            printf("Can't Open Matrix Input File");
        MPI_Finalize();
        exit(-1);
    }
    MPI_Bcast(&NoofRows, 1, MPI_INT, Root, MPI_COMM_WORLD);
#ifdef _CIVL
    $assume(NoofRows >= Numprocs);
#endif
    if (NoofRows < Numprocs) {
        MPI_Finalize();
        if (MyRank == 0)
            printf("Noof Rows Should Be More Than No of Processors ... \n");
        exit(0);
    }
#ifdef _CIVL
    $assume(NoofRows % Numprocs == 0);
#endif
    if (NoofRows % Numprocs != 0) {
        MPI_Finalize();
        if (MyRank == 0) {
            printf("Matrix Cannot Be Striped Evenly ..... \n");
        }
        exit(0);
    }
    MPI_Bcast(&NoofCols, 1, MPI_INT, Root, MPI_COMM_WORLD);

    /* Each rank receives ScatterSize complete rows. */
    ScatterSize = NoofRows / Numprocs;
    MyBuffer = (float *) malloc(ScatterSize * NoofCols * sizeof(float));
    MPI_Scatter(Buffer, ScatterSize * NoofCols, MPI_FLOAT,
                MyBuffer, ScatterSize * NoofCols, MPI_FLOAT,
                0, MPI_COMM_WORLD);

    /* Local maximum absolute row sum, rows split across OpenMP threads.
       sum/index/icol are per-thread; max is shared and guarded below. */
    max = 0;
    omp_set_num_threads(4);
#pragma omp parallel for private(sum,index,icol) shared(max)
    for (irow = 0; irow < ScatterSize; irow++) {
        printf("The Threadid Is %d With each Processor's Rank %d\n",omp_get_thread_num(), MyRank);
        sum = 0;
        index = irow * NoofCols;
        for (icol = 0; icol < NoofCols; icol++) {
            sum += (MyBuffer[index] >= 0) ? (MyBuffer[index]) : (0 - MyBuffer[index]);
            index++;
        }
        /* OpenMP Critical Section: serialize updates of the shared max */
#pragma omp critical
        if (sum > max)
            max = sum;
    }
    MPI_Barrier(MPI_COMM_WORLD);
    /* Buffers are float, so the datatype must be MPI_FLOAT (the original
       MPI_DOUBLE read/wrote 8 bytes per 4-byte float object). */
    MPI_Reduce(&max, &Inf_norm, 1, MPI_FLOAT, MPI_MAX, 0, MPI_COMM_WORLD);

    if (MyRank == 0) {
        max = 0;
        /* Serial Check */
        for (irow = 0; irow < NoofRows; irow++) {
            sum = 0;
            index = irow * NoofCols;
            for (icol = 0; icol < NoofCols; icol++) {
                sum += (Buffer[index] >= 0) ? (Buffer[index]) : (0 - Buffer[index]);
                index++;
            }
            max = max < sum ? sum : max;
        }
        printf("\nThe Infinity Norm Is(Parallel Code) : %f\n", Inf_norm);
        printf("\nThe Infinity Norm Is(Serial Code) : %f\n\n", max);
        /* Freeing Allocated Memory: rows first, then the pointer array. */
        for (irow = 0; irow < NoofRows; irow++)
            free(InputMatrix[irow]);
        free(InputMatrix);
        free(Buffer);
    }
    /* MPI-Termination */
    free(MyBuffer);
    MPI_Finalize();
    return 0;
}
|
algo.h | #include "utils.h"
#include <deque>
#include <random>
enum visited_state { UNVISITED, VISITED, QUEUED };
// Starting at a node, performs a BFS to identify the entire component that
// the node is in
std::vector<node> node_bfs(const node &start_node, const adjacency_list &adj_list) {
std::deque<node> queue;
std::unordered_set<node> visited;
std::vector<node> component;
queue.push_back(start_node);
while (!queue.empty()) {
node current_node = queue.front();
queue.pop_front();
auto search = visited.find(current_node);
if (search == visited.end()) {
visited.insert(current_node);
component.push_back(current_node);
for (node adj : adj_list.at(current_node)) {
search = visited.find(adj);
if (search == visited.end()) {
queue.push_back(adj);
}
}
}
}
return component;
}
// Given an adjacency list, returns a vec of vec of nodes, where each vec of
// nodes are all the nodes in a single connected component.
// Fix: the adjacency list was taken by value, copying the whole graph on
// every call; a const reference keeps the call syntax identical for all
// callers while avoiding the copy.
std::vector<std::vector<node>> get_components(const adjacency_list &adj_list) {
    // Start with every node unvisited; BFS removes one component at a time.
    std::unordered_set<node> unvisited_nodes;
    for (auto &[key_node, _adjs] : adj_list) {
        unvisited_nodes.insert(key_node);
    }
    std::vector<std::vector<node>> components;
    while (!unvisited_nodes.empty()) {
        node this_node = *unvisited_nodes.begin();
        std::vector<node> component = node_bfs(this_node, adj_list);
        components.push_back(component);
        for (node comp_node : component) {
            unvisited_nodes.erase(comp_node);
        }
    }
    return components;
}
// Given an adjacency list, a vector of vector of nodes giving the components,
// and the original graph, this connects the components with a single edge or
// a triangle if possible, if these edges were present in the original graph.
// Traverses the component graph BFS-style starting from component 0 and
// records the connecting edges first, applying them all at the end.
void connect_components(adjacency_list &adj_list,
                        const std::vector<std::vector<node>> &components,
                        const adjacency_list &original_graph) {
    // state: per-component visitation status; node_to_comp: reverse index.
    std::unordered_map<size_t, visited_state> state;
    std::unordered_map<node, size_t> node_to_comp;
    for (size_t idx = 0; idx < components.size(); idx++) {
        for (node this_node : components.at(idx)) {
            node_to_comp.insert({this_node, idx});
        }
        state.insert({idx, UNVISITED});
    }
    std::deque<size_t> queue;
    edge_list edges;
    queue.push_back(0);
    while (!queue.empty()) {
        size_t current_comp = queue.front();
        queue.pop_front();
        if (state.at(current_comp) != VISITED) {
            state.at(current_comp) = VISITED;
            // Scan original-graph edges leaving this component for bridges
            // into components not yet reached.
            for (node node_0 : components.at(current_comp)) {
                std::vector<node> adjs = original_graph.at(node_0);
                for (node node_1 : adjs) {
                    size_t node_1_comp = node_to_comp.at(node_1);
                    if (state.at(node_1_comp) == UNVISITED && current_comp != node_1_comp) {
                        state.at(node_1_comp) = QUEUED;
                        edges.push_back(std::make_pair(node_0, node_1));
                        // If node_0 also neighbors (in the original graph) a
                        // current neighbor of node_1, add that edge too so the
                        // connection forms a triangle.
                        std::vector<node> node_1_adjs = adj_list.at(node_1);
                        // TODO add logic for triangles the other way
                        for (node node_2 : node_1_adjs) {
                            auto adjs_search = std::find(adjs.begin(), adjs.end(),
                                                         node_2);
                            if (adjs_search != adjs.end()) {
                                edges.push_back(std::make_pair(node_0, node_2));
                                break;
                            }
                        }
                        queue.push_back(node_1_comp);
                    }
                }
            }
        }
        if (queue.empty()) {
            // search for an unvisited component and add it to the queue
            // (handles component-graph pieces unreachable from component 0)
            for (auto &[comp, vis_state] : state) {
                if (vis_state == UNVISITED) {
                    queue.push_back(comp);
                    break;
                }
            }
        }
    }
    // Apply all recorded connections to the working adjacency list.
    for (std::pair<node, node> edge : edges) {
        add_edge(adj_list, edge.first, edge.second);
    }
}
// Adds houses, w/ alternate orbit node, back to the graph from x.
// Searches x's neighborhood for nodes y,z,w,v (all still in nu, the set of
// not-yet-used nodes) forming the house pattern; on a match the 7 edges are
// appended to 'out' as flat endpoint pairs, the matched nodes are pushed on
// 'active' and removed from nu. At most one house is added per y.
void add_houses_alt(const node x, const adjacency_list &adj_list,
                    std::unordered_set<node> &nu, std::vector<node> &out,
                    std::deque<node> &active) {
    std::vector<node> x_adjs = adj_list.at(x);
    // aux: x's neighborhood as a set for O(1) membership tests
    std::unordered_set<node> aux(x_adjs.begin(), x_adjs.end());
    for (node y : x_adjs) {
        auto search = nu.find(y);
        if (search != nu.end()) {
            std::vector<node> y_adjs = adj_list.at(y);
            bool found = false;
            // z: unused common neighbor of x and y
            for (node z : y_adjs) {
                search = nu.find(z);
                auto aux_search = aux.find(z);
                if (search != nu.end() && aux_search != aux.end()) {
                    std::vector<node> z_adjs = adj_list.at(z);
                    // w: unused neighbor of both z and y
                    for (node w : z_adjs) {
                        search = nu.find(w);
                        auto y_search = std::find(y_adjs.begin(), y_adjs.end(), w);
                        if (search != nu.end() && y_search != y_adjs.end()) {
                            // v: unused neighbor of y and w, distinct from z and w
                            for (node v : y_adjs) {
                                if (v != z && v != w) {
                                    std::vector<node> w_adjs = adj_list.at(w);
                                    search = nu.find(v);
                                    auto w_search = std::find(w_adjs.begin(), w_adjs.end(), v);
                                    if (search != nu.end() && w_search != w_adjs.end()) {
                                        // Here edges are just being added in a vector
                                        // and the pair relationships are accounted for
                                        // later. Doing it this way to keep edges in
                                        // contiguous memory
                                        out.push_back(x);
                                        out.push_back(y);
                                        out.push_back(x);
                                        out.push_back(z);
                                        out.push_back(y);
                                        out.push_back(z);
                                        out.push_back(y);
                                        out.push_back(w);
                                        out.push_back(z);
                                        out.push_back(w);
                                        out.push_back(y);
                                        out.push_back(v);
                                        out.push_back(w);
                                        out.push_back(v);
                                        // matched nodes become active and leave
                                        // both the unused set and x's candidate set
                                        active.push_front(y);
                                        active.push_front(z);
                                        active.push_front(w);
                                        active.push_front(v);
                                        nu.erase(y);
                                        nu.erase(z);
                                        nu.erase(w);
                                        nu.erase(v);
                                        aux.erase(y);
                                        aux.erase(z);
                                        aux.erase(w);
                                        aux.erase(v);
                                        found = true;
                                        break;
                                    }
                                }
                            }
                        }
                        if (found) {break;}
                    }
                }
                if (found) {break;}
            }
        }
    }
}
// Adds houses back to the graph from x.
// Like add_houses_alt, but every matched node (y,z,w,v) must be a direct
// neighbor of x; the 7 emitted edges therefore attach v and w to x instead
// of to y/w. Edges are appended to 'out' as flat endpoint pairs.
void add_houses(const node x, const adjacency_list &adj_list,
                std::unordered_set<node> &nu, std::vector<node> &out,
                std::deque<node> &active) {
    std::vector<node> x_adjs = adj_list.at(x);
    // aux: x's neighborhood as a set for O(1) membership tests
    std::unordered_set<node> aux(x_adjs.begin(), x_adjs.end());
    for (node y : x_adjs) {
        auto search = nu.find(y);
        if (search != nu.end()) {
            std::vector<node> y_adjs = adj_list.at(y);
            bool found = false;
            // z: unused common neighbor of x and y
            for (node z : y_adjs) {
                search = nu.find(z);
                auto aux_search = aux.find(z);
                if (search != nu.end() && aux_search != aux.end()) {
                    std::vector<node> z_adjs = adj_list.at(z);
                    // w: unused neighbor of z that is also adjacent to x
                    for (node w : z_adjs) {
                        search = nu.find(w);
                        aux_search = aux.find(w);
                        if (search != nu.end() && aux_search != aux.end()) {
                            // v: unused neighbor of y and x, distinct from z and w
                            for (node v : y_adjs) {
                                if (v != z && v != w) {
                                    search = nu.find(v);
                                    aux_search = aux.find(v);
                                    if (search != nu.end() && aux_search != aux.end()) {
                                        // Here edges are just being added in a vector
                                        // and the pair relationships are accounted for
                                        // later. Doing it this way to keep edges in
                                        // contiguous memory
                                        out.push_back(x);
                                        out.push_back(y);
                                        out.push_back(x);
                                        out.push_back(z);
                                        out.push_back(y);
                                        out.push_back(z);
                                        out.push_back(x);
                                        out.push_back(w);
                                        out.push_back(z);
                                        out.push_back(w);
                                        out.push_back(y);
                                        out.push_back(v);
                                        out.push_back(x);
                                        out.push_back(v);
                                        // matched nodes become active and leave
                                        // both the unused set and x's candidate set
                                        active.push_front(y);
                                        active.push_front(z);
                                        active.push_front(w);
                                        active.push_front(v);
                                        nu.erase(y);
                                        nu.erase(z);
                                        nu.erase(w);
                                        nu.erase(v);
                                        aux.erase(y);
                                        aux.erase(z);
                                        aux.erase(w);
                                        aux.erase(v);
                                        found = true;
                                        break;
                                    }
                                }
                            }
                        }
                        if (found) {break;}
                    }
                }
                if (found) {break;}
            }
        }
    }
}
// Adds diamonds w/ alternate orbit node back to the graph from X.
// Searches for unused nodes y,z (both adjacent to x) and w (adjacent to z
// and y but not required to neighbor x) forming the diamond pattern; the 5
// edges are appended to 'out' as flat endpoint pairs and the matched nodes
// are activated and removed from nu.
void add_diamonds_alt(const node x, const adjacency_list &adj_list,
                      std::unordered_set<node> &nu, std::vector<node> &out,
                      std::deque<node> &active) {
    std::vector<node> x_adjs = adj_list.at(x);
    // aux: x's neighborhood as a set for O(1) membership tests
    std::unordered_set<node> aux(x_adjs.begin(), x_adjs.end());
    for (node y : x_adjs) {
        auto search = nu.find(y);
        if (search != nu.end()) {
            std::vector<node> y_adjs = adj_list.at(y);
            bool found = false;
            // z: unused common neighbor of x and y
            for (node z : y_adjs) {
                search = nu.find(z);
                auto aux_search = aux.find(z);
                if (search != nu.end() && aux_search != aux.end()) {
                    std::vector<node> z_adjs = adj_list.at(z);
                    // w: unused neighbor of z that is also adjacent to y
                    for (node w : z_adjs) {
                        search = nu.find(w);
                        auto aux_search = std::find(y_adjs.begin(), y_adjs.end(), w);
                        if (search != nu.end() && aux_search != y_adjs.end()) {
                            // Here edges are just being added in a vector
                            // and the pair relationships are accounted for
                            // later. Doing it this way to keep edges in
                            // contiguous memory
                            out.push_back(x);
                            out.push_back(y);
                            out.push_back(x);
                            out.push_back(z);
                            out.push_back(y);
                            out.push_back(z);
                            out.push_back(y);
                            out.push_back(w);
                            out.push_back(z);
                            out.push_back(w);
                            // matched nodes become active and leave both
                            // the unused set and x's candidate set
                            active.push_front(y);
                            active.push_front(z);
                            active.push_front(w);
                            nu.erase(y);
                            nu.erase(z);
                            nu.erase(w);
                            aux.erase(y);
                            aux.erase(z);
                            aux.erase(w);
                            found = true;
                            break;
                        }
                    }
                }
                if (found) {break;}
            }
        }
    }
}
// Adds diamonds back to the graph from x
//
// Shape found: nodes {x, y, z, w} with edges x-y, x-z, y-z, x-w, z-w,
// i.e. a 4-cycle y-x-w-z-y with chord x-z.  Unlike add_diamonds_alt,
// here w must also be a neighbour of x (membership test against aux).
//
// nu     - set of nodes not yet used by any shape; matched nodes are removed
// out    - flat edge list; edges appended as consecutive (u, v) pairs
// active - frontier deque; matched nodes are pushed for later expansion
void add_diamonds(const node x, const adjacency_list &adj_list,
std::unordered_set<node> &nu, std::vector<node> &out,
std::deque<node> &active) {
  std::vector<node> x_adjs = adj_list.at(x);
  // aux = neighbours of x still eligible; consumed nodes are removed
  std::unordered_set<node> aux(x_adjs.begin(), x_adjs.end());
  for (node y : x_adjs) {
    auto search = nu.find(y);
    if (search != nu.end()) {
      std::vector<node> y_adjs = adj_list.at(y);
      bool found = false;
      for (node z : y_adjs) {
        search = nu.find(z);
        auto aux_search = aux.find(z);
        // z must be unused AND a common neighbour of x and y
        if (search != nu.end() && aux_search != aux.end()) {
          std::vector<node> z_adjs = adj_list.at(z);
          for (node w : z_adjs) {
            search = nu.find(w);
            aux_search = aux.find(w);
            // w must be unused AND adjacent to both x (via aux) and z
            if (search != nu.end() && aux_search != aux.end()) {
              // Here edges are just being added in a vector
              // and the pair relationships are accounted for
              // later. Doing it this way to keep edges in
              // contiguous memory
              out.push_back(x);
              out.push_back(y);
              out.push_back(x);
              out.push_back(z);
              out.push_back(y);
              out.push_back(z);
              out.push_back(x);
              out.push_back(w);
              out.push_back(z);
              out.push_back(w);
              active.push_front(y);
              active.push_front(z);
              active.push_front(w);
              nu.erase(y);
              nu.erase(z);
              nu.erase(w);
              aux.erase(y);
              aux.erase(z);
              aux.erase(w);
              found = true;
              break;
            }
          }
        }
        // one diamond per y: stop scanning z once a match is recorded
        if (found) {break;}
      }
    }
  }
}
// Adds triangles back to the graph from x
void add_triangles(const node x, const adjacency_list &adj_list,
std::unordered_set<node> &nu, std::vector<node> &out, std::deque<node> &active) {
std::vector<node> x_adjs = adj_list.at(x);
std::unordered_set<node> aux(x_adjs.begin(), x_adjs.end());
for (node y : x_adjs) {
auto search = nu.find(y);
if (search != nu.end()) {
std::vector<node> y_adjs = adj_list.at(y);
for (node z : y_adjs) {
search = nu.find(z);
auto aux_search = aux.find(z);
if (search != nu.end() && aux_search != aux.end()) {
// add the edges to out, again, this is not super
// clear right now and should be cleaned up. possibly
// use matrix abstraction
out.push_back(x);
out.push_back(y);
out.push_back(x);
out.push_back(z);
out.push_back(y);
out.push_back(z);
active.push_front(y);
active.push_front(z);
nu.erase(y);
nu.erase(z);
aux.erase(y);
aux.erase(z);
break;
}
}
}
}
}
// Propagate shapes from a given x node, if they exist in the original
// graph, are planar, and allow for access to each node in the shape later
//
// NOTE: returning a vec<node> here, this is basically an
// edge list or matrix of dim 2, this is not entirely clear. doing it
// this way just for speed
std::vector<node> propagate_from_x(const size_t x_node, const adjacency_list &adj_list) {
  std::vector<node> out;            // flat edge list: (out[i], out[i+1]) pairs
  std::unordered_set<node> nu;      // nodes not yet used by any shape
  std::deque<node> active {x_node}; // frontier of nodes to expand from
  for (auto &[key_node, _adjs] : adj_list) {
    nu.insert(key_node);
  }
  nu.erase(x_node);
  // Termination: every pass either pops a frontier node (shrinking
  // `active`) or, when the frontier is empty, moves one node from `nu`
  // into it -- so `nu` is eventually drained.
  while (!nu.empty()) {
    if (active.empty()) {
      // frontier exhausted: restart from an arbitrary unused node
      node temp = *nu.begin();
      active.push_front(temp);
      nu.erase(temp);
    }
    const node x = active.front();
    active.pop_front();
    // try larger shapes first (5-node houses, then 4-node diamonds,
    // then 3-node triangles); each helper consumes nodes from `nu`
    // and pushes newly used nodes onto `active`
    add_houses(x, adj_list, nu, out, active);
    add_houses_alt(x, adj_list, nu, out, active);
    add_diamonds(x, adj_list, nu, out, active);
    add_diamonds_alt(x, adj_list, nu, out, active);
    add_triangles(x, adj_list, nu, out, active);
  }
  return out;
}
// Partitions nodes from the original graph so that the algorithm can
// be performed in parallel on each partition
//
// First, randomly selects nodes. Then starts adding nodes to each partition
// using BFS. Finally, just adds leftover nodes to available partitions
//
// NOTE(review): assumes adj_list has at least num_partitions nodes;
// with fewer, the uniform distribution below is constructed over an
// empty/underflowed range -- confirm callers guarantee this.
std::vector<adjacency_list> partition_nodes(const adjacency_list &adj_list,
const size_t num_partitions) {
  std::vector<adjacency_list> partitions;
  if (num_partitions == 1) {
    // single partition: the whole graph, no work to do
    partitions.push_back(adj_list);
    return partitions;
  }
  // working set of nodes not yet assigned to any partition
  std::unordered_set<node> node_set;
  node_set.reserve(adj_list.size());
  for (auto &[key_node, _adjs] : adj_list) {
    node_set.insert(key_node);
  }
  // fixed seed => deterministic partitioning across runs
  std::mt19937 generator(42);
  // seed each partition with one randomly chosen node
  for (size_t _ = 0; _ < num_partitions; _++) {
    std::uniform_int_distribution<> distribution(0, node_set.size() - 1);
    auto iter = node_set.begin();
    std::advance(iter, distribution(generator));
    adjacency_list new_adj_list;
    add_node(new_adj_list, *iter, adj_list.at(*iter).size());
    node_set.erase(iter);
    partitions.push_back(new_adj_list);
  }
  // add neighbors first
  for (size_t idx = 0; idx < partitions.size(); idx++) {
    node node_0 = partitions.at(idx).begin()->first;
    for (node node_1 : adj_list.at(node_0)) {
      auto search = node_set.find(node_1);
      if (search != node_set.end()) {
        add_node(partitions.at(idx), node_1, adj_list.at(node_1).size());
        // neighbors that are in the partition need their
        // edges
        for (node adj : adj_list.at(node_1)) {
          auto search = partitions.at(idx).find(adj);
          if (search != partitions.at(idx).end()) {
            add_edge(partitions.at(idx), node_1, adj);
          }
        }
        node_set.erase(node_1);
      }
    }
  }
  // remaining unassigned nodes, used to size each partition's BFS quota
  size_t num_nodes = node_set.size();
  // start adding nodes to partitions with BFS
  for (size_t idx = 0; idx < partitions.size(); idx++) {
    size_t num_nodes_added = 0;
    std::deque<node> queue;
    std::unordered_set<node> visited;
    // BFS from the partition's seed node, claiming up to an equal
    // share (num_nodes / num_partitions) of the remaining nodes
    queue.push_back(partitions.at(idx).begin()->first);
    while (!queue.empty() && num_nodes_added < num_nodes / num_partitions) {
      const size_t queue_len = queue.size();
      // process one BFS level at a time
      for (size_t _ = 0; _ < queue_len; _++) {
        node current_node = queue.front();
        queue.pop_front();
        auto search = visited.find(current_node);
        if (search == visited.end()) {
          visited.insert(current_node);
          std::vector<node> adjs = adj_list.at(current_node);
          for (node node_0 : adjs) {
            auto search = node_set.find(node_0);
            if (search != node_set.end()) {
              add_node(partitions.at(idx), node_0, adj_list.at(node_0).size());
              search = visited.find(node_0);
              if (search == visited.end()) {
                queue.push_back(node_0);
              }
              num_nodes_added++;
              // for each neighbor from the original graph,
              // if the neighbor is in the partition, edges should
              // be added
              for (node node_1 : adj_list.at(node_0)) {
                auto search = partitions.at(idx).find(node_1);
                if (search != partitions.at(idx).end()) {
                  add_edge(partitions.at(idx), node_0, node_1);
                }
              }
              node_set.erase(node_0);
            }
          }
        }
      }
    }
  }
  // leftovers (unreached by any BFS) are dealt round-robin to partitions
  size_t idx = 0;
  while (!node_set.empty()) {
    if (idx >= partitions.size()) {
      idx = 0;
    }
    node this_node = *node_set.begin();
    add_node(partitions.at(idx), this_node, adj_list.at(this_node).size());
    for (node node_1 : adj_list.at(this_node)) {
      auto search = partitions.at(idx).find(node_1);
      if (search != partitions.at(idx).end()) {
        add_edge(partitions.at(idx), this_node, node_1);
      }
    }
    node_set.erase(this_node);
    idx++;
  }
  return partitions;
}
// The main algorithm routine, driver of everything here.
// Partitions nodes, then runs the graphlet propagation from the maximum
// degree node in each partition. Connects components at the end, if possible
//
// adj_list - the input graph
// threads  - number of partitions AND the OpenMP thread count
// returns  - a sparsified graph over the same node set
//
// Fix: the previous version used an OpenMP range-based for whose loop
// variable was taken BY VALUE, deep-copying each partition's entire
// adjacency list per iteration; range-for is also only a canonical
// OpenMP loop from spec 5.0 on.  An index loop with a const reference
// avoids both issues without changing behavior.
adjacency_list algo_routine(const adjacency_list &adj_list, const int threads) {
  adjacency_list out;
  out.reserve(adj_list.size());
  // start from all nodes, no edges; propagation re-adds kept edges
  for (auto &[key_node, adjs] : adj_list) {
    add_node(out, key_node, adjs.size());
  }
  std::vector<adjacency_list> partitions = partition_nodes(adj_list, threads);
  #pragma omp parallel for num_threads(threads)
  for (size_t pidx = 0; pidx < partitions.size(); pidx++) {
    const adjacency_list &partition = partitions[pidx];
    const node init_x = get_max_degree_node(partition);
    const std::vector<node> edges = propagate_from_x(init_x, partition);
    // `out` is shared across threads; serialize the edge insertion
    #pragma omp critical(out)
    {
      // edges are stored flat: (edges[idx], edges[idx+1]) is one edge
      for (size_t idx = 0; idx < edges.size(); idx += 2) {
        add_edge(out, edges.at(idx), edges.at(idx + 1));
      }
    }
  }
  // per-partition propagation may leave the result disconnected
  std::vector<std::vector<node>> components = get_components(out);
  if (components.size() > 1) {
    connect_components(out, components, adj_list);
  }
  return out;
}
|
openmp-2dheat.c | /*
* B. Estrade <estrabd@cs.uh.edu>
* Original coding: Spring 2004
* Serialized: Summer 2010
* Wrapped into MPI/OpenMP 2dheat Suite: Summer 2010
* OpenMP added: ..not yet! :)
*
* Serial implementation of 2d heat conduction
* finite difference over a rectangular domain using:
* - Jacobi
* - Gauss-Seidel
* - SOR
*
* This code was created by eliminating the MPI include and
* API function calls from the parallel version in order to
* serve as a starting point for inserting OpenMP directives.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
T_SRC0 @ X - (W/2,H)
|
*******X*******
*.............*
*.............*
*.............*
*.............*
*.............* ~ 0.0 @ all bdy by "X" (W/2,H)
*.............*
*.............*
*.............*
*.............*
*.............*
***************
2D domain - WIDTH x HEIGHT
"X" = T_SRC0
"*" = 0.0
"." = internal node suceptible to heating
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#define _WIDTH 50
#define _HEIGHT 50
#define H 1.0
#define _EPSILON 0.1
/*
methods:
1 - jacobi
2 - gauss-seidel
3 - sor
*/
#define _METHOD 2
#define ITERMAX 10
#define T_SRC0 550.0
#define ROOT 0
/* Includes */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>
/* declare functions */
int get_start (int rank);
int get_end (int rank);
int get_num_rows (int rank);
void init_domain (float ** domain_ptr,int rank);
void jacobi (float ** current_ptr,float ** next_ptr);
void gauss_seidel (float ** current_ptr,float ** next_ptr);
void sor (float ** current_ptr,float ** next_ptr);
float get_val_par (float * above_ptr,float ** domain_ptr,float * below_ptr,int rank,int i,int j);
void enforce_bc_par (float ** domain_ptr,int rank,int i,int j);
int global_to_local (int rank, int row);
float f (int i,int j);
float get_convergence_sqd (float ** current_ptr,float ** next_ptr,int rank);
/* declare and set globals */
int WIDTH=_WIDTH;
int HEIGHT=_HEIGHT;
int meth=_METHOD;
int num_threads;
float EPSILON=_EPSILON;
/* Function pointer to solver method of choice */
void (*method) ();
/*
 * Driver: parses command-line options, allocates the two working grids,
 * then applies the selected relaxation method (jacobi / gauss-seidel /
 * sor) until the L2 norm of the change between iterations is <= EPSILON.
 *
 * Options:
 *   -e <eps>   convergence threshold      -h <rows>  grid height
 *   -m <1|2|3> jacobi|gauss-seidel|sor    -w <cols>  grid width
 *   -t         print timing only          -v         verbose
 *
 * Fixes vs. the original:
 *  - errflg was incremented while uninitialized (undefined behavior)
 *  - elapsed time was an int; 1e6 * seconds-of-day overflows 32 bits,
 *    so it is now a long long
 *  - malloc results are checked
 *  - the -t line labeled microseconds as "msec"
 *  - the grids are freed before exiting
 */
int main(int argc, char** argv) {
  int p,my_rank;
  long long time = 0;   /* elapsed microseconds; long long avoids int overflow */
  /* arrays used to contain each PE's rows - specify cols, no need to spec rows */
  float **U_Curr;
  float **U_Next;
  /* helper variables */
  float convergence = 0.0, convergence_sqd, local_convergence_sqd;
  /* iterators and row bookkeeping */
  int i,j,k;
  int my_start_row,my_end_row,my_num_rows;
  int verbose = 0;
  int show_time = 0;
  /* for timings */
  struct timeval tv;
  struct timezone tz;
  struct tm *tm;
  /* artifacts of original serialization from MPI version */
  p = 1;
  my_rank = 0;
  /* argument processing done by everyone */
  int c, errflg = 0;    /* errflg must start at 0: it is incremented on bad options */
  extern char *optarg;
  extern int optind, optopt;
  while ((c = getopt(argc, argv, "e:h:m:tw:v")) != -1) {
    switch(c) {
      case 'e':
        EPSILON = atof(optarg);
        break;
      case 'h':
        HEIGHT = atoi(optarg);
        break;
      case 'm':
        /* selects the numerical method */
        switch(atoi(optarg)) {
          case 1: /* jacobi */
            meth = 1;
            break;
          case 2: /* gauss-seidel */
            meth = 2;
            break;
          case 3: /* sor */
            meth = 3;
            break;
        }
        break;
      case 't':
        show_time++; /* overridden by -v (verbose) */
        break;
      case 'w':
        WIDTH = atoi(optarg);
        break;
      case 'v':
        verbose++;
        break;
      /* handle bad arguments */
      case ':': /* -h or -w without operand */
        if (ROOT == my_rank)
          fprintf(stderr,"Option -%c requires an operand\n", optopt);
        errflg++;
        break;
      case '?':
        if (ROOT == my_rank)
          fprintf(stderr,"Unrecognized option: -%c\n", optopt);
        errflg++;
        break;
    }
  }
  /*
  if (0 < errflg)
    exit(EXIT_FAILURE);
  */
  /* wait for user to input runtime params */
  //MPI_Barrier(MPI_COMM_WORLD);
  /* broadcast method to use */
  //(void) MPI_Bcast(&meth,1,MPI_INT,0,MPI_COMM_WORLD);
  switch (meth) {
    case 1:
      method = &jacobi;
      break;
    case 2:
      method = &gauss_seidel;
      break;
    case 3:
      method = &sor;
      break;
  }
  /* let each processor decide what rows(s) it owns */
  my_start_row = get_start(my_rank);
  my_end_row = get_end(my_rank);
  my_num_rows = get_num_rows(my_rank);
  if ( 0 < verbose )
    printf("proc %d contains (%d) rows %d to %d\n",my_rank,my_num_rows,my_start_row,my_end_row);
  fflush(stdout);
  /* allocate each 2d array as a row-pointer table over one contiguous buffer */
  U_Curr = (float**)malloc(sizeof(float*)*my_num_rows);
  U_Next = (float**)malloc(sizeof(float*)*my_num_rows);
  if (U_Curr == NULL || U_Next == NULL) {
    fprintf(stderr,"out of memory\n");
    exit(EXIT_FAILURE);
  }
  U_Curr[0] = (float*)malloc(sizeof(float)*my_num_rows*(int)floor(WIDTH/H));
  U_Next[0] = (float*)malloc(sizeof(float)*my_num_rows*(int)floor(WIDTH/H));
  if (U_Curr[0] == NULL || U_Next[0] == NULL) {
    fprintf(stderr,"out of memory\n");
    exit(EXIT_FAILURE);
  }
  for (i=1;i<my_num_rows;i++) {
    U_Curr[i] = U_Curr[i-1]+(int)floor(WIDTH/H);
    U_Next[i] = U_Next[i-1]+(int)floor(WIDTH/H);
  }
  /* initialize global grid */
  init_domain(U_Curr,my_rank);
  init_domain(U_Next,my_rank);
  /* iterate for solution */
  if (my_rank == ROOT) {
    gettimeofday(&tv, &tz);
    tm=localtime(&tv.tv_sec);
    time = 1000000LL*(tm->tm_hour * 3600 + tm->tm_min * 60 + tm->tm_sec) + tv.tv_usec;
  }
  k = 1;
  num_threads = 0;
  for(;;) {
    method(U_Curr,U_Next);
    local_convergence_sqd = get_convergence_sqd(U_Curr,U_Next,my_rank);
    //MPI_Reduce(&local_convergence_sqd,&convergence_sqd,1,MPI_FLOAT,MPI_SUM,ROOT,MPI_COMM_WORLD);
    convergence_sqd = local_convergence_sqd;
    if (my_rank == ROOT) {
      convergence = sqrt(convergence_sqd);
      if (verbose == 1) {
        printf("L2 = %f\n",convergence);
        fflush(stdout);
      }
    }
    /* broadcast method to use */
    //(void) MPI_Bcast(&convergence,1,MPI_INT,0,MPI_COMM_WORLD);
    if (convergence <= EPSILON) {
      break;
    }
    /* copy U_Next to U_Curr */
    for (j=my_start_row;j<=my_end_row;j++) {
      for (i=0;i<(int)floor(WIDTH/H);i++) {
        U_Curr[j-my_start_row][i] = U_Next[j-my_start_row][i];
      }
    }
    k++;
    //MPI_Barrier(MPI_COMM_WORLD);
  }
  /* say something at the end */
  if (my_rank == ROOT) {
    gettimeofday(&tv, &tz);
    tm=localtime(&tv.tv_sec);
    time = 1000000LL*(tm->tm_hour * 3600 + tm->tm_min * 60 + tm->tm_sec) + tv.tv_usec - time;
    if (0 < verbose)
    { printf("Estimated time to convergence in %d iterations using %d processors on a %dx%d grid is %lld microseconds\n",k,p,(int)floor(WIDTH/H),(int)floor(HEIGHT/H),time);
    }
    else if (show_time)
    { printf("% 5d\t% 12lld usec\n",omp_get_max_threads(),time); }
    /* else show nothing */
  }
  free(U_Curr[0]);
  free(U_Curr);
  free(U_Next[0]);
  free(U_Next);
  exit(EXIT_SUCCESS);
}
/* used by each PE to compute the sum of the squared diffs between current
 * iteration and previous; the square root of the (global) sum is the L2
 * convergence norm tested in main() */
float get_convergence_sqd (float ** current_ptr,float ** next_ptr,int rank) {
  const int first_row = get_start(rank);
  const int last_row = get_end(rank);
  const int cols = (int)floor(WIDTH/H);
  float sum = 0.0;
  for (int row = first_row; row <= last_row; row++) {
    const int local = global_to_local(rank,row);
    for (int col = 0; col < cols; col++) {
      /* difference in float, squared in double -- same arithmetic
       * as the original pow(x,2) formulation */
      double diff = next_ptr[local][col] - current_ptr[local][col];
      sum += diff * diff;
    }
  }
  return sum;
}
/* implements parallel jacobi methods */
/*
 * One Jacobi sweep: next[i][j] = average of the four neighbours of
 * current[i][j] minus the source term (f() is identically 0 here).
 * Every (i,j) update reads only `current`, so iterations of the row
 * loop are independent and are split across OpenMP threads.
 *
 * U_Curr_Above / U_Curr_Below are ghost-row buffers inherited from the
 * MPI version; with p == 1 get_val_par() never actually reads them.
 */
void jacobi (float ** current_ptr,float ** next_ptr) {
  int i,j,p,my_rank,my_start,my_end,my_num_rows;
  float U_Curr_Above[(int)floor(WIDTH/H)]; /* 1d array holding values from bottom row of PE above */
  float U_Curr_Below[(int)floor(WIDTH/H)]; /* 1d array holding values from top row of PE below */
  /* artifacts of original serialization from the MPI version */
  p = 1;
  my_rank = 0;
  my_start = get_start(my_rank);
  my_end = get_end(my_rank);
  my_num_rows = get_num_rows(my_rank);
  #pragma omp parallel default(none) private(i,j) \
  shared(p,my_rank,U_Curr_Above,U_Curr_Below,WIDTH,my_start,my_end,next_ptr,current_ptr)
  {
    /* Jacobi method using global addressing */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
      for (i=0;i<(int)floor(WIDTH/H);i++) {
        next_ptr[j-my_start][i] = .25*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i-1,j)
        + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i+1,j)
        + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j-1)
        + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j+1)
        - (pow(H,2)*f(i,j)));
        /* re-pin boundary and heat-source values after the update */
        enforce_bc_par(next_ptr,my_rank,i,j);
      }
    }
  } //end omp parallel region
}
/* implements parallel g-s method */
/*
 * Red-black Gauss-Seidel sweep written in SOR form with W = 1.0
 * (which reduces the SOR update to plain Gauss-Seidel).  Cells are
 * two-colored by parity of i+j: first all "red" cells (i+j odd) are
 * updated from `current`, then all "black" cells (i+j even) are
 * updated using the freshly written red values in `next`.  Cells of
 * one color have no same-color neighbours, so each half-sweep is
 * safely parallelized across rows with OpenMP.
 */
void gauss_seidel (float ** current_ptr,float ** next_ptr) {
  int i,j,p,my_rank,my_start,my_end,my_num_rows;
  float U_Curr_Above[(int)floor(WIDTH/H)]; /* 1d array holding values from bottom row of PE above */
  float U_Curr_Below[(int)floor(WIDTH/H)]; /* 1d array holding values from top row of PE below */
  float W = 1.0; /* relaxation factor; 1.0 => plain Gauss-Seidel */
  p = 1;
  my_rank = 0;
  my_start = get_start(my_rank);
  my_end = get_end(my_rank);
  my_num_rows = get_num_rows(my_rank);
  #pragma omp parallel default(none) private(i,j) \
  shared(W,p,my_rank,U_Curr_Above,U_Curr_Below,WIDTH,my_start,my_end,next_ptr,current_ptr)
  {
    /* solve next reds (i+j odd) */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
      for (i=0;i<(int)floor(WIDTH/H);i++) {
        if ((i+j)%2 != 0) {
          next_ptr[j-my_start][i] = get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
          + (W/4)*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i-1,j)
          + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i+1,j)
          + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j-1)
          + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j+1)
          - 4*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j))
          - (pow(H,2)*f(i,j)));
          enforce_bc_par(next_ptr,my_rank,i,j);
        }
      }
    }
    /* solve next blacks (i+j) even .... using next reds */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
      for (i=0;i<(int)floor(WIDTH/H);i++) {
        if ((i+j)%2 == 0) {
          next_ptr[j-my_start][i] = get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
          + (W/4)*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i-1,j)
          + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i+1,j)
          + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j-1)
          + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j+1)
          - 4*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j))
          - (pow(H,2)*f(i,j)));
          enforce_bc_par(next_ptr,my_rank,i,j);
        }
      }
    }
  } //end omp parallel region
}
/* implements parallels sor method */
/*
 * Red-black successive over-relaxation (SOR): identical structure to
 * gauss_seidel() but with relaxation factor W = 1.5, which weights the
 * correction term to accelerate convergence.  Red cells (i+j odd) are
 * updated from `current`, then black cells (i+j even) from the fresh
 * red values in `next`; each half-sweep parallelizes over rows.
 */
void sor (float ** current_ptr,float ** next_ptr) {
  int i,j,p,my_rank,my_start,my_end,my_num_rows;
  float U_Curr_Above[(int)floor(WIDTH/H)]; /* 1d array holding values from bottom row of PE above */
  float U_Curr_Below[(int)floor(WIDTH/H)]; /* 1d array holding values from top row of PE below */
  float W = 1.5; /* over-relaxation factor (1 < W < 2) */
  p = 1;
  my_rank = 0;
  my_start = get_start(my_rank);
  my_end = get_end(my_rank);
  my_num_rows = get_num_rows(my_rank);
  #pragma omp parallel default(none) private(i,j) \
  shared(W,p,my_rank,U_Curr_Above,U_Curr_Below,WIDTH,my_start,my_end,next_ptr,current_ptr)
  {
    /* solve next reds (i+j odd) */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
      for (i=0;i<(int)floor(WIDTH/H);i++) {
        if ((i+j)%2 != 0) {
          next_ptr[j-my_start][i] = get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
          + (W/4)*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i-1,j)
          + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i+1,j)
          + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j-1)
          + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j+1)
          - 4*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j))
          - (pow(H,2)*f(i,j)));
          enforce_bc_par(next_ptr,my_rank,i,j);
        }
      }
    }
    /* solve next blacks (i+j) even .... using next reds */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
      for (i=0;i<(int)floor(WIDTH/H);i++) {
        if ((i+j)%2 == 0) {
          next_ptr[j-my_start][i] = get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
          + (W/4)*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i-1,j)
          + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i+1,j)
          + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j-1)
          + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j+1)
          - 4*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j))
          - (pow(H,2)*f(i,j)));
          enforce_bc_par(next_ptr,my_rank,i,j);
        }
      }
    }
  } //end omp parallel region
}
/* enforces bcs in serial and parallel */
/*
 * Pins boundary values in this PE's block of the domain: the single
 * heat-source node at (floor(W/H)/2 - 1, 0) is held at T_SRC0, and any
 * node on (or beyond) the rectangle edge is held at 0.0.
 *
 * `j` is a GLOBAL row index and must be mapped through
 * global_to_local() before indexing this PE's rows.
 *
 * Fix: the heat-source branch previously indexed domain_ptr with the
 * global `j` directly, while the edge branch used global_to_local();
 * the two are identical only for rank 0 (the serialized case) and
 * wrong for any other rank.  Both branches now translate consistently.
 */
void enforce_bc_par (float ** domain_ptr,int rank,int i,int j) {
  /* enforce bc's first */
  if(i == ((int)floor(WIDTH/H/2)-1) && j == 0) {
    /* This is the heat source location */
    domain_ptr[global_to_local(rank,j)][i] = T_SRC0;
  } else if (i <= 0 || j <= 0 || i >= ((int)floor(WIDTH/H)-1) || j >= ((int)floor(HEIGHT/H)-1)) {
    /* All edges and beyond are set to 0.0 */
    domain_ptr[global_to_local(rank,j)][i] = 0.0;
  }
}
/* returns appropriate values for requested i,j */
/*
 * Reads the temperature at GLOBAL coordinates (i,j), resolving boundary
 * conditions and ghost rows in priority order:
 *  1. the heat-source node always reads T_SRC0
 *  2. any node on or beyond the rectangle edge reads 0.0
 *  3. a row owned by the PE above/below is read from the ghost buffers
 *     (never taken in this serialized build, where p == 1)
 *  4. otherwise this PE's own block is indexed via global_to_local()
 */
float get_val_par (float * above_ptr,float ** domain_ptr,float * below_ptr,int rank,int i,int j) {
  float ret_val;
  int p;
  /* artifact from original serialization of MPI version */
  p = 1;
  /* enforce bc's first */
  if(i == ((int)floor(WIDTH/H/2)-1) && j == 0) {
    /* This is the heat source location */
    ret_val = T_SRC0;
  } else if (i <= 0 || j <= 0 || i >= ((int)floor(WIDTH/H)-1) || j >= ((int)floor(HEIGHT/H)-1)) {
    /* All edges and beyond are set to 0.0 */
    ret_val = 0.0;
  } else {
    /* Else, return value for matrix supplied or ghost rows */
    if (j < get_start(rank)) {
      if (rank == ROOT) {
        /* not interested in above ghost row */
        ret_val = 0.0;
      } else {
        ret_val = above_ptr[i];
        /*printf("%d: Used ghost (%d,%d) row from above = %f\n",rank,i,j,above_ptr[i]);
        fflush(stdout);*/
      }
    } else if (j > get_end(rank)) {
      if (rank == (p-1)) {
        /* not interested in below ghost row */
        ret_val = 0.0;
      } else {
        ret_val = below_ptr[i];
        /*printf("%d: Used ghost (%d,%d) row from below = %f\n",rank,i,j,below_ptr[i]);
        fflush(stdout);*/
      }
    } else {
      /* else, return the value in the domain asked for */
      ret_val = domain_ptr[global_to_local(rank,j)][i];
      /*printf("%d: Used real (%d,%d) row from self = %f\n",rank,i,global_to_local(rank,j),domain_ptr[global_to_local(rank,j)][i]);
      fflush(stdout);*/
    }
  }
  return ret_val;
}
/* initialized domain to 0.0 - could be where grid file is read in */
/*
 * Zeroes every cell of this PE's block of the grid.
 *
 * Fix: get_end() returns an INCLUSIVE last-row index
 * (get_num_rows == 1 + end - start), so the row loop must use `<=`.
 * The original used `<`, leaving the last allocated row uninitialized;
 * since the grids come from malloc, the first convergence test then
 * read indeterminate values from that row.
 */
void init_domain (float ** domain_ptr,int rank) {
  int i,j,start,end,cols;
  start = get_start(rank);
  end = get_end(rank);        /* inclusive */
  cols = (int)floor(WIDTH/H);
  for (j=start;j<=end;j++) {
    for (i=0;i<cols;i++) {
      domain_ptr[j-start][i] = 0.0;
    }
  }
}
/* computes start row for given PE */
/*
 * Rows are divided as evenly as possible among p processors; when the
 * division is uneven, the first (total % p) ranks each own one extra
 * row.  Returns the global index of this rank's first row.
 */
int get_start (int rank) {
  const int p = 1;  /* artifact of serialization of original MPI version */
  const int total_rows = (int)floor(HEIGHT/H);
  const int base = total_rows / p;   /* whole rows per proc */
  const int extra = total_rows % p;  /* leftover rows, one each to the first ranks */
  return (rank < extra) ? rank * (base + 1)
                        : rank * base + extra;
}
/* computes end row for given PE */
/*
 * Returns the global index of this rank's last row, INCLUSIVE.
 * Ranks below the remainder own (base + 1) rows, the rest own base.
 */
int get_end (int rank) {
  const int p = 1;  /* artifact of serialization of original MPI version */
  const int total_rows = (int)floor(HEIGHT/H);
  const int base = total_rows / p;
  const int extra = total_rows % p;
  const int rows_owned = (rank < extra) ? base + 1 : base;
  return get_start(rank) + rows_owned - 1;
}
/* calcs number of rows for given PE */
/* count is inclusive of both endpoints, hence the +1 */
int get_num_rows (int rank) {
  return get_end(rank) - get_start(rank) + 1;
}
/* maps a global grid row index to this PE's local array index */
int global_to_local (int rank, int row) {
  const int first_owned_row = get_start(rank);
  return row - first_owned_row;
}
/*
 * f - function that would be non zero if there was an internal heat source
 *
 * The source term is identically zero over the whole domain; i and j
 * are accepted (and ignored) so the solvers can call it uniformly.
 */
float f (int i,int j) {
  (void)i;
  (void)j;
  return 0.0f;
}
|
GB_unop__ceil_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fp64_fp64)
// op(A') function: GB (_unop_tran__ceil_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = ceil (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceil (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = ceil (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies cij = ceil (aij) element-wise (double -> double).  When Ab is
// NULL the matrix is full/dense and all anz entries are processed; when
// Ab is non-NULL it is A's bitmap and only entries with Ab[p] != 0 are
// written.  Work is split statically across nthreads OpenMP threads.
GrB_Info GB (_unop_apply__ceil_fp64_fp64)
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = ceil (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = ceil (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose kernel body is shared by all generated unary ops and is
// textually included from GB_unop_transpose.c; it uses the GB_* macros
// defined above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...) to specialize the
// generic loop for ceil on double.
GrB_Info GB (_unop_tran__ceil_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
train.h | #pragma once
#include <chrono>
#include <set>
#include <queue>
#include "threadpool.h"
#include "forest.h"
#include "spliteval.h"
// Training mode passed to TrainForest::Train (exact retrain semantics
// are defined by the concrete trainer implementation).
enum class TrainType : int {
  TRAIN = 0,                    // build a forest from scratch
  RETRAIN = 1,                  // retrain based on an existing forest
  RETRAIN_WITH_REPLACEMENT = 2  // retrain variant -- see concrete trainer
};
// Hyper-parameters controlling forest training.  Defaults match the
// previous constructor-initializer-list values exactly.
struct TrainParameters {
  TrainType train_type = TrainType::TRAIN; // how the forest is (re)built
  int n_trees = 5;                         // number of trees in the forest
  int max_tree_depth = 7;                  // maximum depth per tree
  int n_test_split_functions = 50;         // candidate split functions per node
  int n_test_thresholds = 10;              // candidate thresholds per split fn
  int n_test_samples = 100;                // samples drawn to score a split
  int min_samples_to_split = 14;           // fewer samples => make a leaf
  int min_samples_for_leaf = 7;            // min samples on each split side
  int print_node_info = 100;               // progress print interval (nodes)

  TrainParameters() {}
};
// Abstract base class for forest trainers.  Owns the shared machinery
// (split sampling/optimization, progress accounting); concrete
// subclasses implement Train().
//
// Template parameters:
//   SplitFunctionT  - generator/instance type for node split functions
//   LeafFunctionT   - generator/instance type for leaf predictors
//   SplitEvaluatorT - scores candidate splits (lower cost is better)
template <typename SplitFunctionT, typename LeafFunctionT, typename SplitEvaluatorT>
class TrainForest {
public:
  // params is held BY REFERENCE (see params_ below); the caller must
  // keep it alive for the trainer's lifetime.
  // The constructor precomputes n_max_nodes_, an upper bound on the
  // total node count: sum_{d=0..max_tree_depth} 2^d per tree, times
  // n_trees.  Used only for progress reporting.
  TrainForest(const TrainParameters& params, const std::shared_ptr<SplitFunctionT> gen_split_fcn, const std::shared_ptr<LeafFunctionT> gen_leaf_fcn, const std::shared_ptr<SplitEvaluatorT> split_eval, int n_threads, bool verbose)
      : params_(params), gen_split_fcn_(gen_split_fcn), gen_leaf_fcn_(gen_leaf_fcn), split_eval_(split_eval), n_threads(n_threads), verbose_(verbose) {
    n_created_nodes_ = 0;
    n_max_nodes_ = 1;
    unsigned long n_nodes_d = 1;
    for(int depth = 0; depth < params.max_tree_depth; ++depth) {
      n_nodes_d *= 2;
      n_max_nodes_ += n_nodes_d;
    }
    n_max_nodes_ *= params.n_trees;
  }
  virtual ~TrainForest() {}
  // Trains (or retrains, depending on train_type) a forest from the
  // given samples; old_forest may seed retraining.  Pure virtual.
  virtual std::shared_ptr<Forest<SplitFunctionT, LeafFunctionT>> Train(const std::vector<TrainDatum>& samples, TrainType train_type, const std::shared_ptr<Forest<SplitFunctionT, LeafFunctionT>>& old_forest) = 0;
protected:
  // Logs the training hyper-parameters (verbose mode only).  The
  // critical section keeps the lines contiguous when trees train in
  // parallel.
  virtual void PrintParams() {
    if(verbose_){
      #pragma omp critical (TrainForest_train)
      {
        std::cout << "[TRAIN] training forest " << std::endl;
        std::cout << "[TRAIN] n_trees : " << params_.n_trees << std::endl;
        std::cout << "[TRAIN] max_tree_depth : " << params_.max_tree_depth << std::endl;
        std::cout << "[TRAIN] n_test_split_functions: " << params_.n_test_split_functions << std::endl;
        std::cout << "[TRAIN] n_test_thresholds : " << params_.n_test_thresholds << std::endl;
        std::cout << "[TRAIN] n_test_samples : " << params_.n_test_samples << std::endl;
        std::cout << "[TRAIN] min_samples_to_split : " << params_.min_samples_to_split << std::endl;
      }
    }
  }
  // Progress bookkeeping (verbose mode only).  When a node turns out
  // to be a leaf at `depth`, the whole subtree it would have spanned
  // is subtracted from the n_max_nodes_ upper bound, tightening the
  // "done" fraction printed below.
  // NOTE(review): assumes params_.print_node_info != 0 (used as a
  // modulus) -- confirm the parameter is validated by callers.
  virtual void UpdateNodeInfo(unsigned int depth, bool leaf) {
    if(verbose_) {
      n_created_nodes_ += 1;
      if(leaf) {
        unsigned long n_nodes_d = 1;
        unsigned int n_remove_max_nodes = 0;
        for(int d = depth; d < params_.max_tree_depth; ++d) {
          n_nodes_d *= 2;
          n_remove_max_nodes += n_nodes_d;
        }
        n_max_nodes_ -= n_remove_max_nodes;
      }
      if(n_created_nodes_ % params_.print_node_info == 0 || n_created_nodes_ == n_max_nodes_) {
        std::cout << "[Forest]"
                  << " created node number " << n_created_nodes_
                  << " @ depth " << depth
                  << ", max. " << n_max_nodes_ << " left"
                  << " => " << (double(n_created_nodes_) / double(n_max_nodes_))
                  << " done" << std::endl;
      }
    }
  }
  // Draws min(n_test_samples, all.size()) DISTINCT samples uniformly
  // at random into `sampled` (rejection via a std::set of indices).
  // NOTE(review): assumes `all` is non-empty -- the uniform
  // distribution below is over [0, all.size()-1].
  virtual void SampleData(const std::vector<TrainDatum>& all, std::vector<TrainDatum>& sampled, std::mt19937& rng) {
    unsigned int n = all.size();
    unsigned int k = params_.n_test_samples;
    k = n < k ? n : k;
    std::set<int> indices;
    std::uniform_int_distribution<int> udist(0, all.size()-1);
    while(indices.size() < k) {
      int idx = udist(rng);
      indices.insert(idx);
    }
    sampled.resize(k);
    int sidx = 0;
    for(int idx : indices) {
      sampled[sidx] = all[idx];
      sidx += 1;
    }
  }
  // Routes each sample to `left` when the split function fires, else
  // to `right`.
  virtual void Split(const std::shared_ptr<SplitFunctionT>& split_function, const std::vector<TrainDatum>& samples, std::vector<TrainDatum>& left, std::vector<TrainDatum>& right) {
    for(auto sample : samples) {
      if(split_function->Split(sample.sample)) {
        left.push_back(sample);
      }
      else {
        right.push_back(sample);
      }
    }
  }
  // Random split search: generates n_test_split_functions candidate
  // split functions, each tried with n_test_thresholds thresholds
  // taken from randomly chosen samples' responses.  Candidates whose
  // split leaves fewer than min_samples_for_leaf on either side are
  // rejected.  Returns the lowest-cost candidate (per split_eval_),
  // or nullptr when every candidate was rejected.
  virtual std::shared_ptr<SplitFunctionT> OptimizeSplitFunction(const std::vector<TrainDatum>& samples, int depth, std::mt19937& rng) {
    std::vector<TrainDatum> split_samples;
    SampleData(samples, split_samples, rng);
    unsigned int min_samples_for_leaf = params_.min_samples_for_leaf;
    float min_cost = std::numeric_limits<float>::max();
    std::shared_ptr<SplitFunctionT> best_split_fcn;
    float best_threshold = 0;
    for(int split_fcn_idx = 0; split_fcn_idx < params_.n_test_split_functions; ++split_fcn_idx) {
      auto split_fcn = gen_split_fcn_->Generate(rng, samples[0].sample);
      for(int threshold_idx = 0; threshold_idx < params_.n_test_thresholds; ++threshold_idx) {
        std::uniform_int_distribution<int> udist(0, split_samples.size()-1);
        int rand_split_sample_idx = udist(rng);
        float threshold = split_fcn->Compute(split_samples[rand_split_sample_idx].sample);
        split_fcn->set_threshold(threshold);
        std::vector<TrainDatum> left;
        std::vector<TrainDatum> right;
        Split(split_fcn, split_samples, left, right);
        if(left.size() < min_samples_for_leaf || right.size() < min_samples_for_leaf) {
          continue;
        }
        // std::cout << "split done " << left.size() << "," << right.size() << std::endl;
        float split_cost = split_eval_->Eval(left, right, depth);
        // std::cout << ", " << split_cost << ", " << threshold << "; " << std::endl;
        if(split_cost < min_cost) {
          min_cost = split_cost;
          best_split_fcn = split_fcn;
          // the same split_fcn object is re-thresholded on every
          // inner iteration, so the winning threshold must be kept
          // separately and re-applied at the end
          best_threshold = threshold; //need theshold extra because of pointer
        }
      }
    }
    if(best_split_fcn != nullptr) {
      best_split_fcn->set_threshold(best_threshold);
    }
    return best_split_fcn;
  }
  // Builds a leaf node whose predictor is fit on `samples`, and
  // updates the progress counters.
  virtual NodePtr CreateLeafNode(const std::vector<TrainDatum>& samples, unsigned int depth) {
    auto leaf_fct = gen_leaf_fcn_->Create(samples);
    auto node = std::make_shared<LeafNode<LeafFunctionT>>(leaf_fct);
    UpdateNodeInfo(depth, true);
    return node;
  }
protected:
  // NOTE(review): held by reference -- dangles if the caller's
  // TrainParameters dies before this trainer; confirm ownership.
  const TrainParameters& params_;
  const std::shared_ptr<SplitFunctionT> gen_split_fcn_;   // split-function factory
  const std::shared_ptr<LeafFunctionT> gen_leaf_fcn_;     // leaf-predictor factory
  const std::shared_ptr<SplitEvaluatorT> split_eval_;     // split cost evaluator
  int n_threads;                // worker threads for training
  bool verbose_;                // enables progress/parameter logging
  unsigned long n_created_nodes_; // nodes created so far (progress only)
  unsigned long n_max_nodes_;     // shrinking upper bound on total nodes
};
// Depth-first (recursive) forest trainer: each tree is grown by recursively
// optimizing a split at every node until the depth / sample-count limits are
// reached.  Trees are trained concurrently, one OpenMP loop iteration each.
template <typename SplitFunctionT, typename LeafFunctionT, typename SplitEvaluatorT>
class TrainForestRecursive : public TrainForest<SplitFunctionT, LeafFunctionT, SplitEvaluatorT> {
public:
  TrainForestRecursive(const TrainParameters& params, const std::shared_ptr<SplitFunctionT> gen_split_fcn, const std::shared_ptr<LeafFunctionT> gen_leaf_fcn, const std::shared_ptr<SplitEvaluatorT> split_eval, int n_threads, bool verbose)
    : TrainForest<SplitFunctionT, LeafFunctionT, SplitEvaluatorT>(params, gen_split_fcn, gen_leaf_fcn, split_eval, n_threads, verbose) {}

  virtual ~TrainForestRecursive() {}

  // Train params_.n_trees trees in parallel and collect them in a new forest.
  // If old_forest is given, tree i is retrained starting from old_forest's
  // tree i (while one exists); how old nodes are reused depends on train_type
  // (see the node-level Train below).
  virtual std::shared_ptr<Forest<SplitFunctionT, LeafFunctionT>> Train(const std::vector<TrainDatum>& samples, TrainType train_type, const std::shared_ptr<Forest<SplitFunctionT, LeafFunctionT>>& old_forest) {
    this->PrintParams();
    auto tim = std::chrono::system_clock::now();
    auto forest = std::make_shared<Forest<SplitFunctionT, LeafFunctionT>>();
    omp_set_num_threads(this->n_threads);
    // NOTE(review): the loop previously carried an `ordered` clause although
    // no `#pragma omp ordered` region exists in the body, so it was dropped.
    #pragma omp parallel for
    for(size_t treeIdx = 0; treeIdx < this->params_.n_trees; ++treeIdx) {
      auto treetim = std::chrono::system_clock::now();
      #pragma omp critical (TrainForest_train)
      {
        if(this->verbose_){
          std::cout << "[TRAIN][START] training tree " << treeIdx << " of " << this->params_.n_trees << std::endl;
        }
      }
      std::shared_ptr<Tree<SplitFunctionT, LeafFunctionT>> old_tree;
      if(old_forest != nullptr && treeIdx < old_forest->trees_size()) {  // was `!= 0`; use nullptr consistently
        old_tree = old_forest->trees(treeIdx);
      }
      // every tree gets its own freshly seeded generator, so trees differ
      std::random_device rd;
      std::mt19937 rng(rd());
      auto tree = Train(samples, train_type, old_tree, rng);
      #pragma omp critical (TrainForest_train)
      {
        forest->AddTree(tree);
        if(this->verbose_){
          auto now = std::chrono::system_clock::now();
          auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(now - treetim);
          std::cout << "[TRAIN][FINISHED] training tree " << treeIdx << " of " << this->params_.n_trees << " - took " << (ms.count() * 1e-3) << "[s]" << std::endl;
          std::cout << "[TRAIN][FINISHED] " << (this->params_.n_trees - forest->trees_size()) << " left for training" << std::endl;
        }
      }
    }
    if(this->verbose_){
      auto now = std::chrono::system_clock::now();
      auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(now - tim);
      std::cout << "[TRAIN][FINISHED] training forest - took " << (ms.count() * 1e-3) << "[s]" << std::endl;
    }
    return forest;
  }

private:
  // Train a single tree: start the recursion at the (optional) old root.
  virtual std::shared_ptr<Tree<SplitFunctionT, LeafFunctionT>> Train(const std::vector<TrainDatum>& samples, TrainType train_type, const std::shared_ptr<Tree<SplitFunctionT, LeafFunctionT>>& old_tree, std::mt19937& rng) {
    NodePtr old_root;
    if(old_tree != nullptr) {
      old_root = old_tree->root();
    }
    NodePtr root = Train(samples, train_type, old_root, 0, rng);
    return std::make_shared<Tree<SplitFunctionT, LeafFunctionT>>(root);
  }

  // Recursively build the (sub)tree for the given samples.
  // - While depth/sample limits allow, a split function is obtained: freshly
  //   optimized, or copied from the matching old split node.
  // - When no valid split exists, a leaf is created (TRAIN /
  //   RETRAIN_WITH_REPLACEMENT) or the old node is kept (RETRAIN).
  virtual NodePtr Train(const std::vector<TrainDatum>& samples, TrainType train_type, const NodePtr& old_node, unsigned int depth, std::mt19937& rng) {
    if(depth < this->params_.max_tree_depth && samples.size() > this->params_.min_samples_to_split) {
      std::shared_ptr<SplitFunctionT> best_split_fcn;
      bool was_split_node = false;
      if(old_node == nullptr || old_node->type() == LeafNode<LeafFunctionT>::TYPE) {
        best_split_fcn = this->OptimizeSplitFunction(samples, depth, rng);
        was_split_node = false;
      }
      else if(old_node->type() == SplitNode<SplitFunctionT, LeafFunctionT>::TYPE) {
        auto split_node = std::static_pointer_cast<SplitNode<SplitFunctionT, LeafFunctionT>>(old_node);
        best_split_fcn = split_node->split_fcn()->Copy();
        was_split_node = true;
      }
      if(best_split_fcn == nullptr) {
        if(old_node == nullptr || train_type == TrainType::TRAIN || train_type == TrainType::RETRAIN_WITH_REPLACEMENT) {
          return this->CreateLeafNode(samples, depth);
        }
        else if(train_type == TrainType::RETRAIN) {
          return old_node->Copy();
        }
        else {
          std::cout << "[ERROR] unknown train type" << std::endl;
          exit(-1);
        }
      }
      // (1) split samples
      std::vector<TrainDatum> leftsamples, rightsamples;
      this->Split(best_split_fcn, samples, leftsamples, rightsamples);
      //output node information
      this->UpdateNodeInfo(depth, false);
      //create split node - recursively train the siblings
      if(was_split_node) {
        auto split_node = std::static_pointer_cast<SplitNode<SplitFunctionT, LeafFunctionT>>(old_node);
        NodePtr left = this->Train(leftsamples, train_type, split_node->left(), depth + 1, rng);
        NodePtr right = this->Train(rightsamples, train_type, split_node->right(), depth + 1, rng);
        auto new_node = std::make_shared<SplitNode<SplitFunctionT, LeafFunctionT>>(left, right, best_split_fcn);
        return new_node;
      }
      else {
        NodePtr left = this->Train(leftsamples, train_type, nullptr, depth + 1, rng);
        NodePtr right = this->Train(rightsamples, train_type, nullptr, depth + 1, rng);
        auto new_node = std::make_shared<SplitNode<SplitFunctionT, LeafFunctionT>>(left, right, best_split_fcn);
        return new_node;
      }
    } // if samples < min_samples || depth >= max_depth then make leaf node
    else {
      if(old_node == nullptr || train_type == TrainType::TRAIN || train_type == TrainType::RETRAIN_WITH_REPLACEMENT) {  // was `== 0`
        return this->CreateLeafNode(samples, depth);
      }
      else if(train_type == TrainType::RETRAIN) {
        return old_node->Copy();
      }
      else {
        std::cout << "[ERROR] unknown train type" << std::endl;
        exit(-1);
      }
    }
  }
};
// Work item for queued tree training: the samples routed to a node, the
// node's depth, and the slot in the parent where the trained node is stored.
struct QueueTuple {
  int depth;                          // depth of the node to be trained (-1 = unset)
  std::vector<TrainDatum> train_data; // samples routed to this node
  NodePtr* parent;                    // where the trained node gets written
  QueueTuple() : depth(-1), train_data(), parent(nullptr) {}
  // Takes the sample vector by value and moves it into the member, so callers
  // pay one copy for lvalues and none for rvalues (previously it was copied
  // a second time in the member initializer).
  QueueTuple(int depth, std::vector<TrainDatum> train_data, NodePtr* parent) :
    depth(depth), train_data(std::move(train_data)), parent(parent) {}
};
// Queue/thread-pool based forest trainer: instead of recursing on the stack,
// every node to be trained is pushed as an independent job into a ThreadPool.
template <typename SplitFunctionT, typename LeafFunctionT, typename SplitEvaluatorT>
class TrainForestQueued : public TrainForest<SplitFunctionT, LeafFunctionT, SplitEvaluatorT> {
public:
TrainForestQueued(const TrainParameters& params, const std::shared_ptr<SplitFunctionT> gen_split_fcn, const std::shared_ptr<LeafFunctionT> gen_leaf_fcn, const std::shared_ptr<SplitEvaluatorT> split_eval, int n_threads, bool verbose)
: TrainForest<SplitFunctionT, LeafFunctionT, SplitEvaluatorT>(params, gen_split_fcn, gen_leaf_fcn, split_eval, n_threads, verbose) {}
virtual ~TrainForestQueued() {}
// Train all trees by seeding one root job per tree, then waiting until the
// pool drains.  NOTE(review): old_forest and train_type are accepted but not
// used here -- queued training always trains from scratch; confirm intended.
virtual std::shared_ptr<Forest<SplitFunctionT, LeafFunctionT>> Train(const std::vector<TrainDatum>& samples, TrainType train_type, const std::shared_ptr<Forest<SplitFunctionT, LeafFunctionT>>& old_forest) {
this->PrintParams();
auto tim = std::chrono::system_clock::now();
auto forest = std::make_shared<Forest<SplitFunctionT, LeafFunctionT>>();
std::cout << "[TRAIN] create pool with " << this->n_threads << " threads" << std::endl;
auto pool = std::make_shared<ThreadPool>(this->n_threads);
for(int treeidx = 0; treeidx < this->params_.n_trees; ++treeidx) {
auto tree = std::make_shared<Tree<SplitFunctionT, LeafFunctionT>>();
forest->AddTree(tree);
// the job writes the trained root directly into the tree via this pointer
AddJob(pool, QueueTuple(0, samples, &(tree->root_)));
}
// poll until all jobs (including the recursively enqueued child jobs) finish
while(pool->has_running_tasks()) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
if(this->verbose_){
auto now = std::chrono::system_clock::now();
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(now - tim);
std::cout << "[TRAIN][FINISHED] training forest - took " << (ms.count() * 1e-3) << "[s]" << std::endl;
}
return forest;
}
private:
// Enqueue the training job for one node; if the node becomes a split node,
// two child jobs are enqueued from within the job itself.
virtual void AddJob(std::shared_ptr<ThreadPool> pool, QueueTuple data) {
pool->enqueue([this](std::shared_ptr<ThreadPool> pool, QueueTuple data) {
// per-job RNG so concurrent jobs do not share generator state
std::random_device rd;
std::mt19937 rng(rd());
std::shared_ptr<SplitFunctionT> best_split_fcn = nullptr;
if(data.depth < this->params_.max_tree_depth && int(data.train_data.size()) > this->params_.min_samples_to_split) {
best_split_fcn = this->OptimizeSplitFunction(data.train_data, data.depth, rng);
}
if(best_split_fcn == nullptr) {
// no usable split -> terminal node; publish it into the parent slot
auto node = this->CreateLeafNode(data.train_data, data.depth);
*(data.parent) = node;
}
else {
this->UpdateNodeInfo(data.depth, false);
auto node = std::make_shared<SplitNode<SplitFunctionT, LeafFunctionT>>();
node->split_fcn_ = best_split_fcn;
*(data.parent) = node;
QueueTuple left;
QueueTuple right;
this->Split(best_split_fcn, data.train_data, left.train_data, right.train_data);
left.depth = data.depth + 1;
right.depth = data.depth + 1;
// the child jobs will fill the freshly created split node's slots
left.parent = &(node->left_);
right.parent = &(node->right_);
this->AddJob(pool, left);
this->AddJob(pool, right);
}
}, pool, data);
}
};
|
GB_unop__round_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__round_fc32_fc32)
// op(A') function: GB (_unop_tran__round_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_croundf (aij)
// type of the entries of the input matrix A
#define GB_ATYPE \
GxB_FC32_t
// type of the entries of the output matrix C
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// access entry p of the output value array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_croundf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_croundf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the operator entrywise: Cx [p] = GB_croundf (Ax [p]) for fc32 values.
GrB_Info GB (_unop_apply__round_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// non-bitmap case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply.  The loop body lives in
// the shared template GB_unop_transpose.c, specialized via the GB_* macros.
GrB_Info GB (_unop_tran__round_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/sync.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
// Common base class for tree updaters: feature meta collection, row-to-node
// position bookkeeping, per-node statistics and quantile-sketch helpers that
// the concrete tree makers share.
class BaseMaker: public TreeUpdater {
public:
// Parse the updater arguments into the training parameter object.
void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
param.InitAllowUnknown(args);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
public:
/*! \brief find type of each feature, use column format */
inline void InitByCol(DMatrix* p_fmat,
const RegTree& tree) {
// two slots per feature: [2*fid] = negated minimum, [2*fid+1] = maximum
fminmax.resize(tree.param.num_feature * 2);
std::fill(fminmax.begin(), fminmax.end(),
-std::numeric_limits<bst_float>::max());
// start accumulating statistics
dmlc::DataIter<ColBatch>* iter = p_fmat->ColIterator();
iter->BeforeFirst();
while (iter->Next()) {
const ColBatch& batch = iter->Value();
for (bst_uint i = 0; i < batch.size; ++i) {
const bst_uint fid = batch.col_index[i];
const ColBatch::Inst& c = batch[i];
if (c.length != 0) {
// c[0]/c[c.length-1] are treated as the column min/max (assumes the
// column is value-sorted -- TODO confirm); the minimum is stored
// negated so that the single Max-allreduce below syncs both bounds
fminmax[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax[fid * 2 + 0]);
fminmax[fid * 2 + 1] = std::max(c[c.length - 1].fvalue, fminmax[fid * 2 + 1]);
}
}
}
rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax), fminmax.size());
}
// get feature type, 0:empty 1:binary 2:real
inline int Type(bst_uint fid) const {
CHECK_LT(fid * 2 + 1, fminmax.size())
<< "FeatHelper fid exceed query bound ";
bst_float a = fminmax[fid * 2];
bst_float b = fminmax[fid * 2 + 1];
// slot still holds the init sentinel -> feature never observed (empty)
if (a == -std::numeric_limits<bst_float>::max()) return 0;
// -a is the true minimum; min == max means one distinct value (binary)
if (-a == b) {
return 1;
} else {
return 2;
}
}
inline bst_float MaxValue(bst_uint fid) const {
return fminmax[fid *2 + 1];
}
// Sample roughly a fraction p of the non-empty features; rank 0 draws the
// subset and broadcasts it so every worker uses the same feature set.
inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
std::vector<bst_uint> &findex = *p_findex;
findex.clear();
for (size_t i = 0; i < fminmax.size(); i += 2) {
const bst_uint fid = static_cast<bst_uint>(i / 2);
if (this->Type(fid) != 0) findex.push_back(fid);
}
unsigned n = static_cast<unsigned>(p * findex.size());
std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
findex.resize(n);
// sync the findex if it is subsample
std::string s_cache;
common::MemoryBufferStream fc(&s_cache);
dmlc::Stream& fs = fc;
if (rabit::GetRank() == 0) {
fs.Write(findex);
}
rabit::Broadcast(&s_cache, 0);
fs.Read(&findex);
}
private:
// per feature: [2*fid] = negated minimum, [2*fid+1] = maximum
std::vector<bst_float> fminmax;
};
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief this is helper function for row based data*/
inline static int NextLevel(const RowBatch::Inst &inst, const RegTree &tree, int nid) {
const RegTree::Node &n = tree[nid];
bst_uint findex = n.split_index();
// scan the sparse row for the split feature
for (unsigned i = 0; i < inst.length; ++i) {
if (findex == inst[i].index) {
if (inst[i].fvalue < n.split_cond()) {
return n.cleft();
} else {
return n.cright();
}
}
}
// feature missing in this row -> follow the default direction
return n.cdefault();
}
/*! \brief get number of omp thread in current context */
inline static int get_nthread() {
int nthread;
#pragma omp parallel
{
// every thread writes the same team size, so the racy write is benign
nthread = omp_get_num_threads();
}
return nthread;
}
// ------class member helpers---------
/*! \brief initialize temp data structure */
inline void InitData(const std::vector<bst_gpair> &gpair,
const DMatrix &fmat,
const RegTree &tree) {
CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
<< "TreeMaker: can only grow new tree";
const std::vector<unsigned> &root_index = fmat.info().root_index;
{
// setup position
position.resize(gpair.size());
if (root_index.size() == 0) {
std::fill(position.begin(), position.end(), 0);
} else {
for (size_t i = 0; i < position.size(); ++i) {
position[i] = root_index[i];
CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
<< "root index exceed setting";
}
}
// mark delete for the deleted datas
// (bit-flipping to a negative value excludes the row; see Decode/SetEncodePosition)
for (size_t i = 0; i < position.size(); ++i) {
if (gpair[i].hess < 0.0f) position[i] = ~position[i];
}
// mark subsample
if (param.subsample < 1.0f) {
std::bernoulli_distribution coin_flip(param.subsample);
auto& rnd = common::GlobalRandom();
for (size_t i = 0; i < position.size(); ++i) {
if (gpair[i].hess < 0.0f) continue;
if (!coin_flip(rnd)) position[i] = ~position[i];
}
}
}
{
// expand query
qexpand.reserve(256); qexpand.clear();
for (int i = 0; i < tree.param.num_roots; ++i) {
qexpand.push_back(i);
}
this->UpdateNode2WorkIndex(tree);
}
}
/*! \brief update queue expand add in new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (size_t i = 0; i < qexpand.size(); ++i) {
const int nid = qexpand[i];
if (!tree[nid].is_leaf()) {
newnodes.push_back(tree[nid].cleft());
newnodes.push_back(tree[nid].cright());
}
}
// use new nodes for qexpand
qexpand = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
// (strips the "excluded" encoding: negative values map back to the node id)
inline int DecodePosition(bst_uint ridx) const {
const int pid = position[ridx];
return pid < 0 ? ~pid : pid;
}
// encode the encoded position value for ridx
// (preserves the "excluded" flag: rows marked negative stay negative)
inline void SetEncodePosition(bst_uint ridx, int nid) {
if (position[ridx] < 0) {
position[ridx] = ~nid;
} else {
position[ridx] = nid;
}
}
/*!
* \brief this is helper function uses column based data structure,
* reset the positions to the lastest one
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void ResetPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// set the positions in the nondefault
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
// set rest of instances to default position
const std::vector<bst_uint> &rowset = p_fmat->buffered_rowset();
// set default direct nodes to default
// for leaf nodes that are not fresh, mark then to ~nid,
// so that they are ignored in future statistics collection
const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
const bst_uint ridx = rowset[i];
const int nid = this->DecodePosition(ridx);
if (tree[nid].is_leaf()) {
// mark finish when it is not a fresh leaf
if (tree[nid].cright() == -1) {
position[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].default_left()) {
this->SetEncodePosition(ridx, tree[nid].cleft());
} else {
this->SetEncodePosition(ridx, tree[nid].cright());
}
}
}
}
/*!
* \brief this is helper function uses column based data structure,
* update all positions into nondefault branch, if any, ignore the default branch
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// step 1, classify the non-default data into right places
std::vector<unsigned> fsplits;
for (size_t i = 0; i < nodes.size(); ++i) {
const int nid = nodes[i];
if (!tree[nid].is_leaf()) {
fsplits.push_back(tree[nid].split_index());
}
}
// deduplicate split features before fetching their columns
std::sort(fsplits.begin(), fsplits.end());
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator(fsplits);
while (iter->Next()) {
const ColBatch &batch = iter->Value();
for (size_t i = 0; i < batch.size; ++i) {
ColBatch::Inst col = batch[i];
const bst_uint fid = batch.col_index[i];
const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length);
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].is_leaf() && tree[nid].split_index() == fid) {
if (fvalue < tree[nid].split_cond()) {
this->SetEncodePosition(ridx, tree[nid].cleft());
} else {
this->SetEncodePosition(ridx, tree[nid].cright());
}
}
}
}
}
}
/*! \brief helper function to get statistics from a tree */
// Accumulates per-node gradient statistics over all buffered rows, using one
// temp vector per thread and reducing over threads for the qexpand nodes.
template<typename TStats>
inline void GetNodeStats(const std::vector<bst_gpair> &gpair,
const DMatrix &fmat,
const RegTree &tree,
std::vector< std::vector<TStats> > *p_thread_temp,
std::vector<TStats> *p_node_stats) {
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
const MetaInfo &info = fmat.info();
thread_temp.resize(this->get_nthread());
p_node_stats->resize(tree.param.num_nodes);
#pragma omp parallel
{
const int tid = omp_get_thread_num();
thread_temp[tid].resize(tree.param.num_nodes, TStats(param));
for (size_t i = 0; i < qexpand.size(); ++i) {
const unsigned nid = qexpand[i];
thread_temp[tid][nid].Clear();
}
}
const std::vector<bst_uint> &rowset = fmat.buffered_rowset();
// setup position
const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
const bst_uint ridx = rowset[i];
const int nid = position[ridx];
const int tid = omp_get_thread_num();
// negative nid marks an excluded row (deleted/subsampled/finished)
if (nid >= 0) {
thread_temp[tid][nid].Add(gpair, info, ridx);
}
}
// sum the per thread statistics together
for (size_t j = 0; j < qexpand.size(); ++j) {
const int nid = qexpand[j];
TStats &s = (*p_node_stats)[nid];
s.Clear();
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
s.Add(thread_temp[tid][nid]);
}
}
}
/*! \brief common helper data structure to build sketch */
struct SketchEntry {
/*! \brief total sum of amount to be met */
double sum_total;
/*! \brief statistics used in the sketch */
double rmin, wmin;
/*! \brief last seen feature value */
bst_float last_fvalue;
/*! \brief current size of sketch */
double next_goal;
// pointer to the sketch to put things in
common::WXQuantileSketch<bst_float, bst_float> *sketch;
// initialize the space
inline void Init(unsigned max_size) {
// next_goal == -1 marks "no value pushed yet"; see Push()
next_goal = -1.0f;
rmin = wmin = 0.0f;
sketch->temp.Reserve(max_size + 1);
sketch->temp.size = 0;
}
/*!
* \brief push a new element to sketch
* \param fvalue feature value, comes in sorted ascending order
* \param w weight
* \param max_size
*/
inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
if (next_goal == -1.0f) {
// first value: just open the running interval
next_goal = 0.0f;
last_fvalue = fvalue;
wmin = w;
return;
}
if (last_fvalue != fvalue) {
// weight for last_fvalue is complete; decide whether to emit it
double rmax = rmin + wmin;
if (rmax >= next_goal && sketch->temp.size != max_size) {
if (sketch->temp.size == 0 ||
last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
CHECK_LT(sketch->temp.size, max_size)
<< "invalid maximum size max_size=" << max_size
<< ", stemp.size" << sketch->temp.size;
++sketch->temp.size;
}
if (sketch->temp.size == max_size) {
// sketch is full: set an unreachable goal so nothing more is emitted
next_goal = sum_total * 2.0f + 1e-5f;
} else {
next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
}
} else {
if (rmax >= next_goal) {
// NOTE(review): "naxt_goal" in the log text is a typo for "next_goal",
// kept byte-identical here since this is a runtime string
LOG(TRACKER) << "INFO: rmax=" << rmax
<< ", sum_total=" << sum_total
<< ", naxt_goal=" << next_goal
<< ", size=" << sketch->temp.size;
}
}
rmin = rmax;
wmin = w;
last_fvalue = fvalue;
} else {
// same feature value: merge the weights
wmin += w;
}
}
/*! \brief push final unfinished value to the sketch */
inline void Finalize(unsigned max_size) {
double rmax = rmin + wmin;
if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
CHECK_LE(sketch->temp.size, max_size)
<< "Finalize: invalid maximum size, max_size=" << max_size
<< ", stemp.size=" << sketch->temp.size;
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
++sketch->temp.size;
}
sketch->PushTemp();
}
};
/*! \brief training parameter of tree grower */
TrainParam param;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand;
/*!
* \brief map active node to is working index offset in qexpand,
* can be -1, which means the node is node actively expanding
*/
std::vector<int> node2workindex;
/*!
* \brief position of each instance in the tree
* can be negative, which means this position is no longer expanding
* see also Decode/EncodePosition
*/
std::vector<int> position;
private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
// update the node2workindex
// NOTE(review): the fill runs before the resize, so entries added by the
// resize are value-initialized to 0, not -1 -- verify this is intended.
std::fill(node2workindex.begin(), node2workindex.end(), -1);
node2workindex.resize(tree.param.num_nodes);
for (size_t i = 0; i < qexpand.size(); ++i) {
node2workindex[qexpand[i]] = static_cast<int>(i);
}
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
zeroslike_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
/* Fill the fp32 output tensor with zeros, shaped like the input tensor.
 * Returns 0 on success, -1 for unsupported ranks (dim_num > 4). */
int ref_zeroslike_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* rank < 4: flat fill over all elements */
    if (input_tensor->dim_num < 4)
    {
        float* out_data = (float*)output_tensor->data;
        int total_size = input_tensor->elem_num;
        for (int i = 0; i < total_size; i++)
        {
            /* BUG FIX: previously this zeroed input_data, clobbering the
             * input and leaving the output untouched */
            out_data[i] = 0.f;
        }
        return 0;
    }
    /* rank == 4 (NCHW): zero each channel plane, channels in parallel */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int c_step = h * w; /* elements per channel plane */
        float* out_data = (float*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* dst = out_data + c_step * q;
            for (int i = 0; i < c_step; i++)
            {
                dst[i] = 0.f;
            }
        }
        return 0;
    }
    return -1;
}
/* Fill the uint8 output tensor with zeros, shaped like the input tensor.
 * Returns 0 on success, -1 for unsupported ranks (dim_num > 4). */
int ref_zeroslike_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* rank < 4: flat fill over all elements */
    if (input_tensor->dim_num < 4)
    {
        uint8_t* out_data = (uint8_t*)output_tensor->data;
        int total_size = input_tensor->elem_num;
        for (int i = 0; i < total_size; i++)
        {
            /* BUG FIX: previously this zeroed input_data, clobbering the
             * input and leaving the output untouched */
            out_data[i] = 0;
        }
        return 0;
    }
    /* rank == 4 (NCHW): zero each channel plane, channels in parallel */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int c_step = h * w; /* elements per channel plane */
        uint8_t* out_data = (uint8_t*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            uint8_t* dst = out_data + c_step * q;
            for (int i = 0; i < c_step; i++)
            {
                dst[i] = 0;
            }
        }
        return 0;
    }
    return -1;
}
/* Lifecycle hook: nothing to set up for this reference implementation. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Lifecycle hook: nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
// exec_node->inplace_map_num = 0;
return 0;
}
/* Lifecycle hook: no pre-run preparation required. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Execute the zeroslike operator: dispatch on the input data type and zero
 * the output tensor.  Returns 0 on success, -1 for unsupported data types. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    /* removed unused local `layout` (was read from graph_layout, never used) */
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_zeroslike_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_zeroslike_uint8(input_tensor, output_tensor, exec_graph->num_thread);
    return ret;
}
/* Capability score for scheduler selection; returns OPS_SCORE_CANDO
 * (presumably the baseline priority for reference ops -- confirm). */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
/* Callback table registered for OP_ZEROSLIKE; unused hooks stay NULL. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference zeroslike implementation with the runtime. */
int register_zeroslike_ref_op()
{
return register_builtin_node_ops(OP_ZEROSLIKE, &hcl_node_ops);
}
/* Undo the registration. */
int unregister_zeroslike_ref_op()
{
return unregister_builtin_node_ops(OP_ZEROSLIKE, &hcl_node_ops);
}
|
convolutiondepthwise_3x3_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 stride-1 convolution for 4-element channel packing with bf16
// storage ("bf16s"): activations/weights are held as unsigned short bf16 words,
// widened to fp32 with a <<16 shift (shll / vshll.u16) for the arithmetic, and
// narrowed back with a truncating >>16 shift (shrn / vshrn.u32) on store.
// One group == one packed channel; bottom_blob is assumed pre-padded so rows
// r0..r3 can be read without bounds checks.
// NOTE(review): bias is read as fp32 (vld1q_f32), not bf16 — the bias blob is
// presumably kept in full precision by the caller; confirm against layer setup.
static void convdw3x3s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    // depthwise: one input channel per group, processed independently
    const int group = bottom_blob.c;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        // per-group bias broadcast; zero vector when the layer has no bias
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
        const unsigned short* k0 = kernel.row<const unsigned short>(g);
        unsigned short* outptr0 = out.row<unsigned short>(0);
        unsigned short* outptr1 = out.row<unsigned short>(1);
        const Mat img0 = bottom_blob.channel(g);
        // four consecutive input rows: two output rows per iteration need r0..r3
        const unsigned short* r0 = img0.row<const unsigned short>(0);
        const unsigned short* r1 = img0.row<const unsigned short>(1);
        const unsigned short* r2 = img0.row<const unsigned short>(2);
        const unsigned short* r3 = img0.row<const unsigned short>(3);
        // widen the nine 3x3 kernel taps from bf16 to fp32 once per group
        float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0));
        float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4));
        float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8));
        float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12));
        float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16));
        float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20));
        float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24));
        float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28));
        float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32));
        int i = 0;
#if __aarch64__
        // aarch64 fast path: produce two output rows per iteration (outptr0/outptr1),
        // sharing the middle input rows r1/r2 between the two accumulations
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            // 4 output pixels per row per iteration (8 fp32x4 accumulators v16..v23)
            for (; j + 3 < outw; j += 4)
            {
                asm volatile(
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r10 r11 r12 r13
                    "mov v16.16b, %21.16b \n" // sum00
                    "mov v17.16b, %21.16b \n" // sum01
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v28.4h, v29.4h}, [%3] \n" // r14 r15
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "mov v18.16b, %21.16b \n" // sum02
                    "mov v19.16b, %21.16b \n" // sum03
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "mov v20.16b, %21.16b \n" // sum10
                    "fmla v16.4s, %15.4s, v10.4s \n"
                    "fmla v17.4s, %15.4s, v11.4s \n"
                    "mov v21.16b, %21.16b \n" // sum11
                    "fmla v18.4s, %15.4s, v12.4s \n"
                    "fmla v19.4s, %15.4s, v13.4s \n"
                    "mov v22.16b, %21.16b \n" // sum12
                    "fmla v20.4s, %12.4s, v10.4s \n"
                    "fmla v21.4s, %12.4s, v11.4s \n"
                    "mov v23.16b, %21.16b \n" // sum13
                    "fmla v22.4s, %12.4s, v12.4s \n"
                    "fmla v23.4s, %12.4s, v13.4s \n"
                    "shll v28.4s, v28.4h, #16 \n"
                    "fmla v16.4s, %16.4s, v11.4s \n"
                    "fmla v17.4s, %16.4s, v12.4s \n"
                    "shll v29.4s, v29.4h, #16 \n"
                    "fmla v18.4s, %16.4s, v13.4s \n"
                    "fmla v19.4s, %16.4s, v28.4s \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%4], #32 \n" // r20 r21 r22 r23
                    "fmla v20.4s, %13.4s, v11.4s \n"
                    "fmla v21.4s, %13.4s, v12.4s \n"
                    "fmla v22.4s, %13.4s, v13.4s \n"
                    "fmla v23.4s, %13.4s, v28.4s \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld1 {v14.4h, v15.4h}, [%4] \n" // r24 r25
                    "fmla v16.4s, %17.4s, v12.4s \n"
                    "fmla v17.4s, %17.4s, v13.4s \n"
                    "shll v24.4s, v24.4h, #16 \n"
                    "fmla v18.4s, %17.4s, v28.4s \n"
                    "fmla v19.4s, %17.4s, v29.4s \n"
                    "shll v25.4s, v25.4h, #16 \n"
                    "fmla v20.4s, %14.4s, v12.4s \n"
                    "fmla v21.4s, %14.4s, v13.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2], #32 \n" // r00 r01 r02 r03
                    "fmla v22.4s, %14.4s, v28.4s \n"
                    "fmla v23.4s, %14.4s, v29.4s \n"
                    "shll v26.4s, v26.4h, #16 \n"
                    "fmla v16.4s, %18.4s, v24.4s \n"
                    "fmla v17.4s, %18.4s, v25.4s \n"
                    "shll v27.4s, v27.4h, #16 \n"
                    "fmla v18.4s, %18.4s, v26.4s \n"
                    "fmla v19.4s, %18.4s, v27.4s \n"
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%5], #32 \n" // r30 r31 r32 r33
                    "fmla v20.4s, %15.4s, v24.4s \n"
                    "fmla v21.4s, %15.4s, v25.4s \n"
                    "shll v14.4s, v14.4h, #16 \n"
                    "fmla v22.4s, %15.4s, v26.4s \n"
                    "fmla v23.4s, %15.4s, v27.4s \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v16.4s, %19.4s, v25.4s \n"
                    "fmla v17.4s, %19.4s, v26.4s \n"
                    "fmla v18.4s, %19.4s, v27.4s \n"
                    "fmla v19.4s, %19.4s, v14.4s \n"
                    "fmla v20.4s, %16.4s, v25.4s \n"
                    "fmla v21.4s, %16.4s, v26.4s \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v24.4h, v25.4h}, [%2] \n" // r04 r05
                    "fmla v22.4s, %16.4s, v27.4s \n"
                    "fmla v23.4s, %16.4s, v14.4s \n"
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "fmla v16.4s, %20.4s, v26.4s \n"
                    "fmla v17.4s, %20.4s, v27.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "fmla v18.4s, %20.4s, v14.4s \n"
                    "fmla v19.4s, %20.4s, v15.4s \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmla v20.4s, %17.4s, v26.4s \n"
                    "fmla v21.4s, %17.4s, v27.4s \n"
                    "prfm pldl1keep, [%5, #128] \n"
                    "ld1 {v26.4h, v27.4h}, [%5] \n" // r34 r35
                    "fmla v22.4s, %17.4s, v14.4s \n"
                    "fmla v23.4s, %17.4s, v15.4s \n"
                    "shll v28.4s, v28.4h, #16 \n"
                    "fmla v16.4s, %12.4s, v10.4s \n"
                    "fmla v17.4s, %12.4s, v11.4s \n"
                    "shll v29.4s, v29.4h, #16 \n"
                    "fmla v18.4s, %12.4s, v12.4s \n"
                    "fmla v19.4s, %12.4s, v13.4s \n"
                    "shll v30.4s, v30.4h, #16 \n"
                    "fmla v20.4s, %18.4s, v28.4s \n"
                    "fmla v21.4s, %18.4s, v29.4s \n"
                    "shll v31.4s, v31.4h, #16 \n"
                    "fmla v22.4s, %18.4s, v30.4s \n"
                    "fmla v23.4s, %18.4s, v31.4s \n"
                    "shll v24.4s, v24.4h, #16 \n"
                    "fmla v16.4s, %13.4s, v11.4s \n"
                    "fmla v17.4s, %13.4s, v12.4s \n"
                    "fmla v18.4s, %13.4s, v13.4s \n"
                    "fmla v19.4s, %13.4s, v24.4s \n"
                    "shll v26.4s, v26.4h, #16 \n"
                    "fmla v20.4s, %19.4s, v29.4s \n"
                    "fmla v21.4s, %19.4s, v30.4s \n"
                    "fmla v22.4s, %19.4s, v31.4s \n"
                    "fmla v23.4s, %19.4s, v26.4s \n"
                    "shll v25.4s, v25.4h, #16 \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v13.4s \n"
                    "fmla v18.4s, %14.4s, v24.4s \n"
                    "fmla v19.4s, %14.4s, v25.4s \n"
                    "shll v27.4s, v27.4h, #16 \n"
                    "fmla v20.4s, %20.4s, v30.4s \n"
                    "fmla v21.4s, %20.4s, v31.4s \n"
                    "fmla v22.4s, %20.4s, v26.4s \n"
                    "fmla v23.4s, %20.4s, v27.4s \n"
                    "shrn v16.4h, v16.4s, #16 \n"
                    "shrn v17.4h, v17.4s, #16 \n"
                    "shrn v18.4h, v18.4s, #16 \n"
                    "shrn v19.4h, v19.4s, #16 \n"
                    "shrn v20.4h, v20.4s, #16 \n"
                    "shrn v21.4h, v21.4s, #16 \n"
                    "shrn v22.4h, v22.4s, #16 \n"
                    "shrn v23.4h, v23.4s, #16 \n"
                    "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n"
                    "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
                    : "=r"(outptr0), // %0
                    "=r"(outptr1), // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2),      // %4
                    "=r"(r3)       // %5
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k00),  // %12
                    "w"(_k01),  // %13
                    "w"(_k02),  // %14
                    "w"(_k10),  // %15
                    "w"(_k11),  // %16
                    "w"(_k12),  // %17
                    "w"(_k20),  // %18
                    "w"(_k21),  // %19
                    "w"(_k22),  // %20
                    "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
            // 2 output pixels per row per iteration; pointers advanced by the
            // explicit "add" instructions inside the asm (stride 2*4 bf16 = 16 bytes)
            for (; j + 1 < outw; j += 2)
            {
                asm volatile(
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3] \n" // r10 r11 r12 r13
                    "mov v16.16b, %21.16b \n" // sum00
                    "mov v17.16b, %21.16b \n" // sum01
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "mov v18.16b, %21.16b \n" // sum10
                    "mov v19.16b, %21.16b \n" // sum11
                    "fmla v16.4s, %15.4s, v10.4s \n"
                    "fmla v17.4s, %15.4s, v11.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "fmla v18.4s, %12.4s, v10.4s \n"
                    "fmla v19.4s, %12.4s, v11.4s \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmla v16.4s, %16.4s, v11.4s \n"
                    "fmla v17.4s, %16.4s, v12.4s \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r20 r21 r22 r23
                    "fmla v18.4s, %13.4s, v11.4s \n"
                    "fmla v19.4s, %13.4s, v12.4s \n"
                    "shll v20.4s, v20.4h, #16 \n"
                    "fmla v16.4s, %17.4s, v12.4s \n"
                    "fmla v17.4s, %17.4s, v13.4s \n"
                    "shll v21.4s, v21.4h, #16 \n"
                    "fmla v18.4s, %14.4s, v12.4s \n"
                    "fmla v19.4s, %14.4s, v13.4s \n"
                    "shll v22.4s, v22.4h, #16 \n"
                    "fmla v16.4s, %18.4s, v20.4s \n"
                    "fmla v17.4s, %18.4s, v21.4s \n"
                    "shll v23.4s, v23.4h, #16 \n"
                    "fmla v18.4s, %15.4s, v20.4s \n"
                    "fmla v19.4s, %15.4s, v21.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2] \n" // r00 r01 r02 r03
                    "fmla v16.4s, %19.4s, v21.4s \n"
                    "fmla v17.4s, %19.4s, v22.4s \n"
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5] \n" // r30 r31 r32 r33
                    "fmla v18.4s, %16.4s, v21.4s \n"
                    "fmla v19.4s, %16.4s, v22.4s \n"
                    "shll v10.4s, v10.4h, #16 \n"
                    "fmla v16.4s, %20.4s, v22.4s \n"
                    "fmla v17.4s, %20.4s, v23.4s \n"
                    "shll v24.4s, v24.4h, #16 \n"
                    "fmla v18.4s, %17.4s, v22.4s \n"
                    "fmla v19.4s, %17.4s, v23.4s \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "shll v25.4s, v25.4h, #16 \n"
                    "fmla v16.4s, %12.4s, v10.4s \n"
                    "fmla v17.4s, %12.4s, v11.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "fmla v18.4s, %18.4s, v24.4s \n"
                    "fmla v19.4s, %18.4s, v25.4s \n"
                    "shll v26.4s, v26.4h, #16 \n"
                    "fmla v16.4s, %13.4s, v11.4s \n"
                    "fmla v17.4s, %13.4s, v12.4s \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmla v18.4s, %19.4s, v25.4s \n"
                    "fmla v19.4s, %19.4s, v26.4s \n"
                    "shll v27.4s, v27.4h, #16 \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v13.4s \n"
                    "add %3, %3, #16 \n"
                    "fmla v18.4s, %20.4s, v26.4s \n"
                    "fmla v19.4s, %20.4s, v27.4s \n"
                    "add %4, %4, #16 \n"
                    "shrn v16.4h, v16.4s, #16 \n"
                    "shrn v17.4h, v17.4s, #16 \n"
                    "add %2, %2, #16 \n"
                    "shrn v18.4h, v18.4s, #16 \n"
                    "shrn v19.4h, v19.4s, #16 \n"
                    "add %5, %5, #16 \n"
                    "st1 {v16.4h, v17.4h}, [%0], #16 \n"
                    "st1 {v18.4h, v19.4h}, [%1], #16 \n"
                    : "=r"(outptr0), // %0
                    "=r"(outptr1), // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2),      // %4
                    "=r"(r3)       // %5
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k00),  // %12
                    "w"(_k01),  // %13
                    "w"(_k02),  // %14
                    "w"(_k10),  // %15
                    "w"(_k11),  // %16
                    "w"(_k12),  // %17
                    "w"(_k20),  // %18
                    "w"(_k21),  // %19
                    "w"(_k22),  // %20
                    "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
            }
            // single output pixel per row: bias goes into v18/v19, the bias-free
            // partial sums accumulate in v16/v17 and are folded in with fadd
            for (; j < outw; j++)
            {
                asm volatile(
                    "prfm pldl1keep, [%3, #192] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h}, [%3] \n" // r10 r11 r12
                    "mov v18.16b, %21.16b \n" // sum0
                    "mov v19.16b, %21.16b \n" // sum1
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "fmul v16.4s, %15.4s, v10.4s \n"
                    "fmul v17.4s, %12.4s, v10.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "fmla v18.4s, %16.4s, v11.4s \n"
                    "fmla v19.4s, %13.4s, v11.4s \n"
                    "prfm pldl1keep, [%4, #192] \n"
                    "ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n" // r20 r21 r22
                    "fmla v16.4s, %17.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v12.4s \n"
                    "shll v20.4s, v20.4h, #16 \n"
                    "shll v21.4s, v21.4h, #16 \n"
                    "fmla v18.4s, %18.4s, v20.4s \n"
                    "fmla v19.4s, %15.4s, v20.4s \n"
                    "prfm pldl1keep, [%2, #192] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h}, [%2] \n" // r00 r01 r02
                    "shll v22.4s, v22.4h, #16 \n"
                    "prfm pldl1keep, [%5, #192] \n"
                    "ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n" // r30 r31 r32
                    "fmla v16.4s, %19.4s, v21.4s \n"
                    "fmla v17.4s, %16.4s, v21.4s \n"
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v24.4s, v24.4h, #16 \n"
                    "fmla v18.4s, %20.4s, v22.4s \n"
                    "fmla v19.4s, %17.4s, v22.4s \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "shll v25.4s, v25.4h, #16 \n"
                    "fmla v16.4s, %12.4s, v10.4s \n"
                    "fmla v17.4s, %18.4s, v24.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v26.4s, v26.4h, #16 \n"
                    "fmla v18.4s, %13.4s, v11.4s \n"
                    "fmla v19.4s, %19.4s, v25.4s \n"
                    "add %3, %3, #8 \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %20.4s, v26.4s \n"
                    "add %4, %4, #8 \n"
                    "fadd v18.4s, v18.4s, v16.4s \n"
                    "fadd v19.4s, v19.4s, v17.4s \n"
                    "add %2, %2, #8 \n"
                    "shrn v18.4h, v18.4s, #16 \n"
                    "shrn v19.4h, v19.4s, #16 \n"
                    "add %5, %5, #8 \n"
                    "st1 {v18.4h}, [%0], #8 \n"
                    "st1 {v19.4h}, [%1], #8 \n"
                    : "=r"(outptr0), // %0
                    "=r"(outptr1), // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2),      // %4
                    "=r"(r3)       // %5
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k00),  // %12
                    "w"(_k01),  // %13
                    "w"(_k02),  // %14
                    "w"(_k10),  // %15
                    "w"(_k11),  // %16
                    "w"(_k12),  // %17
                    "w"(_k20),  // %18
                    "w"(_k21),  // %19
                    "w"(_k22),  // %20
                    "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26");
            }
            // skip the 2-element right padding and one extra input row
            // (two output rows consumed two input rows' worth of advance)
            r0 += 2 * 4 + w * 4;
            r1 += 2 * 4 + w * 4;
            r2 += 2 * 4 + w * 4;
            r3 += 2 * 4 + w * 4;
            outptr0 += outw * 4;
            outptr1 += outw * 4;
        }
#endif // __aarch64__
        // remaining rows one at a time (also the whole body on armv7)
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j + 3 < outw; j += 4)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03
                    "mov v16.16b, %17.16b \n" // sum00
                    "mov v17.16b, %17.16b \n" // sum01
                    "mov v18.16b, %17.16b \n" // sum02
                    "mov v19.16b, %17.16b \n" // sum03
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "fmla v16.4s, %8.4s, v10.4s \n"
                    "fmla v17.4s, %8.4s, v11.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmla v18.4s, %8.4s, v12.4s \n"
                    "fmla v19.4s, %8.4s, v13.4s \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v14.4h, v15.4h}, [%1] \n" // r04 r05
                    "fmla v16.4s, %9.4s, v11.4s \n"
                    "fmla v17.4s, %9.4s, v12.4s \n"
                    "shll v14.4s, v14.4h, #16 \n"
                    "fmla v18.4s, %9.4s, v13.4s \n"
                    "fmla v19.4s, %9.4s, v14.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r10 r11 r12 r13
                    "fmla v16.4s, %10.4s, v12.4s \n"
                    "fmla v17.4s, %10.4s, v13.4s \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v18.4s, %10.4s, v14.4s \n"
                    "fmla v19.4s, %10.4s, v15.4s \n"
                    "shll v20.4s, v20.4h, #16 \n"
                    "shll v21.4s, v21.4h, #16 \n"
                    "fmla v16.4s, %11.4s, v20.4s \n"
                    "fmla v17.4s, %11.4s, v21.4s \n"
                    "shll v22.4s, v22.4h, #16 \n"
                    "shll v23.4s, v23.4h, #16 \n"
                    "fmla v18.4s, %11.4s, v22.4s \n"
                    "fmla v19.4s, %11.4s, v23.4s \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v14.4h, v15.4h}, [%2] \n" // r14 r15
                    "fmla v16.4s, %12.4s, v21.4s \n"
                    "fmla v17.4s, %12.4s, v22.4s \n"
                    "shll v14.4s, v14.4h, #16 \n"
                    "fmla v18.4s, %12.4s, v23.4s \n"
                    "fmla v19.4s, %12.4s, v14.4s \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23
                    "fmla v16.4s, %13.4s, v22.4s \n"
                    "fmla v17.4s, %13.4s, v23.4s \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v18.4s, %13.4s, v14.4s \n"
                    "fmla v19.4s, %13.4s, v15.4s \n"
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "fmla v16.4s, %14.4s, v10.4s \n"
                    "fmla v17.4s, %14.4s, v11.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmla v18.4s, %14.4s, v12.4s \n"
                    "fmla v19.4s, %14.4s, v13.4s \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v14.4h, v15.4h}, [%3] \n" // r24 r25
                    "fmla v16.4s, %15.4s, v11.4s \n"
                    "fmla v17.4s, %15.4s, v12.4s \n"
                    "shll v14.4s, v14.4h, #16 \n"
                    "fmla v18.4s, %15.4s, v13.4s \n"
                    "fmla v19.4s, %15.4s, v14.4s \n"
                    "fmla v16.4s, %16.4s, v12.4s \n"
                    "fmla v17.4s, %16.4s, v13.4s \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v18.4s, %16.4s, v14.4s \n"
                    "fmla v19.4s, %16.4s, v15.4s \n"
                    "shrn v16.4h, v16.4s, #16 \n"
                    "shrn v17.4h, v17.4s, #16 \n"
                    "shrn v18.4h, v18.4s, #16 \n"
                    "shrn v19.4h, v19.4s, #16 \n"
                    "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2)  // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
                // armv7 path: q-register operands %q8..%q17, vshll.u16/vshrn.u32
                // perform the same bf16<->fp32 widening/narrowing
                asm volatile(
                    "pld [%1, #128] \n"
                    "vld1.u16 {d30-d31}, [%1 :64]! \n" // r00 r01
                    "vmov q10, %q17 \n" // sum00
                    "vmov q11, %q17 \n" // sum01
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q10, %q8, q14 \n"
                    "vmla.f32 q11, %q8, q15 \n"
                    "vmla.f32 q10, %q9, q15 \n"
                    "pld [%1, #128] \n"
                    "vld1.u16 {d30-d31}, [%1 :64]! \n" // r02 r03
                    "vmov q12, %q17 \n" // sum02
                    "vmov q13, %q17 \n" // sum03
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q12, %q8, q14 \n"
                    "vmla.f32 q11, %q9, q14 \n"
                    "vmla.f32 q13, %q8, q15 \n"
                    "vmla.f32 q10, %q10, q14 \n"
                    "vmla.f32 q12, %q9, q15 \n"
                    "vmla.f32 q11, %q10, q15 \n"
                    // "pld [%1, #128] \n"
                    "vld1.u16 {d30-d31}, [%1 :64] \n" // r04 r05
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q13, %q9, q14 \n"
                    "vmla.f32 q12, %q10, q14 \n"
                    "vmla.f32 q13, %q10, q15 \n"
                    "pld [%2, #128] \n"
                    "vld1.u16 {d30-d31}, [%2 :64]! \n" // r10 r11
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q10, %q11, q14 \n"
                    "vmla.f32 q11, %q11, q15 \n"
                    "vmla.f32 q10, %q12, q15 \n"
                    "pld [%2, #128] \n"
                    "vld1.u16 {d30-d31}, [%2 :64]! \n" // r12 r13
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q12, %q11, q14 \n"
                    "vmla.f32 q11, %q12, q14 \n"
                    "vmla.f32 q13, %q11, q15 \n"
                    "vmla.f32 q10, %q13, q14 \n"
                    "vmla.f32 q12, %q12, q15 \n"
                    "vmla.f32 q11, %q13, q15 \n"
                    // "pld [%2, #128] \n"
                    "vld1.u16 {d30-d31}, [%2 :64] \n" // r14 r15
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q13, %q12, q14 \n"
                    "vmla.f32 q12, %q13, q14 \n"
                    "vmla.f32 q13, %q13, q15 \n"
                    "pld [%3, #128] \n"
                    "vld1.u16 {d30-d31}, [%3 :64]! \n" // r20 r21
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q10, %q14, q14 \n"
                    "vmla.f32 q11, %q14, q15 \n"
                    "vmla.f32 q10, %q15, q15 \n"
                    "pld [%3, #128] \n"
                    "vld1.u16 {d30-d31}, [%3 :64]! \n" // r22 r23
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q12, %q14, q14 \n"
                    "vmla.f32 q11, %q15, q14 \n"
                    "vmla.f32 q13, %q14, q15 \n"
                    "vmla.f32 q10, %q16, q14 \n"
                    "vmla.f32 q12, %q15, q15 \n"
                    "vmla.f32 q11, %q16, q15 \n"
                    // "pld [%3, #128] \n"
                    "vld1.u16 {d30-d31}, [%3 :64] \n" // r24 r25
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q13, %q15, q14 \n"
                    "vmla.f32 q12, %q16, q14 \n"
                    "vmla.f32 q13, %q16, q15 \n"
                    "vshrn.u32 d20, q10, #16 \n"
                    "vshrn.u32 d21, q11, #16 \n"
                    "vshrn.u32 d22, q12, #16 \n"
                    "vshrn.u32 d23, q13, #16 \n"
                    "vst1.u16 {d20-d23}, [%0 :64]! \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2)  // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
            }
            for (; j + 1 < outw; j += 2)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1] \n" // r00 r01 r02 r03
                    "mov v18.16b, %17.16b \n" // sum00
                    "mov v19.16b, %17.16b \n" // sum01
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmul v16.4s, %8.4s, v12.4s \n"
                    "fmul v17.4s, %8.4s, v13.4s \n"
                    "shll v14.4s, v14.4h, #16 \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v18.4s, %9.4s, v13.4s \n"
                    "fmla v19.4s, %9.4s, v14.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r10 r11 r12 r13
                    "fmla v16.4s, %10.4s, v14.4s \n"
                    "fmla v17.4s, %10.4s, v15.4s \n"
                    "shll v20.4s, v20.4h, #16 \n"
                    "shll v21.4s, v21.4h, #16 \n"
                    "fmla v18.4s, %11.4s, v20.4s \n"
                    "fmla v19.4s, %11.4s, v21.4s \n"
                    "shll v22.4s, v22.4h, #16 \n"
                    "shll v23.4s, v23.4h, #16 \n"
                    "fmla v16.4s, %12.4s, v21.4s \n"
                    "fmla v17.4s, %12.4s, v22.4s \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%3] \n" // r20 r21 r22 r23
                    "fmla v18.4s, %13.4s, v22.4s \n"
                    "fmla v19.4s, %13.4s, v23.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v13.4s \n"
                    "shll v14.4s, v14.4h, #16 \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v18.4s, %15.4s, v13.4s \n"
                    "fmla v19.4s, %15.4s, v14.4s \n"
                    "add %1, %1, #16 \n"
                    "fmla v16.4s, %16.4s, v14.4s \n"
                    "fmla v17.4s, %16.4s, v15.4s \n"
                    "add %2, %2, #16 \n"
                    "fadd v18.4s, v18.4s, v16.4s \n"
                    "fadd v19.4s, v19.4s, v17.4s \n"
                    "add %3, %3, #16 \n"
                    "shrn v18.4h, v18.4s, #16 \n"
                    "shrn v19.4h, v19.4s, #16 \n"
                    "st1 {v18.4h, v19.4h}, [%0], #16 \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2)  // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
                asm volatile(
                    "pld [%1, #256] \n"
                    "vld1.u16 {d28-d31}, [%1 :64] \n" // r00 r01 r02 r03
                    "vmov q10, %q17 \n" // sum00
                    "vmov q11, %q17 \n" // sum01
                    "vshll.u16 q12, d28, #16 \n"
                    "vshll.u16 q13, d29, #16 \n"
                    "vmla.f32 q10, %q8, q12 \n"
                    "vmla.f32 q11, %q8, q13 \n"
                    "vshll.u16 q14, d30, #16 \n"
                    "vmla.f32 q10, %q9, q13 \n"
                    "vmla.f32 q11, %q9, q14 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q10, %q10, q14 \n"
                    "vmla.f32 q11, %q10, q15 \n"
                    "pld [%2, #256] \n"
                    "vld1.u16 {d28-d31}, [%2 :64] \n" // r10 r11 r12 r13
                    "vshll.u16 q12, d28, #16 \n"
                    "vshll.u16 q13, d29, #16 \n"
                    "vmla.f32 q10, %q11, q12 \n"
                    "vmla.f32 q11, %q11, q13 \n"
                    "vshll.u16 q14, d30, #16 \n"
                    "vmla.f32 q10, %q12, q13 \n"
                    "vmla.f32 q11, %q12, q14 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q10, %q13, q14 \n"
                    "vmla.f32 q11, %q13, q15 \n"
                    "pld [%3, #256] \n"
                    "vld1.u16 {d28-d31}, [%3 :64] \n" // r20 r21 r22 r23
                    "vshll.u16 q12, d28, #16 \n"
                    "vshll.u16 q13, d29, #16 \n"
                    "vmla.f32 q10, %q14, q12 \n"
                    "vmla.f32 q11, %q14, q13 \n"
                    "vshll.u16 q14, d30, #16 \n"
                    "vmla.f32 q10, %q15, q13 \n"
                    "vmla.f32 q11, %q15, q14 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q10, %q16, q14 \n"
                    "vmla.f32 q11, %q16, q15 \n"
                    "add %1, %1, #16 \n"
                    "add %2, %2, #16 \n"
                    "vshrn.u32 d20, q10, #16 \n"
                    "vshrn.u32 d21, q11, #16 \n"
                    "add %3, %3, #16 \n"
                    "vst1.u16 {d20-d21}, [%0 :64]! \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2)  // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
            }
            // scalar tail in intrinsics: one output pixel (4 packed lanes) per step
            for (; j < outw; j++)
            {
                float32x4_t _sum0 = _bias0;
                float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1));
                float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4));
                float32x4_t _r12 = vcvt_f32_bf16(vld1_u16(r1 + 8));
                float32x4_t _r20 = vcvt_f32_bf16(vld1_u16(r2));
                float32x4_t _r21 = vcvt_f32_bf16(vld1_u16(r2 + 4));
                float32x4_t _r22 = vcvt_f32_bf16(vld1_u16(r2 + 8));
                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
                r0 += 4;
                r1 += 4;
                r2 += 4;
                outptr0 += 4;
            }
            // stride-1: advance past the 2-element horizontal padding only
            r0 += 2 * 4;
            r1 += 2 * 4;
            r2 += 2 * 4;
        }
    }
}
// Depthwise 3x3 stride-2 convolution for 4-element channel packing with bf16
// storage. Same widening/narrowing scheme as the stride-1 kernel (shll <<16 to
// fp32, shrn >>16 back to bf16); only one output row is produced per pass since
// stride 2 gives no vertical input-row reuse between adjacent output rows.
static void convdw3x3s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    // per output row, each input row pointer consumed 2*outw elements;
    // tailstep skips the remainder of that row plus one full extra row
    // (vertical stride 2), in packed-4 units
    const int tailstep = (w - 2 * outw + w) * 4;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        // per-group bias broadcast (bias blob read as fp32); zero when absent
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
        const unsigned short* k0 = kernel.row<const unsigned short>(g);
        unsigned short* outptr0 = out;
        const Mat img0 = bottom_blob.channel(g);
        const unsigned short* r0 = img0.row<const unsigned short>(0);
        const unsigned short* r1 = img0.row<const unsigned short>(1);
        const unsigned short* r2 = img0.row<const unsigned short>(2);
        // widen the nine 3x3 kernel taps from bf16 to fp32 once per group
        float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0));
        float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4));
        float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8));
        float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12));
        float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16));
        float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20));
        float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24));
        float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28));
        float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32));
        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
#if __aarch64__
            // 4 output pixels per iteration: needs input columns 0..8 of each row
            // (loaded as two 4-register groups plus one trailing register)
            for (; j + 3 < outw; j += 4)
            {
                asm volatile(
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03
                    "mov v28.16b, %17.16b \n" // sum00
                    "mov v29.16b, %17.16b \n" // sum01
                    "mov v30.16b, %17.16b \n" // sum02
                    "mov v31.16b, %17.16b \n" // sum03
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%1], #32 \n" // r04 r05 r06 r07
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "prfm pldl1keep, [%1, #64] \n"
                    "ld1 {v18.4h}, [%1] \n" // r08
                    "shll v14.4s, v14.4h, #16 \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v28.4s, %8.4s, v10.4s \n"
                    "fmla v29.4s, %8.4s, v12.4s \n"
                    "shll v16.4s, v16.4h, #16 \n"
                    "fmla v30.4s, %8.4s, v14.4s \n"
                    "fmla v31.4s, %8.4s, v16.4s \n"
                    "shll v17.4s, v17.4h, #16 \n"
                    "fmla v28.4s, %9.4s, v11.4s \n"
                    "fmla v29.4s, %9.4s, v13.4s \n"
                    "fmla v30.4s, %9.4s, v15.4s \n"
                    "fmla v31.4s, %9.4s, v17.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r10 r11 r12 r13
                    "fmla v28.4s, %10.4s, v12.4s \n"
                    "fmla v29.4s, %10.4s, v14.4s \n"
                    "shll v18.4s, v18.4h, #16 \n"
                    "fmla v30.4s, %10.4s, v16.4s \n"
                    "fmla v31.4s, %10.4s, v18.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" // r14 r15 r16 r17
                    "shll v20.4s, v20.4h, #16 \n"
                    "shll v21.4s, v21.4h, #16 \n"
                    "shll v22.4s, v22.4h, #16 \n"
                    "shll v23.4s, v23.4h, #16 \n"
                    "prfm pldl1keep, [%2, #64] \n"
                    "ld1 {v19.4h}, [%2] \n" // r18
                    "shll v24.4s, v24.4h, #16 \n"
                    "shll v25.4s, v25.4h, #16 \n"
                    "fmla v28.4s, %11.4s, v20.4s \n"
                    "fmla v29.4s, %11.4s, v22.4s \n"
                    "shll v26.4s, v26.4h, #16 \n"
                    "fmla v30.4s, %11.4s, v24.4s \n"
                    "fmla v31.4s, %11.4s, v26.4s \n"
                    "shll v27.4s, v27.4h, #16 \n"
                    "fmla v28.4s, %12.4s, v21.4s \n"
                    "fmla v29.4s, %12.4s, v23.4s \n"
                    "fmla v30.4s, %12.4s, v25.4s \n"
                    "fmla v31.4s, %12.4s, v27.4s \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23
                    "fmla v28.4s, %13.4s, v22.4s \n"
                    "fmla v29.4s, %13.4s, v24.4s \n"
                    "shll v19.4s, v19.4h, #16 \n"
                    "fmla v30.4s, %13.4s, v26.4s \n"
                    "fmla v31.4s, %13.4s, v19.4s \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%3], #32 \n" // r24 r25 r26 r27
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "prfm pldl1keep, [%3, #64] \n"
                    "ld1 {v18.4h}, [%3] \n" // r28
                    "shll v14.4s, v14.4h, #16 \n"
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v28.4s, %14.4s, v10.4s \n"
                    "fmla v29.4s, %14.4s, v12.4s \n"
                    "shll v16.4s, v16.4h, #16 \n"
                    "fmla v30.4s, %14.4s, v14.4s \n"
                    "fmla v31.4s, %14.4s, v16.4s \n"
                    "shll v17.4s, v17.4h, #16 \n"
                    "fmla v28.4s, %15.4s, v11.4s \n"
                    "fmla v29.4s, %15.4s, v13.4s \n"
                    "fmla v30.4s, %15.4s, v15.4s \n"
                    "fmla v31.4s, %15.4s, v17.4s \n"
                    "fmla v28.4s, %16.4s, v12.4s \n"
                    "fmla v29.4s, %16.4s, v14.4s \n"
                    "shll v18.4s, v18.4h, #16 \n"
                    "fmla v30.4s, %16.4s, v16.4s \n"
                    "fmla v31.4s, %16.4s, v18.4s \n"
                    "shrn v28.4h, v28.4s, #16 \n"
                    "shrn v29.4h, v29.4s, #16 \n"
                    "shrn v30.4h, v30.4s, #16 \n"
                    "shrn v31.4h, v31.4s, #16 \n"
                    "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2)  // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
#endif // __aarch64__
            // 2 output pixels per iteration (sole vector path on armv7)
            for (; j + 1 < outw; j += 2)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03
                    "mov v22.16b, %17.16b \n" // sum00
                    "mov v23.16b, %17.16b \n" // sum01
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "fmul v20.4s, %8.4s, v10.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmul v21.4s, %8.4s, v12.4s \n"
                    "prfm pldl1keep, [%1, #64] \n"
                    "ld1 {v14.4h}, [%1] \n" // r04
                    "fmla v22.4s, %9.4s, v11.4s \n"
                    "fmla v23.4s, %9.4s, v13.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13
                    "shll v14.4s, v14.4h, #16 \n"
                    "fmla v20.4s, %10.4s, v12.4s \n"
                    "fmla v21.4s, %10.4s, v14.4s \n"
                    "shll v16.4s, v16.4h, #16 \n"
                    "shll v17.4s, v17.4h, #16 \n"
                    "fmla v22.4s, %11.4s, v16.4s \n"
                    "shll v18.4s, v18.4h, #16 \n"
                    "shll v19.4s, v19.4h, #16 \n"
                    "fmla v23.4s, %11.4s, v18.4s \n"
                    "prfm pldl1keep, [%2, #64] \n"
                    "ld1 {v15.4h}, [%2] \n" // r14
                    "fmla v20.4s, %12.4s, v17.4s \n"
                    "fmla v21.4s, %12.4s, v19.4s \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23
                    "shll v15.4s, v15.4h, #16 \n"
                    "fmla v22.4s, %13.4s, v18.4s \n"
                    "fmla v23.4s, %13.4s, v15.4s \n"
                    "shll v10.4s, v10.4h, #16 \n"
                    "shll v11.4s, v11.4h, #16 \n"
                    "fmla v20.4s, %14.4s, v10.4s \n"
                    "shll v12.4s, v12.4h, #16 \n"
                    "shll v13.4s, v13.4h, #16 \n"
                    "fmla v21.4s, %14.4s, v12.4s \n"
                    "prfm pldl1keep, [%3, #64] \n"
                    "ld1 {v14.4h}, [%3] \n" // r24
                    "fmla v22.4s, %15.4s, v11.4s \n"
                    "fmla v23.4s, %15.4s, v13.4s \n"
                    "shll v14.4s, v14.4h, #16 \n"
                    "fmla v20.4s, %16.4s, v12.4s \n"
                    "fmla v21.4s, %16.4s, v14.4s \n"
                    "fadd v22.4s, v20.4s, v22.4s \n"
                    "fadd v23.4s, v21.4s, v23.4s \n"
                    "shrn v22.4h, v22.4s, #16 \n"
                    "shrn v23.4h, v23.4s, #16 \n"
                    "st1 {v22.4h, v23.4h}, [%0], #16 \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2)  // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
                asm volatile(
                    "pld [%1, #256] \n"
                    "vld1.u16 {d28-d31}, [%1 :64]! \n" // r00 r01 r02 r03
                    "vmov q10, %q17 \n" // sum00
                    "vmov q11, %q17 \n" // sum01
                    "vshll.u16 q12, d28, #16 \n"
                    "vshll.u16 q13, d29, #16 \n"
                    "vmla.f32 q10, %q8, q12 \n"
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q11, %q8, q14 \n"
                    "vld1.u16 {d25}, [%1] \n" // r04
                    "vmla.f32 q10, %q9, q13 \n"
                    "vmla.f32 q11, %q9, q15 \n"
                    "vshll.u16 q12, d25, #16 \n"
                    "vmla.f32 q10, %q10, q14 \n"
                    "pld [%2, #256] \n"
                    "vld1.u16 {d28-d31}, [%2 :64]! \n" // r10 r11 r12 r13
                    "vmla.f32 q11, %q10, q12 \n"
                    "vshll.u16 q12, d28, #16 \n"
                    "vshll.u16 q13, d29, #16 \n"
                    "vmla.f32 q10, %q11, q12 \n"
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q11, %q11, q14 \n"
                    "vld1.u16 {d25}, [%2] \n" // r14
                    "vmla.f32 q10, %q12, q13 \n"
                    "vmla.f32 q11, %q12, q15 \n"
                    "vshll.u16 q12, d25, #16 \n"
                    "vmla.f32 q10, %q13, q14 \n"
                    "pld [%3, #256] \n"
                    "vld1.u16 {d28-d31}, [%3 :64]! \n" // r20 r21 r22 r23
                    "vmla.f32 q11, %q13, q12 \n"
                    "vshll.u16 q12, d28, #16 \n"
                    "vshll.u16 q13, d29, #16 \n"
                    "vmla.f32 q10, %q14, q12 \n"
                    "vshll.u16 q14, d30, #16 \n"
                    "vshll.u16 q15, d31, #16 \n"
                    "vmla.f32 q11, %q14, q14 \n"
                    "vld1.u16 {d25}, [%3] \n" // r24
                    "vmla.f32 q10, %q15, q13 \n"
                    "vmla.f32 q11, %q15, q15 \n"
                    "vshll.u16 q12, d25, #16 \n"
                    "vmla.f32 q10, %q16, q14 \n"
                    "vmla.f32 q11, %q16, q12 \n"
                    "vshrn.u32 d20, q10, #16 \n"
                    "vshrn.u32 d21, q11, #16 \n"
                    "vst1.u16 {d20-d21}, [%0 :64]! \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2)  // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
            }
            // scalar tail in intrinsics; input pointers step 2*4 (stride 2, packed 4)
            for (; j < outw; j++)
            {
                float32x4_t _sum0 = _bias0;
                float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1));
                float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4));
                float32x4_t _r12 = vcvt_f32_bf16(vld1_u16(r1 + 8));
                float32x4_t _r20 = vcvt_f32_bf16(vld1_u16(r2));
                float32x4_t _r21 = vcvt_f32_bf16(vld1_u16(r2 + 4));
                float32x4_t _r22 = vcvt_f32_bf16(vld1_u16(r2 + 8));
                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
                r0 += 2 * 4;
                r1 += 2 * 4;
                r2 += 2 * 4;
                outptr0 += 4;
            }
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
GB_binop__first_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_uint16
// A.*B function (eWiseMult): GB_AemultB__first_uint16
// A*D function (colscale): GB_AxD__first_uint16
// D*A function (rowscale): GB_DxB__first_uint16
// C+=B function (dense accum): GB_Cdense_accumB__first_uint16
// C+=b function (dense accum): GB_Cdense_accumb__first_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_uint16
// C=scalar+B GB_bind1st__first_uint16
// C=scalar+B' GB_bind1st_tran__first_uint16
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_UINT16 || GxB_NO_FIRST_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; for FIRST this reduces to
// cij = aij.  The actual loop lives in the included template.
GrB_Info GB_Cdense_ewise3_noaccum__first_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller must fall back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, sparse B accumulated into dense C.  The template is under "#if 0"
// because FIRST is not a supported accumulator here; the stub still reports
// success so the dispatch table stays uniform.
GrB_Info GB_Cdense_accumB__first_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar) accumulated into dense C.  Body is "#if 0"-ed out for the
// FIRST operator (no accumulation defined); the stub returns success.
GrB_Info GB_Cdense_accumb__first_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using the FIRST
// operator (so cij takes A's value).  C->x is cast to the concrete C type
// before the template loop runs.
GrB_Info GB_AxD__first_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with the FIRST op.
GrB_Info GB_DxB__first_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B,
// combining overlapping entries with FIRST (cij = aij).
GrB_Info GB_AaddB__first_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns,
// combining entries with FIRST (cij = aij).
GrB_Info GB_AemultB__first_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = first(x, Bx): bind the scalar x as the first argument.  Since FIRST
// ignores its second operand, every output entry is simply the scalar x.
GrB_Info GB_bind1st__first_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// the "; ;" is the code-generator's empty GB_GETB expansion: bij unused
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
// C = first(x, A'): transpose A and apply the operator with scalar x bound
// as the first argument; the transpose machinery is in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__first_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
serial_sum_novec.c | double do_sum_novec(double* restrict var, long ncells)
{
// Return the sum of the first ncells entries of var; the OpenMP
// reduction gives each thread a private partial sum that is combined
// at the end of the loop.
double total = 0.0;
#pragma omp parallel for reduction(+:total)
for (long idx = 0; idx < ncells; idx++) {
total += var[idx];
}
return total;
}
|
GB_unop__isinf_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isinf_bool_fc32)
// op(A') function: GB (_unop_tran__isinf_bool_fc32)
// C type: bool
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = GB_cisinff (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisinff (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = GB_cisinff (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = isinf (Ax [p]) for a single-precision complex matrix A, writing a
// boolean result.  Handles both the full case (Ab == NULL) and the bitmap
// case, where only entries with Ab [p] != 0 are present.
GrB_Info GB (_unop_apply__isinf_bool_fc32)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisinff (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisinff (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isinf (A'): transpose A, typecast, and apply the unary operator; the
// shared transpose loop is pulled in from GB_unop_transpose.c.
GrB_Info GB (_unop_tran__isinf_bool_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
csr_matvec_oomp.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp"
#if defined(HYPRE_USING_DEVICE_OPENMP)
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
/* y[offset:end] = alpha * op(A)[offset:end,:] * x + beta * y[offset:end],
 * where op(A) = A' when trans != 0, else op(A) = A.  The row loop is
 * offloaded to the device with OpenMP target directives.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_CSRMatrixMatvecOMPOffload( HYPRE_Int trans,
HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y,
HYPRE_Int offset )
{
hypre_CSRMatrix *B;
if (trans)
{
/* build an explicit device transpose; destroyed before returning */
hypre_CSRMatrixTransposeDevice(A, &B, 1);
/* HYPRE_CUDA_CALL(cudaDeviceSynchronize()); */
}
else
{
B = A;
}
HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(B);
HYPRE_Complex *A_data = hypre_CSRMatrixData(B);
HYPRE_Int *A_i = hypre_CSRMatrixI(B);
HYPRE_Int *A_j = hypre_CSRMatrixJ(B);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int i;
/* one row per iteration; tempx accumulates the sparse dot product */
#pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
for (i = offset; i < A_nrows; i++)
{
HYPRE_Complex tempx = 0.0;
HYPRE_Int j;
for (j = A_i[i]; j < A_i[i + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[i] = alpha * tempx + beta * y_data[i];
}
/* HYPRE_CUDA_CALL(cudaDeviceSynchronize()); */
/* fix: the temporary transpose was previously leaked on every
 * transposed matvec call */
if (trans)
{
hypre_CSRMatrixDestroy(B);
}
return hypre_error_flag;
}
#endif /* #if defined(HYPRE_USING_DEVICE_OPENMP) */
|
task_parallel_transpose.c | #include <stdio.h>
#include <errno.h> // for errno
#include <math.h>
#include <limits.h> // for INT_MAX
#include <stdlib.h> // for strtol
#include <time.h>
#include <omp.h>
int numThreads = 1;
void freeMatrix(double** matrix, long lins){
// Release every row, then the row-pointer array itself.
long row;
for (row = 0; row < lins; ++row) {
free(matrix[row]);
}
free(matrix);
}
// Allocate a lins x cols matrix as an array of row pointers.
// Fix: the original never checked malloc; on failure it would dereference
// NULL (or hand callers a matrix with NULL rows).  Now returns NULL on
// allocation failure, after freeing any rows already obtained.
double** allocMatrix(long lins, long cols){
double** matrix = malloc(lins * sizeof *matrix);
if (matrix == NULL) {
return NULL;
}
for (long i = 0; i < lins; ++i) {
matrix[i] = malloc(cols * sizeof *matrix[i]);
if (matrix[i] == NULL) {
// roll back the partial allocation so nothing leaks
for (long k = 0; k < i; ++k) {
free(matrix[k]);
}
free(matrix);
return NULL;
}
}
return matrix;
}
void printMatrix(double** matrix, long lins, long cols){
// Print each row as space-separated "%lf " values, one row per line,
// followed by a trailing blank line.
for (long r = 0; r < lins; ++r) {
for (long c = 0; c < cols; ++c) {
printf("%lf ", matrix[r][c]);
}
printf("\n");
}
printf("\n");
}
void fillMatrix(double** matrix, long lins, long cols, long seed){
// Seed the PRNG once, then fill the matrix row-major with values
// rand()/INT_MAX (roughly in [0, 1]; assumes RAND_MAX == INT_MAX on
// this platform -- same assumption as the original).
srand(seed);
for (long r = 0; r < lins; ++r) {
for (long c = 0; c < cols; ++c) {
matrix[r][c] = (double) rand() / INT_MAX;
}
}
}
void transpose_matrix(double** matrix, double** transposed, long lins, long cols){
// One OpenMP task per source row: row i of `matrix` becomes column i
// of `transposed`.  A single thread creates the tasks; the team
// (numThreads wide) executes them.
#pragma omp parallel num_threads(numThreads) default(none) \
shared(matrix, transposed, lins, cols)
#pragma omp single
for (long row = 0; row < lins; ++row) {
#pragma omp task
{
for (long col = 0; col < cols; ++col) {
transposed[col][row] = matrix[row][col];
}
}
}
}
void multiply_row(double* linA, double** B, double* result, long colsB, long size){
// result[j] = dot(linA, B[j]).  B holds the right-hand matrix already
// transposed, so each dot product reads two contiguous rows.
for (long j = 0; j < colsB; ++j) {
double acc = 0.0;
for (long k = 0; k < size; ++k) {
acc += linA[k] * B[j][k];
}
result[j] = acc;
}
}
void multiply_matrix(double** A, double** B, double** result, long linsA, long colsB, long size){
// Spawn one task per row of A; each task fills the corresponding row
// of the product.  B must be in transposed form (see multiply_row).
#pragma omp parallel num_threads(numThreads) default(none) \
shared(linsA, numThreads, A, B, result, colsB, size)
#pragma omp single
for (long row = 0; row < linsA; ++row) {
#pragma omp task
multiply_row(A[row], B, result[row], colsB, size);
}
}
// Parse str as a base-10 long; on any invalid input print a message and
// exit(-1).  Fix: the original accepted the empty string "" as 0, because
// strtol reports "no digits" via endptr == str without setting errno.
long convert_str_long(char *str){
char *p;
errno = 0;
long conv = strtol(str, &p, 10);
// p == str  -> no digits consumed (e.g. "" or "abc")
// *p != 0   -> trailing garbage after the number (e.g. "12x")
// ERANGE    -> value outside long's range
if (errno != 0 || p == str || *p != '\0')
{
printf("%s não é um número!\n", str);
exit(-1);
}
return conv;
}
// Driver: multiplies A (linsA x colsA) by B (linsB x colsB) via B's
// transpose, timing allocation + fill + transpose + multiply together.
// argv: nthreads, show_matrix flag, seedA, seedB, linsA, colsA, linsB, colsB.
int main(int argc, char **argv){
if (argc != 9) {
printf("É necessário informar os seguintes argumentos:\nO número de threads a serem usadas\nSe as matrizes devem ser exibidas\nSeed para gerar a matriz A\nSeed para gerar a matriz B\nNúmero de linhas de A\nNúmero de colunas de A\nNúmero de linhas de B\nNúmero de colunas de B\n");
return -1;
}
numThreads = convert_str_long(argv[1]);
int show_matrix = convert_str_long(argv[2]);
long seedA = convert_str_long(argv[3]);
long seedB = convert_str_long(argv[4]);
long linsA = convert_str_long(argv[5]);
long colsA = convert_str_long(argv[6]);
long linsB = convert_str_long(argv[7]);
long colsB = convert_str_long(argv[8]);
// inner dimensions must agree for A*B to be defined
if(colsA != linsB){
printf("Número de colunas de A é diferente do número de linhas de B, multiplicação não é possivel.\n");
return -1;
}
double t = omp_get_wtime();
double** A = allocMatrix(linsA, colsA);
double** B = allocMatrix(linsB, colsB);
// BT holds B transposed so multiply_row can stream contiguous rows
double** BT = allocMatrix(colsB, linsB);
double** R = allocMatrix(linsA, colsB);
fillMatrix(A, linsA, colsA, seedA);
fillMatrix(B, linsB, colsB, seedB);
transpose_matrix(B, BT, linsB, colsB);
multiply_matrix(A, BT, R, linsA, colsB, colsA);
// elapsed wall time (seconds) covers alloc + fill + transpose + multiply
t = omp_get_wtime() - t;
printf("%.10lf\n", t);
if(show_matrix == 1){
printMatrix(A, linsA, colsA);
printMatrix(B, linsB, colsB);
printMatrix(R, linsA, colsB);
}
freeMatrix(A, linsA);
freeMatrix(B, linsB);
freeMatrix(BT, colsB);
freeMatrix(R, linsA);
return 0;
} /* main */ |
9760.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
// Fill A with the deterministic pattern (i + j) / nj so runs are
// reproducible without a PRNG.
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
// Dump B to stderr so the kernel's output is live (prevents dead-code
// elimination) and can be diffed against a reference run.  A newline is
// emitted every 20 values to keep lines short.
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 2D 3x3 convolution over the interior of A, writing B.  Border rows and
   columns of B are left untouched. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
// Fix: the original "#pragma omp private(j)" and bare "#pragma omp" are
// not valid OpenMP directives, so compilers ignored them and the kernel
// ran serially.  One combined directive parallelizes the outer loop;
// j must be private because it is declared outside the loop (i, as the
// associated loop variable, is private automatically).
#pragma omp parallel for private(j)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
// PolyBench harness: allocate, initialize, time the conv2d kernel, print the
// result for dead-code-elimination prevention, and free the arrays.
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
sync.c | // Load the OpenMP functions library
#include<omp.h>
// Teaching example for critical/barrier/master; every OpenMP pragma is
// currently commented out, so this executes as a plain serial program:
// incr ends up 1 and tnum is whatever omp_get_thread_num() returns on the
// (single) calling thread.
// NOTE(review): printf is used but <stdio.h> is not included, and sleep()
// needs <unistd.h> -- both are implicit declarations here; confirm the
// intended includes before re-enabling the pragmas.
int main()
{
// Set and initialise variables
int tnum=0, incr=0;
// Start parallel block
// #pragma omp parallel private(tnum)
// {
// Start a critical block that we want only one thread to access
// at a time. Note that the 'incr' variable is NOT private!
// #pragma omp critical
// {
incr = incr + 1;
// }
// Wait here with barrier
// #pragma omp barrier
// The master thread prints out the results of the calculation and
// then does some other processing that the other threads have to
// wait for.
// #pragma omp master
// {
tnum = omp_get_thread_num();
printf("Master thread is number %d\n", tnum);
printf("Summation = %d\n", incr);
sleep (10);
// }
// Ensure ALL threads have finished their processing before continuing.
//#pragma omp barrier
// {
printf("finished!\n");
// }
// }
return 0;
}
|
hello_world.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
int main(int argc, char* argv[])
{
// Each thread in the parallel team announces its own id once.
#pragma omp parallel
{
int tid = omp_get_thread_num();
printf("Hello World from thread %d\n", tid);
}
} |
GB_binop__le_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int32)
// A*D function (colscale): GB (_AxD__le_int32)
// D*A function (rowscale): GB (_DxB__le_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int32)
// C=scalar+B GB (_bind1st__le_int32)
// C=scalar+B' GB (_bind1st_tran__le_int32)
// C=A+scalar GB (_bind2nd__le_int32)
// C=A'+scalar GB (_bind2nd_tran__le_int32)
// C type: bool
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, cij = (aij <= bij); the loop is in
// the included template.  Note: unlike the other kernels here this one is
// void and has no GB_DISABLE guard (the generator emits it unconditionally).
void GB (_Cdense_ewise3_noaccum__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse into dense).  The template is "#if 0"-ed out because LE is
// not a supported accumulator; the stub reports success for dispatch.
GrB_Info GB (_Cdense_accumB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar into dense).  Body disabled ("#if 0") for the LE operator;
// the stub returns success.
GrB_Info GB (_Cdense_accumb__le_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by diagonal D with cij = (aij <= djj); note C's
// values are bool while A and D are int32_t.
GrB_Info GB (_AxD__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by diagonal D with cij = (dii <= bij); C is bool.
GrB_Info GB (_DxB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (union of patterns), with the LE comparator on overlaps.
// For eWiseUnion, alpha/beta scalars stand in for missing entries of A/B.
GrB_Info GB (_AaddB__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// scalars are only defined (and read) in the eWiseUnion case
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse or
// hypersparse; combines intersecting entries with cij = (aij <= bij).
GrB_Info GB (_AemultB_08__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP selects whether a flipped fmult(y,x) variant
// must be compiled; LE has a flipped form (GE), so GB_BINOP_FLIP is 0 here
// and only the unflipped template is instantiated.
GrB_Info GB (_AemultB_02__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; iterates over the mask pattern.
GrB_Info GB (_AemultB_04__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is bitmap (any mask variant); delegates to
// the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__le_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = (x <= bij) to every entry of B, with the scalar x bound as the
// first operand.  Cx and Bx may alias; entries absent from the bitmap Bb are
// skipped.
GrB_Info GB (_bind1st__le_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t *Bx = (int32_t *) Bx_input ;
const int32_t x = (*((int32_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in the bitmap are computed
if (GBB (Bb, k))
{
int32_t bij = GBX (Bx, k, false) ;
Cx [k] = (x <= bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (aij <= y) to every entry of A, with the scalar y bound as the
// second operand.  Cx and Ax may alias; entries absent from the bitmap Ab are
// skipped.
GrB_Info GB (_bind2nd__le_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
const int32_t y = (*((int32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in the bitmap are computed
if (GBB (Ab, k))
{
int32_t aij = GBX (Ax, k, false) ;
Cx [k] = (aij <= y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Per-entry operation used by GB_unop_transpose.c: cij = (x <= aij).
// No typecasting occurs, in spite of the macro name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}

// C = op (x, A'): transpose A and apply z = (x <= aij) with the scalar x
// bound as the first operand.  The transpose loop lives in the included
// template, which applies GB_CAST_OP at each entry.
GrB_Info GB (_bind1st_tran__le_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for later kernels; this is dead at run time but the
// redefinition takes effect during preprocessing (generated code)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Per-entry operation used by GB_unop_transpose.c: cij = (aij <= y).
// No typecasting occurs, in spite of the macro name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}

// C = op (A', y): transpose A and apply z = (aij <= y) with the scalar y
// bound as the second operand.  The transpose loop lives in the included
// template, which applies GB_CAST_OP at each entry.
GrB_Info GB (_bind2nd_tran__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | // C Compiler flag: -fopenmp
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#define N 20
// OpenMP worksharing demo.  Fills arrays a and b in parallel with a static
// schedule (chunk 4, 3 threads), then computes c = a + b with a dynamic
// schedule (chunk 2, 4 threads), printing which thread runs each iteration
// and the resulting arrays.  Build with -fopenmp.
int main(int argc, char *argv[])
{
    (void)argc; // command-line arguments are not used
    (void)argv;

    int threadsCount = omp_get_max_threads();
    printf("Threads count: %d\n", threadsCount);

    int length = 12;
    int a[length];
    int b[length];
    int c[length];

    // static,4 over 3 threads: each thread receives contiguous chunks of 4
    // iterations, assigned round-robin at loop entry
    #pragma omp parallel for schedule(static, 4) num_threads(3)
    for (int i = 0; i < length; i++)
    {
        a[i] = i * 10;
        b[i] = i * 20;
        printf("working %d of %d threads\n", omp_get_thread_num(), omp_get_num_threads());
    }
    for (int i = 0; i < length; i++)
    {
        printf("%d ", a[i]);
    }
    printf("\n");
    for (int i = 0; i < length; i++)
    {
        printf("%d ", b[i]);
    }
    printf("\n");

    // dynamic,2 over 4 threads: chunks of 2 iterations are handed out on
    // demand, so the thread-to-iteration mapping varies run to run
    #pragma omp parallel for schedule(dynamic, 2) num_threads(4)
    for (int i = 0; i < length; i++)
    {
        c[i] = a[i] + b[i];
        printf("working %d of %d threads\n", omp_get_thread_num(), omp_get_num_threads());
    }
    for (int i = 0; i < length; i++)
    {
        printf("%d ", c[i]);
    }
    printf("\n");
    return 0;
}
|
convolution_sgemm_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM over an im2col buffer in pack16 (16 floats per pixel) AVX-512 layout.
// bottom_im2col is size x maxk x inch; the result goes to top_blob (outch
// channels).  Columns are first permuted into tiles of width 12/8/4/2/1 (each
// tile transposed so the tile's column values are contiguous), then each
// output channel is accumulated with _mm512_fmadd_ps against the interleaved
// kernel produced by the transform-kernel routine.
static void im2col_sgemm_pack16_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
// one tmp channel per tile; channel count mirrors the tile decomposition
// (12-wide tiles first, then 8, 4, 2, and single columns)
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 64u, 16, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 64u, 16, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 64u, 16, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 64u, 16, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 64u, 16, opt.workspace_allocator);
{
// ---- permute: 12-wide tiles, transpose each 16x12 panel ----
int nn_size = size / 12;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x12
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
__m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
__m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
__m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);
__m512 _r8 = _mm512_loadu_ps(img0 + 16 * 8);
__m512 _r9 = _mm512_loadu_ps(img0 + 16 * 9);
__m512 _ra = _mm512_loadu_ps(img0 + 16 * 10);
__m512 _rb = _mm512_loadu_ps(img0 + 16 * 11);
// stage 1: interleave pairs of rows within 128-bit lanes
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
// stage 2: gather 4-float groups across row pairs
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
// stages 3-4: permute 128-bit lanes to finish the transpose
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
_mm512_storeu_ps(tmpptr + 16 * 4, _r4);
_mm512_storeu_ps(tmpptr + 16 * 5, _r5);
_mm512_storeu_ps(tmpptr + 16 * 6, _r6);
_mm512_storeu_ps(tmpptr + 16 * 7, _r7);
_mm512_storeu_ps(tmpptr + 16 * 8, _r8);
_mm512_storeu_ps(tmpptr + 16 * 9, _r9);
_mm512_storeu_ps(tmpptr + 16 * 10, _ra);
_mm512_storeu_ps(tmpptr + 16 * 11, _rb);
img0 += size * 16;
tmpptr += 16 * 12;
}
}
}
// ---- permute: 8-wide tiles, transpose each 16x8 panel ----
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x8
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
__m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
__m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
__m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
_mm512_storeu_ps(tmpptr + 16 * 4, _r4);
_mm512_storeu_ps(tmpptr + 16 * 5, _r5);
_mm512_storeu_ps(tmpptr + 16 * 6, _r6);
_mm512_storeu_ps(tmpptr + 16 * 7, _r7);
img0 += size * 16;
tmpptr += 16 * 8;
}
}
}
// ---- permute: 4-wide tiles, transpose each 16x4 panel ----
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x4
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
img0 += size * 16;
tmpptr += 16 * 4;
}
}
}
// ---- permute: 2-wide tiles, transpose each 16x2 panel ----
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x2
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
img0 += size * 16;
tmpptr += 16 * 2;
}
}
}
// ---- permute: leftover single columns (straight copy, no transpose) ----
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(img0);
_mm512_storeu_ps(tmpptr, _val);
img0 += size * 16;
tmpptr += 16;
}
}
}
}
// ---- GEMM: one output channel per task, tiles of 12/8/4/2/1 columns ----
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float zeros[16] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
// accumulators start from the per-channel bias (or zero if no bias)
const float* biasptr = bias ? bias + p * 16 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
__m512 _sum4 = _sum0;
__m512 _sum5 = _sum0;
__m512 _sum6 = _sum0;
__m512 _sum7 = _sum0;
__m512 _sum8 = _sum0;
__m512 _sum9 = _sum0;
__m512 _suma = _sum0;
__m512 _sumb = _sum0;
for (int j = 0; j < nn; j++)
{
// one kernel vector against 12 broadcast input scalars
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(tmpptr[4]);
__m512 _val5 = _mm512_set1_ps(tmpptr[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(tmpptr[6]);
__m512 _val7 = _mm512_set1_ps(tmpptr[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(tmpptr[8]);
__m512 _val9 = _mm512_set1_ps(tmpptr[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(tmpptr[10]);
__m512 _valb = _mm512_set1_ps(tmpptr[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
tmpptr += 12;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
_mm512_storeu_ps(outptr0 + 16 * 2, _sum2);
_mm512_storeu_ps(outptr0 + 16 * 3, _sum3);
_mm512_storeu_ps(outptr0 + 16 * 4, _sum4);
_mm512_storeu_ps(outptr0 + 16 * 5, _sum5);
_mm512_storeu_ps(outptr0 + 16 * 6, _sum6);
_mm512_storeu_ps(outptr0 + 16 * 7, _sum7);
_mm512_storeu_ps(outptr0 + 16 * 8, _sum8);
_mm512_storeu_ps(outptr0 + 16 * 9, _sum9);
_mm512_storeu_ps(outptr0 + 16 * 10, _suma);
_mm512_storeu_ps(outptr0 + 16 * 11, _sumb);
outptr0 += 16 * 12;
}
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
__m512 _sum4 = _sum0;
__m512 _sum5 = _sum0;
__m512 _sum6 = _sum0;
__m512 _sum7 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(tmpptr[4]);
__m512 _val5 = _mm512_set1_ps(tmpptr[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(tmpptr[6]);
__m512 _val7 = _mm512_set1_ps(tmpptr[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
tmpptr += 8;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
_mm512_storeu_ps(outptr0 + 16 * 2, _sum2);
_mm512_storeu_ps(outptr0 + 16 * 3, _sum3);
_mm512_storeu_ps(outptr0 + 16 * 4, _sum4);
_mm512_storeu_ps(outptr0 + 16 * 5, _sum5);
_mm512_storeu_ps(outptr0 + 16 * 6, _sum6);
_mm512_storeu_ps(outptr0 + 16 * 7, _sum7);
outptr0 += 16 * 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
tmpptr += 4;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
_mm512_storeu_ps(outptr0 + 16 * 2, _sum2);
_mm512_storeu_ps(outptr0 + 16 * 3, _sum3);
outptr0 += 16 * 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
tmpptr += 2;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
outptr0 += 16 * 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum = _mm512_loadu_ps(biasptr);
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
_sum = _mm512_fmadd_ps(_val0, _w0, _sum);
tmpptr += 1;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum);
outptr0 += 16;
}
}
}
// Interleave the convolution weights for the pack16 sgemm kernel.
//   src layout: maxk-inch-outch
//   dst layout: 16b-16a-maxk-inch/16a-outch/16b
// inch and outch are expected to be multiples of 16 on this code path.
static void convolution_im2col_sgemm_transform_kernel_pack16_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(16 * 16 * maxk, inch / 16, outch / 16, (size_t)4u);

    for (int q = 0; q + 15 < outch; q += 16)
    {
        // destination runs sequentially within each 16-output-channel group
        float* dst = kernel_tm.channel(q / 16);

        for (int p = 0; p + 15 < inch; p += 16)
        {
            for (int k = 0; k < maxk; k++)
            {
                // 16 input lanes (i) x 16 output lanes (j), output-major
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < 16; j++)
                    {
                        const float* src = kernel.channel(q + j).row(p + i);
                        *dst++ = src[k];
                    }
                }
            }
        }
    }
}
// Pack16 AVX-512 convolution driver: unrolls the input into an im2col buffer
// (one row of `size` packed pixels per kernel tap per input channel), then
// runs the sgemm kernel above to produce top_blob.
static void convolution_im2col_sgemm_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
{
// floats to skip from the last read of one output row to the first read
// of the next input row
const int gap = (w * stride_h - outw * stride_w) * 16;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
float* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const float* sptr = img.row(dilation_h * u) + dilation_w * v * 16;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
// aligned 16-float copy; assumes pack16 rows are 64-byte
// aligned (ncnn allocator convention) -- TODO confirm
__m512 _v = _mm512_load_ps(sptr);
_mm512_store_ps(ptr, _v);
sptr += stride_w * 16;
ptr += 16;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack16_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
rt_dtsmqr.c | #include "runtime.h"
#include "core_blas-gpu.h"
//#pragma omp task inout([lda1*n1]A1, [lda2*n2]A2) in([ldt*nb]V, [ldt*nb]T) label(dtsmqr)
//CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
#ifdef PLASMA_WITH_SMP
// SMP (CPU) task variants of the tile TSMQR kernel.  Both are thin wrappers
// around CORE_dtsmqr; the OmpSs target/task pragmas declare the data
// directions (A1/A2 read-write, V/T read-only, WORK scratch) so the runtime
// can build the inter-tile dependency graph.
//
// Left-side variant: the caller passes ldwork == ib.
#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_smp)
void CORE_ldtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
// Right-side variant: the caller passes ldwork == nb.
#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_smp)
void CORE_rdtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#endif
#ifdef PLASMA_WITH_CUDA_PURE
// CUDA-only task variants: same task interface as the SMP versions, but the
// work is dispatched to cublasDtsmqr on the task's CUDA stream.  The side is
// fixed per function (LEFT/RIGHT), so only trans is translated at run time.
#pragma omp target device (cuda) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_cuda)
void CORE_ldtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
/*
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
*/
//printf("\n\n=============================> SALEM\n\n");
// map PLASMA transpose flag to cuBLAS
cublasOperation_t cutrans;
if ( trans == PlasmaNoTrans )
cutrans = CUBLAS_OP_N;
else
cutrans = CUBLAS_OP_T;
// bind the shared cuBLAS handle to this task's execution stream
cublasHandle_t handle = nanos_get_cublas_handle();
cudaStream_t stream = nanos_get_kernel_execution_stream();
cublasSetStream(handle, stream);
cublasDtsmqr(handle, CUBLAS_SIDE_LEFT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#pragma omp target device (cuda) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_cuda)
void CORE_rdtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
/*
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
*/
cublasOperation_t cutrans;
if ( trans == PlasmaNoTrans )
cutrans = CUBLAS_OP_N;
else
cutrans = CUBLAS_OP_T;
cublasHandle_t handle = nanos_get_cublas_handle();
cudaStream_t stream = nanos_get_kernel_execution_stream();
cublasSetStream(handle, stream);
cublasDtsmqr(handle, CUBLAS_SIDE_RIGHT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#endif
#ifdef PLASMA_WITH_CUDA_HYBRID
// Hybrid mode: the SMP versions are the canonical task implementations, and
// the CUDA versions below are registered as alternatives via the OmpSs
// implements() clause, letting the runtime schedule each task instance on
// either device.
#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_hyb_smp)
void CORE_ldtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_hyb_smp)
void CORE_rdtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
//Alternative implementations
#pragma omp target device (cuda) copy_deps implements(CORE_ldtsmqr_ompss)
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_hyb_cuda)
void CORE_ldtsmqr_cuda(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
/*
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
*/
//printf("\n\n=============================> SALEM\n\n");
// map PLASMA transpose flag to cuBLAS
cublasOperation_t cutrans;
if ( trans == PlasmaNoTrans )
cutrans = CUBLAS_OP_N;
else
cutrans = CUBLAS_OP_T;
// bind the shared cuBLAS handle to this task's execution stream
cublasHandle_t handle = nanos_get_cublas_handle();
cudaStream_t stream = nanos_get_kernel_execution_stream();
cublasSetStream(handle, stream);
cublasDtsmqr(handle, CUBLAS_SIDE_LEFT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#pragma omp target device (cuda) copy_deps implements(CORE_rdtsmqr_ompss)
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_hyb_cuda)
void CORE_rdtsmqr_cuda(PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt, double *WORK, int ldwork)
{
/*
CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
*/
cublasOperation_t cutrans;
if ( trans == PlasmaNoTrans )
cutrans = CUBLAS_OP_N;
else
cutrans = CUBLAS_OP_T;
cublasHandle_t handle = nanos_get_cublas_handle();
cudaStream_t stream = nanos_get_kernel_execution_stream();
cublasSetStream(handle, stream);
cublasDtsmqr(handle, CUBLAS_SIDE_RIGHT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#endif
// Runtime dispatch for the tile TSMQR kernel: routes the call either to the
// QUARK runtime (QUARK_CORE_dtsmqr enqueues the task) or to the OmpSs runtime
// (the CORE_*_ompss wrappers are task-annotated above).  The interface matches
// the PLASMA QUARK_CORE_dtsmqr convention.
void RT_CORE_dtsmqr(Quark *quark, Quark_Task_Flags *task_flags,
PLASMA_enum side, PLASMA_enum trans,
int m1, int n1, int m2, int n2, int k, int ib, int nb,
double *A1, int lda1,
double *A2, int lda2,
const double *V, int ldv,
const double *T, int ldt)
{
plasma_context_t *plasma = plasma_context_self();

if (plasma->runtime == PLASMA_QUARK) {
QUARK_CORE_dtsmqr(
quark, task_flags,
side, trans,
m1, n1, m2, n2, k, ib, nb,
A1, lda1,
A2, lda2,
V, ldv,
T, ldt);
}
else if (plasma->runtime == PLASMA_OMPSS) {
// per-call scratch workspace; size computed in size_t to avoid int
// overflow for large tiles
double *WORK = malloc((size_t)ib * (size_t)nb * sizeof(double));
if (WORK == NULL) {
// allocation failed; skip the update rather than dereference NULL
return;
}
#pragma omp register ([ib*nb]WORK)
// NOTE(review): WORK is not freed here.  The CORE_*_ompss callees are
// OmpSs tasks that may run asynchronously after this function returns,
// so freeing immediately could be a use-after-free; confirm with the
// runtime who reclaims registered task scratch buffers.
if (side == PlasmaLeft) {
// leading dimension of WORK is ib for the left-side kernel
CORE_ldtsmqr_ompss(side, trans, m1, n1, m2, n2, k, ib, nb, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ib);
} else {
// leading dimension of WORK is nb for the right-side kernel
CORE_rdtsmqr_ompss(side, trans, m1, n1, m2, n2, k, ib, nb, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, nb);
}
}
}
|
jacobi.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
// Weighted Jacobi smoother.  Performs NUM_SMOOTHS relaxation sweeps,
// ping-ponging between x_id and VECTOR_TEMP.  When boxes carry more ghost
// cells than the stencil radius, a communication-avoiding variant amortizes
// one ghost exchange over `ghosts` sweeps by smoothing progressively fewer
// ghost cells each sweep.  `a`/`b` are the Helmholtz coefficients consumed
// by apply_op_ijk (via the macros it expands to).
void smooth(level_type * level, int x_id, int rhs_id, double a, double b){
  if(NUM_SMOOTHS&1){
    // an odd count would leave the final iterate in VECTOR_TEMP, not x_id
    printf("error - NUM_SMOOTHS must be even...\n");
    exit(0);  // NOTE(review): exits with status 0 on an error path — confirm intended
  }

  int box,s;
  int ghosts = level->box_ghosts;
  int radius     = STENCIL_RADIUS;
  int starShaped = STENCIL_STAR_SHAPED;  // NOTE(review): unused; the pragma below uses STENCIL_IS_STAR_SHAPED instead
  int communicationAvoiding = ghosts > radius;

  #ifdef USE_L1JACOBI
  double weight = 1.0;      // L1-Jacobi is stable with full weighting
  #else
  double weight = 2.0/3.0;  // classic damped-Jacobi weight
  #endif

  // if communication-avoiding, need updated RHS for stencils in ghost zones
  if(communicationAvoiding)exchange_boundary(level,rhs_id,0);

  for(s=0;s<NUM_SMOOTHS;s+=ghosts){
    // Jacobi ping pongs between x_id and VECTOR_TEMP
    if((s&1)==0){exchange_boundary(level,       x_id,STENCIL_IS_STAR_SHAPED && !communicationAvoiding);apply_BCs(level,       x_id);}
            else{exchange_boundary(level,VECTOR_TEMP,STENCIL_IS_STAR_SHAPED && !communicationAvoiding);apply_BCs(level,VECTOR_TEMP);}

    // now do ghosts communication-avoiding smooths on each box...
    uint64_t _timeStart = CycleTime();

    // outer parallelism across boxes; inner parallelism within a box below
    #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
    for(box=0;box<level->num_my_boxes;box++){
      int i,j,k,ss;
      const int jStride = level->my_boxes[box].jStride;
      const int kStride = level->my_boxes[box].kStride;
      const int     dim = level->my_boxes[box].dim;
      const double h2inv = 1.0/(level->h*level->h);
      // all vector pointers are offset to the box's first interior cell
      const double * __restrict__ rhs    = level->my_boxes[box].vectors[       rhs_id] + ghosts*(1+jStride+kStride);
      const double * __restrict__ alpha  = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
      const double * __restrict__ valid  = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain
      #ifdef USE_L1JACOBI
      const double * __restrict__ lambda = level->my_boxes[box].vectors[VECTOR_L1INV ] + ghosts*(1+jStride+kStride);
      #else
      const double * __restrict__ lambda = level->my_boxes[box].vectors[VECTOR_DINV  ] + ghosts*(1+jStride+kStride);
      #endif

      // CA trick: the first of the `ghosts` sweeps updates the deepest ghost
      // ring; each later sweep shrinks the updated region by one cell
      int ghostsToOperateOn=ghosts-1;
      for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){
        const double * __restrict__ x_n;
              double * __restrict__ x_np1;
        if((ss&1)==0){x_n   = level->my_boxes[box].vectors[       x_id] + ghosts*(1+jStride+kStride);
                      x_np1 = level->my_boxes[box].vectors[VECTOR_TEMP] + ghosts*(1+jStride+kStride);}
                 else{x_n   = level->my_boxes[box].vectors[VECTOR_TEMP] + ghosts*(1+jStride+kStride);
                      x_np1 = level->my_boxes[box].vectors[       x_id] + ghosts*(1+jStride+kStride);}
        #pragma omp parallel for private(k,j,i) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
        for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
        for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
        for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){
          int ijk = i + j*jStride + k*kStride;
          double Ax_n = apply_op_ijk(x_n);
          // weighted Jacobi update: x_{n+1} = x_n + w * D^{-1} * (rhs - A x_n)
          x_np1[ijk] = x_n[ijk] + weight*lambda[ijk]*(rhs[ijk]-Ax_n);
        }}}
      } // ss-loop
    } // box-loop
    level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart);
  } // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
primo_numeros_without_reduction.c | #include <stdio.h>
#include <math.h>
#include <omp.h>
typedef unsigned long long Entero_grande;
//#define N 100000000ULL
#define N 100000000ULL
int primo(Entero_grande n)
{
int p;
Entero_grande i, s;
p = (n % 2 != 0 || n == 2);
if (p) {
s = sqrt(n);
for (i = 3; p && i <= s; i += 2)
if (n % i == 0) p = 0;
}
return p;
}
/*
 * Counts primes in [1, N] in parallel, using an atomic increment instead of
 * a reduction (hence the file name).  Prints the loop time when compiled
 * with OpenMP.
 *
 * FIX: removed the `numberOfThreads` variable — it was written by every
 * thread of a parallel region without synchronization (a data race) and
 * never read anywhere.  Observable behavior is unchanged.
 */
int main()
{
    Entero_grande i, n;
#ifdef _OPENMP
    double t1 = omp_get_wtime();
#endif

    n = 2; /* Por el 1 y el 2 */
    /* NOTE(review): the program counts 1 as prime (old convention), hence
     * the initial count of 2; primo() is only called for odd i >= 3. */
#pragma omp parallel for schedule(runtime)
    for (i = 3; i <= N; i += 2){
        if (primo(i))
        {
#pragma omp atomic
            n++;
        }
    }

#ifdef _OPENMP
    double t2 = omp_get_wtime();
    printf("looptime: %f seconds \n", t2-t1);
#endif
    printf("Entre el 1 y el %llu hay %llu numeros primos.\n",
           N, n);
    return 0;
}
|
vefie.c | /*
Methods related to creating and solving the VEFIE.
Copyright 2015 Ian Kavanagh
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "vefie.h"
#include <mkl.h>
#include <string.h>
#if defined(_OPENMP)
#include <omp.h>
#else
void omp_set_num_threads(int num_threads) { return; }
int omp_get_num_threads(void) { return 1; }
int omp_get_max_threads(void) { return 1; }
#endif
#include "matlib.h"
#include "physics.h"
#include "shape.h"
/**
* Purpose
* =======
*
* The dimensions of the FFT used to perform the matrix vector
* product of A*x.
*
* ============================================================
*/
int dims[3];
/**
* Purpose
* =======
*
* Input and output arrays for the FFT.
*
* ============================================================
*/
double complex *in, *out;
/**
* Purpose
* =======
*
* The plans for performing the forward and backward FFT when
* computing matrix vector product of A*x.
*
* ============================================================
*/
fftw_plan forward_plan, backward_plan;
double complex *D, *G, *V;
double complex *E;
double complex *p;
double complex *rfo;
int block_size = 20;
/*
 * Initialize the VEFIE system for frequency f and a source at `antenna`:
 * allocates all global matrices, builds the contrast (D), incident field (V),
 * position (p), reduced-forward-operator mask (rfo) and Green's-function (G)
 * vectors, and extends/FFTs G via toeplitz().
 *
 * `inc` computes the incident field at a point for wavenumber k0.
 * Returns 0 on success; -10 for a negative frequency; otherwise the error
 * code of the failing allocation/FFT step.
 *
 * FIXES versus the original:
 *  - input validation now happens before any allocation (fail fast);
 *  - the "fftw_init_threads() failed" warning no longer fires in the normal
 *    single-thread case (the old short-circuit `a && b ... else warn`
 *    reached the warning whenever omp_get_max_threads() == 1);
 *  - unit detection now compares |3*floor(log10 f / 3) - 6| < eps instead of
 *    the signed value, which previously labeled ANY sub-MHz frequency "MHz"
 *    (e.g. f = 50 Hz gave 3*0 - 6 = -6 < 1e-15 -> "MHz").
 */
int init_vefie(const double f, const double complex antenna, double complex (*inc)(const double k0, const double complex antenna, const double complex point)) {
    // TODO: Document possible return values
    int ret_code;

    if (f < 0) {
        return -10;
    }

    if (omp_get_max_threads() != 1) {
        if (fftw_init_threads()) {
            omp_set_num_threads(omp_get_max_threads());
            fftw_plan_with_nthreads(omp_get_max_threads());
        } else {
            fprintf(stderr, "Warning: fftw_init_threads() failed!\n");
        }
    }

    ret_code = allocate_matrices();
    if (ret_code != 0) {
        return ret_code;
    }

    ret_code = init_fftw();
    if (ret_code != 0) {
        return ret_code;
    }

    /* Pick a human-readable unit for the start-up banner. */
    const char *unit = "Hz";
    if (fabs(3 * floor(log10(f) / 3) - 6) < 1e-15) {
        unit = "MHz";
    } else if (fabs(3 * floor(log10(f) / 3) - 9) < 1e-15) {
        unit = "GHz";
    }

#pragma omp parallel
#pragma omp master
    printf("Running VEFIE with %d threads for a shape of size %.2fm x %.2fm with a source radiating at %.2f%s positioned at (%.2f,%.2f).\n", omp_get_num_threads(), x, y, f / (strcmp(unit, "MHz") == 0 ? 1e6 : strcmp(unit, "GHz") == 0 ? 1e9 : 1), unit, creal(antenna), cimag(antenna));

    /* dx, dy: cell sizes; a: radius of the equal-area circle used in the
     * 2-D Green's-function discretization; k0: free-space wavenumber. */
    double dx = x / (double) n, dy = y / (double) m, a = sqrt(dx * dy / M_PI);
    double k0 = creal(kd(f, 1, 1, 0));

#pragma omp parallel for
    for (int k = 0; k < n*m; ++k) {
        int i = k % m;
        int j = k / m;

        // Define position as centre of each cube
        p[k] = (xlim[0] + I*ylim[0]) + (((double) i + 0.5)*dx + I*((double) j + 0.5)*dy);
        V[k] = (*inc)(k0, antenna, p[k]);

        material_t mat = material[shape[j + m * i]];
        double complex km = kd(f, mat.epsilon_r, mat.mu_r, mat.sigma);
        D[k] = km*km - k0*k0;    // contrast vs. free space
        if (cabs(D[k]) > 1e-15) {
            rfo[k] = 1.0;        // cell contributes to the reduced operator
        }

        // G depends only on the distance to the first cell (Toeplitz structure)
        double Rmn = cabs(p[0] - p[k]);
        G[k] = (I / 4) * (((2*M_PI*a) / k0) * j1(k0*a) * h2n(0, k0*Rmn));
    }
    // self-term (singular cell) handled analytically
    G[0] = (I / 4) * (((2*M_PI*a) / k0) * h2n(1, k0*a) - 4*I / (k0*k0));

    return toeplitz(&G); // Extend G to size 2N * 2M and take FFT
}
/*
 * Allocate the FFT work arrays and create the forward/backward plans used by
 * matvec().  The FFT grid is twice the discretization in each dimension
 * (2n x 2m) so the Toeplitz matrix-vector product can be embedded in a
 * circular convolution.  Returns 0 on success, an allocation error code, or
 * -2 when planning fails.
 */
int init_fftw(void) {
    dims[0] = 2*n;
    dims[1] = 2*m;
    int N = dims[0] * dims[1];

    int ret_code = fftw_malloc_s(&in, (size_t) N * sizeof *in);
    if (ret_code != 0) {
        return ret_code;
    }

    ret_code = fftw_malloc_s(&out, (size_t) N * sizeof *out);
    if (ret_code != 0) {
        return ret_code;
    }

    /* FFTW_MEASURE clobbers in/out during planning, so plans must be (and
     * are) created before any data is written into the arrays. */
    forward_plan = fftw_plan_dft(2, dims, in, out, FFTW_FORWARD, FFTW_MEASURE);
    backward_plan = fftw_plan_dft(2, dims, in, out, FFTW_BACKWARD, FFTW_MEASURE);
    if (!forward_plan || !backward_plan) {
        fprintf(stderr, "Failed: Unable to plan FFT.\n");
        return -2;
    }

    return 0;
}
/*
 * Embed the n*m Toeplitz kernel *X into a (2n)x(2m) circulant block Z,
 * take its forward FFT in place, free the original kernel, and hand Z back
 * through *X.  After this call, matvec() can apply the Toeplitz operator as
 * a pointwise product in the frequency domain.
 *
 * Returns 0 on success, or the allocation error code.
 *
 * BUG FIX: the original called fftw_cleanup() here.  Per the FFTW
 * documentation, fftw_cleanup() invalidates ALL existing plans — including
 * the global forward_plan/backward_plan created in init_fftw() and still
 * needed by matvec().  Only the local plan is destroyed now; global FFTW
 * teardown belongs in vefie_cleanup().
 *
 * NOTE(review): the embedding indices use `m` for both the row and column
 * reflections ((i + m), (m - i), ...), which looks correct only when n == m
 * (a square grid) — confirm against the callers.
 */
int toeplitz(double complex** restrict X) {
    // TODO: Fix when compiling with gcc
    //  * Produces a different result (appears to be less accurate)
    //  * Could depend on which library is being used for the FFT
    //    (Intel or FFTW3)
    //  * Other likely option, use of #pragma omp parallel for varies
    //  * between compiler
    double complex* Y = *X;

    double complex *Z;
    int ret_code = fftw_malloc_s(&Z, (size_t) (dims[0]*dims[1]) * sizeof *Z);
    if (ret_code != 0) {
        // TODO: Document possible return code
        return ret_code;
    }

    /* FFTW_ESTIMATE does not clobber Z, so planning before the fill is safe. */
    fftw_plan plan = fftw_plan_dft(2, dims, Z, Z, FFTW_FORWARD, FFTW_ESTIMATE);

    // Embed Y into a circular convolution problem of size 2Nx2M
#pragma omp parallel for
    for (int k = 0; k < n*m; ++k) {
        int i = k / m; // Row
        int j = k % m; // Column

        Z[i * dims[1] + j] = Y[i * m + j]; // Top left
        if (j == 0) {
            Z[i * dims[1] + (j + m)] = 0.0; // Top right
        } else {
            Z[i * dims[1] + (j + m)] = Y[(i + 1) * m - j]; // Top right
        }
        if (i == 0) {
            Z[(i + m) * dims[1] + j] = 0.0; // Bottom left
            Z[(i + m) * dims[1] + (j + m)] = 0.0; // Bottom right
        } else {
            Z[(i + m) * dims[1] + j] = Y[(m - i) * m + j]; // Bottom left
            if (j == 0) {
                Z[(i + m) * dims[1] + (j + m)] = 0.0; // Bottom right
            } else {
                Z[(i + m) * dims[1] + (j + m)] = Y[(m + 1 - i) * m - j]; // Bottom right
            }
        }
    }

    fftw_execute(plan);

    fftw_free(Y);
    *X = Z;      // caller's kernel now points at the FFT'd circulant block

    fftw_destroy_plan(plan);
    return 0;
}
/*
 * Compute Y = alpha * (I + G*D) * X + beta * Y via FFT-based circular
 * convolution: zero-pad D.*X into the (2n)x(2m) grid, forward FFT, multiply
 * pointwise by the (already FFT'd) Green's kernel G, inverse FFT, and scale
 * by 1/(dims[0]*dims[1]) since FFTW transforms are unnormalized.
 *
 * alpha/beta follow the usual BLAS-style matvec convention (passed by
 * pointer for use as an iterative-solver callback).
 *
 * NOTE(review): the zero-padding rows use (i + m), which matches a square
 * grid (n == m) — confirm, as in toeplitz().
 */
void matvec(const double complex *restrict alpha, const double complex *restrict X, const double complex *restrict beta, double complex *restrict Y) {
    // TODO: Fix when compiling with gcc
    //  * Produces a different result (appears to be less accurate)
    //  * Could depend on which library is being used for the FFT
    //    (Intel or FFTW3)
    //  * Other likely option, use of #pragma omp parallel for varies
    //  * between compiler
#pragma omp parallel for
    for (int k = 0; k < n*m; ++k) {
        int i = k / m;
        int j = k % m;

        // top-left block carries D.*X; the other three quadrants are zero
        in[i * dims[1] + j] = D[i * m + j] * X[i * m + j];
        in[i * dims[1] + (j + m)] = 0.0;
        in[(i + m) * dims[1] + j] = 0.0;
        in[(i + m) * dims[1] + (j + m)] = 0.0;
    }

    fftw_execute(forward_plan);

    // pointwise product with the FFT'd Green's kernel
#pragma omp parallel for
    for (int k = 0; k < dims[0] * dims[1]; ++k) {
        in[k] = G[k] * out[k];
    }

    fftw_execute(backward_plan);

#pragma omp parallel for
    for (int k = 0; k < n*m; ++k) {
        int i = k / m;
        int j = k % m;

        // 1/(dims[0]*dims[1]) normalizes FFTW's unscaled inverse transform
        Y[k] = *alpha * (X[k] + (out[i * dims[1] + j] / (dims[0]*dims[1]))) + *beta*Y[k];
    }
}
/*
 * Allocate the global VEFIE vectors (D, G, V, E, p, rfo), each of length
 * n*m.  G uses FFTW's allocator because toeplitz() later frees/reallocates
 * it with fftw_free/fftw_malloc.  Returns 0 on success or the first failing
 * allocator's error code.
 *
 * NOTE(review): on a mid-sequence failure the earlier allocations are not
 * released here — presumably callers bail out via vefie_cleanup() or exit;
 * confirm.
 */
int allocate_matrices(void) {
    size_t N = (size_t) (n*m);

    int ret_code = matalloc(N, 0, &D);
    if (ret_code != 0) {
        return ret_code;
    }

    ret_code = fftw_malloc_s(&G, N * sizeof *G);
    if (ret_code != 0) {
        return ret_code;
    }

    ret_code = matalloc(N, 0, &V);
    if (ret_code != 0) {
        return ret_code;
    }

    ret_code = matalloc(N, 0, &E);
    if (ret_code != 0) {
        return ret_code;
    }

    ret_code = matalloc(N, 0, &p);
    if (ret_code != 0) {
        return ret_code;
    }

    ret_code = matalloc(N, 0, &rfo);
    if (ret_code != 0) {
        return ret_code;
    }

    return 0;
}
/*
 * Allocate an N-element complex vector into *X.  When `init` is nonzero the
 * storage is zero-initialized (calloc-style); otherwise it is left
 * uninitialized.  Returns the underlying aligned allocator's status code.
 */
int matalloc(const size_t N, const int init, double complex **restrict X) {
    return init ? calloc_align_s((void **) X, N, sizeof **X)
                : malloc_align_s((void **) X, N * sizeof **X);
}
/*
 * Release every global resource owned by the VEFIE module.
 *
 * BUG FIX: all of the original guards were inverted (`if (!D) free(D);`),
 * i.e. each buffer was only "freed" when its pointer was already NULL — so
 * in practice nothing was ever released.  free(NULL) is a defined no-op, so
 * the plain-malloc'd vectors need no guard at all; the FFTW allocations and
 * plans keep a (correct-polarity) guard since fftw_free/fftw_destroy_plan
 * are not specified for NULL.  Pointers are nulled afterwards so a second
 * call is harmless.
 */
void vefie_cleanup(void) {
    free(D);    D = NULL;
    free(V);    V = NULL;
    free(E);    E = NULL;
    free(p);    p = NULL;
    free(rfo);  rfo = NULL;

    if (G)   { fftw_free(G);   G = NULL; }
    if (in)  { fftw_free(in);  in = NULL; }
    if (out) { fftw_free(out); out = NULL; }

    if (forward_plan)  { fftw_destroy_plan(forward_plan);  forward_plan = NULL; }
    if (backward_plan) { fftw_destroy_plan(backward_plan); backward_plan = NULL; }

    fftw_cleanup_threads();
    fftw_cleanup();
}
|
GB_unop__identity_int8_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_fc32)
// op(A') function: GB (_unop_tran__identity_int8_fc32)
// C type: int8_t
// A type: GxB_FC32_t
// cast: int8_t cij = GB_cast_to_int8_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary op elementwise: Cx [p] = (int8_t) crealf (Ax [p]).
// The identity op itself is a no-op; all the work is the FC32 -> int8
// typecast.  (Auto-generated file — comments only.)
GrB_Info GB (_unop_apply__identity_int8_fc32)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every one of the anz entries
        // is present, so no per-entry bitmap test is needed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast FC32 -> int8, and apply the
// identity op.  The loop body comes from GB_unop_transpose.c, parameterized
// by the GB_* macros defined at the top of this generated file.
GrB_Info GB (_unop_tran__identity_int8_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__bor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int64)
// C=scalar+B GB (_bind1st__bor_int64)
// C=scalar+B' GB (_bind1st_tran__bor_int64)
// C=A+scalar GB (_bind2nd__bor_int64)
// C=A'+scalar GB (_bind2nd_tran__bor_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT64 || GxB_NO_BOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A|B where C, A, and B are all dense int64 matrices; the loop lives in
// the included template, driven by the GB_BINOP macro above.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C |= B: accumulate a sparse matrix B into the dense matrix C with the
// BOR operator, using the pre-computed slicing of B's entries over tasks.
GrB_Info GB (_Cdense_accumB__bor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C |= b: accumulate a scalar b into the dense matrix C with the BOR
// operator.  (Generated-code quirk: the return inside the braces makes the
// trailing return unreachable; harmless.)
GrB_Info GB (_Cdense_accumb__bor_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd numeric phase: C = A+B, C<M> = A+B, or C<!M> = A+B with the BOR
// operator on int64.  The actual loops live in GB_add_template.c; the
// GB_WERK workspaces hold per-matrix entry slicings for the parallel tasks.
GrB_Info GB (_AaddB__bor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B with the BOR operator, for
// the general sparse/sparse case; loops come from GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__bor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  BOR is commutative (GB_BINOP_FLIP is 0 above), so the
// flipxy handling compiles down to the unflipped template.
GrB_Info GB (_AemultB_02__bor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; iterates over M's entries (sliced over M_ntasks tasks).
GrB_Info GB (_AemultB_03__bor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when the result C is bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B
// with the BOR operator; loops come from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bor_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = x | Bx: apply the BOR operator with the scalar bound to the first
// argument.  GBB/GBX handle the bitmap test and iso-valued access; entries
// absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__bor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) | (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = Ax | y: apply the BOR operator with the scalar bound to the second
// argument; mirror image of _bind1st above.
GrB_Info GB (_bind2nd__bor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) | (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
// C = x | A': transpose A and apply BOR with the scalar as the first
// argument.  GB_ATYPE is temporarily redefined because GB_unop_transpose.c
// reads it for A's type, and then restored after the #endif (the code after
// the returns is preprocessor-only, so it is not dead at compile time).
GrB_Info GB (_bind1st_tran__bor_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
// C = A' | y: transpose A and apply BOR with the scalar as the second
// argument; the per-entry cast/op is the GB_CAST_OP macro redefined just
// above this function.
GrB_Info GB (_bind2nd_tran__bor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__land_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int64)
// A*D function (colscale): GB (_AxD__land_int64)
// D*A function (rowscale): GB (_DxB__land_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int64)
// C=scalar+B GB (_bind1st__land_int64)
// C=scalar+B' GB (_bind1st_tran__land_int64)
// C=A+scalar GB (_bind2nd__land_int64)
// C=A'+scalar GB (_bind2nd_tran__land_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
// Type and operator plumbing consumed by the templates #include'd below.
// For this LAND kernel, A, B, and C are all int64_t.
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA] (GBX handles the iso-valued case)
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// the C(i,j) value at position p of Cx
#define GB_CX(p) Cx [p]
// binary operator: logical AND of the nonzero-ness of x and y
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (any of these GxB_NO_* compile-time switches disables this kernel)
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// This C += A+B kernel is compiled out for LAND: the generator emits
// "(none)" as the name because the op is not in the supported list below.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator.  The actual
// loop lives in the #include'd template, driven by the GB_* macros above.
GrB_Info GB (_Cdense_ewise3_noaccum__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; the generic case is used instead
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// B_ek_slicing/B_ntasks/B_nthreads describe the parallel decomposition of
// B's entries (presumably produced by GB_ek_slice — confirm in caller).
GrB_Info GB (_Cdense_accumB__land_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// p_bwork points to the scalar, which is of type int64_t for this kernel.
// Fix: the original emitted two `return (GrB_SUCCESS) ;` statements, one
// inside the brace block and one after it; the second was unreachable.
// Keep the single trailing return, matching _Cdense_accumB above.
GrB_Info GB (_Cdense_accumb__land_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// Only the typed Cx pointer is set up here; the loop is in the template.
GrB_Info GB (_AxD__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Only the typed Cx pointer is set up here; the loop is in the template.
GrB_Info GB (_DxB__land_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B.
// The GB_WERK_DECLARE / GB_FREE_WORK pair brackets per-call workspace used
// by the template; all control logic lives in GB_add_template.c.
GrB_Info GB (_AaddB__land_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (method 01; see GB_emult_01_meta.c).
GrB_Info GB (_AemultB_01__land_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// Note GB_BINOP_FLIP is 0 for LAND (it is commutative), so only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__land_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are
// bitmap/full; M's entries are sliced across tasks via M_ek_slicing.
GrB_Info GB (_AemultB_03__land_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__land_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = op (x, Bx [k]) for every position k present in B: bind the
// scalar x as the first argument of the LAND operator.
GrB_Info GB (_bind1st__land_int64)
(
GB_void *Cx_output,             // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// positions absent per the Bb bitmap are left untouched
// (GBB presumably treats a NULL Bb as "all present" — confirm)
if (GBB (Bb, k))
{
int64_t bval = GBX (Bx, k, false) ;
Cx [k] = ((x != 0) && (bval != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = op (Ax [k], y) for every position k present in A: bind the
// scalar y as the second argument of the LAND operator.
GrB_Info GB (_bind2nd__land_int64)
(
GB_void *Cx_output,             // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// positions absent per the Ab bitmap are left untouched
if (GBB (Ab, k))
{
int64_t aval = GBX (Ax, k, false) ;
Cx [k] = ((aval != 0) && (y != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// per-entry work for the transpose template: cij = op (x, aij)
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound as the
// first argument; the traversal lives in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__land_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function (same type here,
// since A, B, and C are all int64_t for this kernel)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// per-entry work for the transpose template: cij = op (aij, y)
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound as the
// second argument; the traversal lives in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-teams-distribute-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // single canonical loop under teams distribute simd
#pragma omp target
#pragma omp teams distribute simd
  for (int i = 0; i < x; i++)
    ;
} // NOTE(review): line/col layout is encoded in the CHECK lines; do not reflow
void test_two(int x, int y) { // two perfectly nested loops, no collapse clause
#pragma omp target
#pragma omp teams distribute simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
} // NOTE(review): line/col layout is encoded in the CHECK lines; do not reflow
void test_three(int x, int y) { // same nest as test_two but with explicit collapse(1)
#pragma omp target
#pragma omp teams distribute simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
} // NOTE(review): line/col layout is encoded in the CHECK lines; do not reflow
void test_four(int x, int y) { // collapse(2) covers both nested loops
#pragma omp target
#pragma omp teams distribute simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
} // NOTE(review): line/col layout is encoded in the CHECK lines; do not reflow
void test_five(int x, int y, int z) { // collapse(2) with a third, deeper loop beyond the collapsed nest
#pragma omp target
#pragma omp teams distribute simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
} // NOTE(review): line/col layout is encoded in the CHECK lines; do not reflow
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:34>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:5:1, col:34>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:34>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:12:1, col:34>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:46>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | | | |-value: Int 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:20:1, col:46>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | |-value: Int 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:46>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | | | |-value: Int 2
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:28:1, col:46>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | |-value: Int 2
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19>
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:46>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:46>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46>
// CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | | | |-value: Int 2
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-OMPTeamsDistributeSimdDirective {{.*}} <line:36:1, col:46>
// CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:35, col:45>
// CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:44> 'int'
// CHECK-NEXT: | | | |-value: Int 2
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2
// CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
kernel_template.c | #line 1 "kernel_template.c"
// GENERATED CODE --- DO NOT EDIT ---
// Code is produced by sasmodels.gen from sasmodels/models/MODEL.c
#ifdef __OPENCL_VERSION__
# define USE_OPENCL
#endif
#define USE_KAHAN_SUMMATION 0
// If opencl is not available, then we are compiling a C function
// Note: if using a C++ compiler, then define kernel as extern "C"
#ifndef USE_OPENCL
// Use SAS_DOUBLE to force the use of double even for float kernels
# define SAS_DOUBLE dou ## ble
# ifdef __cplusplus
#include <cstdio>
#include <cmath>
using namespace std;
#if defined(_MSC_VER)
#include <limits>
#include <float.h>
#define kernel extern "C" __declspec( dllexport )
inline double trunc(double x) { return x>=0?floor(x):-floor(-x); }
inline double fmin(double x, double y) { return x>y ? y : x; }
inline double fmax(double x, double y) { return x<y ? y : x; }
#define isnan(x) _isnan(x)
#define isinf(x) (!_finite(x))
#define isfinite(x) _finite(x)
#define NAN (std::numeric_limits<double>::quiet_NaN()) // non-signalling NaN
#define INFINITY (std::numeric_limits<double>::infinity())
#define NEED_EXPM1
#define NEED_TGAMMA
#define NEED_ERF
#else
#define kernel extern "C"
#endif
inline void SINCOS(double angle, double &svar, double &cvar) { svar=sin(angle); cvar=cos(angle); }
# else
#include <stdio.h>
#if defined(__TINYC__)
#include <math.h>
// TODO: test isnan
inline double _isnan(double x) { return x != x; } // hope this doesn't optimize away!
#undef isnan
#define isnan(x) _isnan(x)
// Defeat the double->float conversion since we don't have tgmath
inline SAS_DOUBLE trunc(SAS_DOUBLE x) { return x>=0?floor(x):-floor(-x); }
inline SAS_DOUBLE fmin(SAS_DOUBLE x, SAS_DOUBLE y) { return x>y ? y : x; }
inline SAS_DOUBLE fmax(SAS_DOUBLE x, SAS_DOUBLE y) { return x<y ? y : x; }
#define NEED_EXPM1
#define NEED_TGAMMA
#define NEED_ERF
#else
#include <tgmath.h> // C99 type-generic math, so sin(float) => sinf
#endif
// MSVC doesn't support C99, so no need for dllexport on C99 branch
#define kernel
#define SINCOS(angle,svar,cvar) do {const double _t_=angle; svar=sin(_t_);cvar=cos(_t_);} while (0)
# endif
# define global
# define local
# define constant const
// OpenCL powr(a,b) = C99 pow(a,b), b >= 0
// OpenCL pown(a,b) = C99 pow(a,b), b integer
# define powr(a,b) pow(a,b)
# define pown(a,b) pow(a,b)
#else
# if defined(USE_SINCOS)
# define SINCOS(angle,svar,cvar) svar=sincos(angle,&cvar)
# else
# define SINCOS(angle,svar,cvar) do {const double _t_=angle; svar=sin(_t_);cvar=cos(_t_);} while (0)
# endif
#endif
#if defined(NEED_EXPM1)
/* Compute exp(x) - 1 accurately for small |x| (fallback for platforms whose
 * math library lacks C99 expm1()). Adapted from the cephes math library,
 * Copyright 1984 - 1992 by Stephen L. Moshier.
 * NOTE(review): SAS_DOUBLE may be float for single-precision kernels; the
 * computation is carried out in double and narrowed on return. */
static SAS_DOUBLE expm1(SAS_DOUBLE x_in) {
double x = (double)x_in; // promote to double even for single precision kernels
// x != x catches NaN without depending on isnan() being available here.
if (x != x || x == 0.0) {
return x; // NaN and +/- 0
} else if (x < -0.5 || x > 0.5) {
// Far from zero: the direct formula loses no significant precision.
return exp(x) - 1.0;
} else {
// Near zero: rational approximation in x^2 avoids cancellation in exp(x)-1.
const double xsq = x*x;
const double p = (((
+1.2617719307481059087798E-4)*xsq
+3.0299440770744196129956E-2)*xsq
+9.9999999999999999991025E-1);
const double q = ((((
+3.0019850513866445504159E-6)*xsq
+2.5244834034968410419224E-3)*xsq
+2.2726554820815502876593E-1)*xsq
+2.0000000000000000000897E0);
double r = x * p;
r = r / (q - r);
return r+r;
}
}
#endif
// Standard mathematical constants:
// M_E, M_LOG2E, M_LOG10E, M_LN2, M_LN10, M_PI, M_PI_2=pi/2, M_PI_4=pi/4,
// M_1_PI=1/pi, M_2_PI=2/pi, M_2_SQRTPI=2/sqrt(pi), SQRT2, SQRT1_2=sqrt(1/2)
// OpenCL defines M_constant_F for float constants, and nothing if double
// is not enabled on the card, which is why these constants may be missing
#ifndef M_PI
# define M_PI 3.141592653589793
#endif
#ifndef M_PI_2
# define M_PI_2 1.570796326794897
#endif
#ifndef M_PI_4
# define M_PI_4 0.7853981633974483
#endif
#ifndef M_E
# define M_E 2.718281828459045091
#endif
// Non-standard function library
// pi/180, used for converting between degrees and radians
// 4/3 pi for computing sphere volumes
// square and cube for computing squares and cubes
#ifndef M_PI_180
# define M_PI_180 0.017453292519943295
#endif
#ifndef M_4PI_3
# define M_4PI_3 4.18879020478639
#endif
// Small math helpers shared by the generated kernels. The commented-out
// square() variants show pow()-based alternatives that were rejected in
// favor of plain multiplication, which is exact and needs no libm call.
//inline double square(double x) { return pow(x,2.0); }
//inline double square(double x) { return pown(x,2); }
inline double square(double x) { return x*x; }
inline double cube(double x) { return x*x*x; }
// sin(x)/x with the removable singularity at x == 0 filled by its limit 1.0.
inline double sas_sinx_x(double x) { return x==0 ? 1.0 : sin(x)/x; }
%(DEFINES)s
%(SOURCES)s
/*
##########################################################
# #
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
# !! !! #
# !! KEEP THIS CODE CONSISTENT WITH KERNELPY.PY !! #
# !! !! #
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
# #
##########################################################
*/
#ifdef IQ_KERNEL_NAME
/* 1-D scattering kernel template. The IQ_* identifiers are macros
 * substituted by the sasmodels code generator: IQ_OPEN_LOOPS/IQ_CLOSE_LOOPS
 * expand to the polydispersity loop headers/footers, so braces only balance
 * after expansion. Computes result[i] = scale*<Iq>/norm + background for
 * each q value, averaged over the dispersion weights.
 * NOTE(review): must stay consistent with kernelpy.py (see banner above). */
kernel void IQ_KERNEL_NAME(
global const double *q,
global double *result,
const int Nq,
#ifdef IQ_OPEN_LOOPS
#ifdef USE_OPENCL
global double *loops_g,
#endif
local double *loops,
const double cutoff,
IQ_DISPERSION_LENGTH_DECLARATIONS,
#endif
IQ_FIXED_PARAMETER_DECLARATIONS
)
{
#ifdef USE_OPENCL
#ifdef IQ_OPEN_LOOPS
// copy loops info to local memory
event_t e = async_work_group_copy(loops, loops_g, (IQ_DISPERSION_LENGTH_SUM)*2, 0);
wait_group_events(1, &e);
#endif
// OpenCL: one work item per q point; C: OpenMP loop over all q points.
int i = get_global_id(0);
if (i < Nq)
#else
#pragma omp parallel for
for (int i=0; i < Nq; i++)
#endif
{
const double qi = q[i];
#ifdef IQ_OPEN_LOOPS
double ret=0.0, norm=0.0;
IQ_OPEN_LOOPS
//for (int radius_i=0; radius_i < Nradius; radius_i++) {
// const double radius = loops[2*(radius_i)];
// const double radius_w = loops[2*(radius_i)+1];
const double weight = IQ_WEIGHT_PRODUCT;
// Skip negligible weights to save Iq() evaluations.
if (weight > cutoff) {
const double scattering = Iq(qi, IQ_PARAMETERS);
// allow kernels to exclude invalid regions by returning NaN
if (!isnan(scattering)) {
ret += weight*scattering;
#ifdef VOLUME_PARAMETERS
norm += weight * form_volume(VOLUME_PARAMETERS);
#else
norm += weight;
#endif
}
//else { printf("exclude qx,qy,I:%%g,%%g,%%g\n",qi,scattering); }
}
IQ_CLOSE_LOOPS
// norm can only be zero if volume is zero, so no scattering
result[i] = (norm > 0. ? scale*ret/norm + background : background);
#else
result[i] = scale*Iq(qi, IQ_PARAMETERS) + background;
#endif
}
}
#endif
#ifdef IQXY_KERNEL_NAME
/* 2-D (qx,qy) scattering kernel template; same code-generation scheme as the
 * 1-D kernel above: IQXY_* macros are substituted by sasmodels, and the
 * OPEN/CLOSE loop macros expand to the polydispersity loops. Optional Kahan
 * summation compensates rounding when many weighted terms are accumulated.
 * NOTE(review): must stay consistent with kernelpy.py (see banner above). */
kernel void IQXY_KERNEL_NAME(
global const double *qx,
global const double *qy,
global double *result,
const int Nq,
#ifdef IQXY_OPEN_LOOPS
#ifdef USE_OPENCL
global double *loops_g,
#endif
local double *loops,
const double cutoff,
IQXY_DISPERSION_LENGTH_DECLARATIONS,
#endif
IQXY_FIXED_PARAMETER_DECLARATIONS
)
{
#ifdef USE_OPENCL
#ifdef IQXY_OPEN_LOOPS
// copy loops info to local memory
event_t e = async_work_group_copy(loops, loops_g, (IQXY_DISPERSION_LENGTH_SUM)*2, 0);
wait_group_events(1, &e);
#endif
// OpenCL: one work item per q point; C: OpenMP loop over all q points.
int i = get_global_id(0);
if (i < Nq)
#else
#pragma omp parallel for
for (int i=0; i < Nq; i++)
#endif
{
const double qxi = qx[i];
const double qyi = qy[i];
#if USE_KAHAN_SUMMATION
double accumulated_error = 0.0;
#endif
#ifdef IQXY_OPEN_LOOPS
double ret=0.0, norm=0.0;
IQXY_OPEN_LOOPS
//for (int radius_i=0; radius_i < Nradius; radius_i++) {
// const double radius = loops[2*(radius_i)];
// const double radius_w = loops[2*(radius_i)+1];
double weight = IQXY_WEIGHT_PRODUCT;
// Skip negligible weights to save Iqxy() evaluations.
if (weight > cutoff) {
const double scattering = Iqxy(qxi, qyi, IQXY_PARAMETERS);
if (!isnan(scattering)) { // if scattering is bad, exclude it from sum
#if defined(IQXY_HAS_THETA)
// Force a nominal value for the spherical correction even when
// theta is +0/180 so that there are no divide by zero problems.
// For sin(theta) fixed at 0 and 180, we effectively multiply top and bottom
// by 1e-6, so the effect cancels.
const double spherical_correction = fmax(fabs(cos(M_PI_180*theta)), 1.e-6);
weight *= spherical_correction;
#endif
const double next = weight * scattering;
#if USE_KAHAN_SUMMATION
// Kahan compensated summation: carry the rounding error forward.
const double y = next - accumulated_error;
const double t = ret + y;
accumulated_error = (t - ret) - y;
ret = t;
#else
ret += next;
#endif
#ifdef VOLUME_PARAMETERS
norm += weight*form_volume(VOLUME_PARAMETERS);
#else
norm += weight;
#endif
}
//else { printf("exclude qx,qy,I:%%g,%%g,%%g\n",qi,scattering); }
}
IQXY_CLOSE_LOOPS
// norm can only be zero if volume is zero, so no scattering
result[i] = (norm>0. ? scale*ret/norm + background : background);
#else
result[i] = scale*Iqxy(qxi, qyi, IQXY_PARAMETERS) + background;
#endif
}
}
#endif
|
dgbtrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbtrf.c, normal z -> d, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbtrf
*
* Computes an LU factorization of a real m-by-n band matrix A
* using partial pivoting with row interchanges.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in,out] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_dgbtrf.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
******************************************************************************/
/* LU factorization of a band matrix (LAPACK-style driver).
 * Returns PlasmaSuccess, a negative value identifying an illegal argument,
 * or an error/status code from the async pipeline. */
int plasma_dgbtrf(int m, int n, int kl, int ku,
                  double *pAB, int ldab, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments; return -(position of offending argument).
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -3;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -4;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    // quick return (the comment was present but the return itself was
    // missing): an empty matrix needs no factorization, per LAPACK dgbtrf.
    if (imin(m, n) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaRealDouble, n, kl+ku+1);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);
    // Create tile matrix.
    plasma_desc_t AB;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm = (tku+tkl+1)*nb;   // since we use dgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaRealDouble, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, m, n, kl, ku, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // Initialize sequence (result was previously ignored).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&AB); // avoid leaking the tile descriptor
        return retval;
    }
    // Initialize request (result was previously ignored).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&AB);
        return retval;
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dpb2desc(pAB, ldab, AB, &sequence, &request);
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Call the tile async function.
        plasma_omp_dgbtrf(AB, ipiv, &sequence, &request);
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_ddesc2pb(AB, pAB, ldab, &sequence, &request);
    }
    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);
    // Return status accumulated across the three async stages.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* Computes an LU factorization of a real m-by-n band matrix A
* using partial pivoting with row interchanges.
* Non-blocking tile version of plasma_dgbsv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] AB
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
******************************************************************************/
/* Async (tile-layout) band LU factorization. Errors are reported through
 * sequence->status / request->status rather than a return value. */
void plasma_omp_dgbtrf(plasma_desc_t AB, int *ipiv,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate the error-reporting handles FIRST: plasma_request_fail()
    // dereferences both, so the original order (descriptor check, then NULL
    // checks that themselves called plasma_request_fail with the NULL
    // handle) dereferenced a NULL pointer on every failure path.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid AB");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Call the parallel function.
    plasma_pdgbtrf(AB, ipiv, sequence, request);
}
|
168. LU Decompose.c | /**
* \file
* LU decomposition
* square matrix
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Perform LU decomposition on matrix
* \param[in] A matrix to decompose
* \param[out] L output L matrix
* \param[out] U output U matrix
* \param[in] mat_size input square matrix size
*/
int lu_decomposition(double **A, double **L, double **U, int mat_size)
{
    /* Doolittle LU decomposition (L has a unit diagonal), row by row.
     * NOTE(review): no partial pivoting is performed, so the division by
     * U[row][row] below can hit zero for singular/badly ordered matrices.
     *
     * The original used an orphaned "#pragma omp for" with no enclosing
     * parallel region (a sequential no-op), and had the inner index j shared
     * across threads (a data race if a region ever existed). "parallel for"
     * with private(col, j) fixes both; columns of each row are independent. */
    int row, col, j;
    for (row = 0; row < mat_size; row++)
    {
        /* Upper triangular row:
         * U[row][col] = A[row][col] - sum_{j<row} L[row][j]*U[j][col] */
#ifdef _OPENMP
#pragma omp parallel for private(col, j)
#endif
        for (col = row; col < mat_size; col++)
        {
            double lu_sum = 0.;
            for (j = 0; j < row; j++) lu_sum += L[row][j] * U[j][col];
            U[row][col] = A[row][col] - lu_sum;
        }
        /* Lower triangular column: unit diagonal, and for col > row
         * L[col][row] = (A[col][row] - sum_{j<row} L[col][j]*U[j][row]) / U[row][row] */
#ifdef _OPENMP
#pragma omp parallel for private(col, j)
#endif
        for (col = row; col < mat_size; col++)
        {
            if (row == col)
            {
                L[row][col] = 1.;
                continue;
            }
            double lu_sum = 0.;
            for (j = 0; j < row; j++) lu_sum += L[col][j] * U[j][row];
            L[col][row] = (A[col][row] - lu_sum) / U[row][row];
        }
    }
    return 0;
}
/** Function to display square matrix */
/** Print an N x N matrix, one tab-separated row per line. */
void display(double **A, int N)
{
    for (int r = 0; r < N; r++)
    {
        for (int c = 0; c < N; c++)
            printf("% 3.3g \t", A[r][c]);
        putchar('\n');
    }
}
/** Main function */
/** Main function: build a random square matrix, LU-decompose it and print
 * A, L and U. Optional argv[1] overrides the matrix size (default 3). */
int main(int argc, char **argv)
{
    int mat_size = 3; // default matrix size
    const int range = 10;
    const int range2 = range >> 1;
    if (argc == 2)
        mat_size = atoi(argv[1]);
    // atoi returns 0 on garbage input; reject non-positive sizes before
    // they reach malloc (previously unchecked).
    if (mat_size < 1)
    {
        fprintf(stderr, "Matrix size must be a positive integer\n");
        return EXIT_FAILURE;
    }
    srand(time(NULL)); // random number initializer
    /* Create a square matrix with random values */
    double **A = (double **)malloc(mat_size * sizeof(double *));
    double **L = (double **)malloc(mat_size * sizeof(double *)); // output
    double **U = (double **)malloc(mat_size * sizeof(double *)); // output
    if (!A || !L || !U)
    {
        fprintf(stderr, "Memory allocation failed\n");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < mat_size; i++)
    {
        // calloc so that all values are '0' by default
        A[i] = (double *)calloc(mat_size, sizeof(double));
        L[i] = (double *)calloc(mat_size, sizeof(double));
        U[i] = (double *)calloc(mat_size, sizeof(double));
        if (!A[i] || !L[i] || !U[i])
        {
            fprintf(stderr, "Memory allocation failed\n");
            return EXIT_FAILURE;
        }
        for (int j = 0; j < mat_size; j++)
            /* random values in [-range2, range - range2 - 1], i.e. [-5, 4] */
            A[i][j] = (double)(rand() % range - range2);
    }
    lu_decomposition(A, L, U, mat_size);
    printf("A = \n");
    display(A, mat_size);
    printf("\nL = \n");
    display(L, mat_size);
    printf("\nU = \n");
    display(U, mat_size);
    /* Free dynamically allocated memory */
    for (int i = 0; i < mat_size; i++)
    {
        free(A[i]);
        free(L[i]);
        free(U[i]);
    }
    free(A);
    free(L);
    free(U);
    return 0;
}
for-18.c | /* { dg-do compile } */
/* { dg-options "-O -fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
/* Compile-only fixture: four "parallel for" loops with schedule(dynamic),
 * covering a runtime chunk size (i * 4) vs. a constant one (4), and a unit
 * stride vs. a non-unit runtime stride (m - 1). The dg-final tree-dump
 * scans depend on the exact loop forms, so only comments are added here. */
void
foo (int *a, int i)
{
int j, k = 1, l = 30, m = 4;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)
for (j = 0; j <= l; j++)
a[j] = 1;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 2;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)
for (j = 0; j <= l; j++)
a[j] = 3;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 4;
}
/* Same shape as foo() above but with schedule(guided): runtime vs. constant
 * chunk size, unit vs. non-unit stride. Kept byte-identical (comments only)
 * because the dg-final tree-dump scans depend on the loop forms. */
void
bar (int *a, int i)
{
int j, k = 1, l = 30, m = 4;
#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)
for (j = 0; j <= l; j++)
a[j] = 1;
#pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 2;
#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)
for (j = 0; j <= l; j++)
a[j] = 3;
#pragma omp parallel for num_threads (3 * i) schedule (guided, 4)
for (j = k; j <= l; j += (m - 1))
a[j] = 4;
}
/* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_dynamic_start" 4 "ompexp" { xfail *-*-* } } } */
/* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_guided_start" 4 "ompexp" { xfail *-*-* } } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
|
convolution_sgemm_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/* Quantize a float to int8: round to nearest, then clamp to the symmetric
 * range [-127, 127] (note: -128 is deliberately excluded). */
static inline signed char float2int8(float v)
{
    int q = static_cast<int>(round(v));
    if (q > 127)
        q = 127;
    else if (q < -127)
        q = -127;
    return (signed char)q;
}
// Int8 convolution computed as im2col (here: im2row) followed by an
// integer GEMM with 4x4 register blocking.
//   bottom_blob : int8 input (w x h x inch)
//   top_blob    : int32 output (outw x outh x outch); raw accumulators,
//                 no dequant/requant is applied by this function
//   _kernel     : int8 weights, outch rows of inch*kernel_h*kernel_w
// NOTE(review): no padding logic here -- presumably the caller pads
// bottom_blob so every sampled (row, col) is in bounds; verify at call site.
static void conv_im2col_sgemm_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel,
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option &opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char *kernel = _kernel;
// im2row
// Each output position (i, j) becomes one row of length
// inch*kernel_h*kernel_w holding its receptive field.
Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
{
signed char *ret = (signed char *)bottom_im2row;
int retID = 0;
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
for (int p = 0; p < inch; p++)
{
const signed char *input = bottom_blob.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// int M = outch; // outch
int N = outw * outh; // outsize or out stride
int K = kernel_w * kernel_h * inch; // ksize * inch
// bottom_im2row memory packed 4 x 4
// Interleave groups of 4 im2row rows (two K-columns at a time) so the
// GEMM inner loop streams both operands; the <4 leftover rows each get
// their own trailing channel (index nn_size + i%4).
Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
{
int nn_size = out_size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 4;
const signed char *img0 = bottom_im2row.row<signed char>(i);
const signed char *img1 = bottom_im2row.row<signed char>(i + 1);
const signed char *img2 = bottom_im2row.row<signed char>(i + 2);
const signed char *img3 = bottom_im2row.row<signed char>(i + 3);
signed char *tmpptr = bottom_tm.channel(i / 4);
int q = 0;
// pack 4 rows x 2 columns per iteration
for (; q + 1 < inch * kernel_size; q = q + 2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img1[0];
tmpptr[3] = img1[1];
tmpptr[4] = img2[0];
tmpptr[5] = img2[1];
tmpptr[6] = img3[0];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += 2;
img1 += 2;
img2 += 2;
img3 += 2;
}
// odd trailing column
for (; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += 1;
img1 += 1;
img2 += 1;
img3 += 1;
}
}
// remaining (<4) output positions: copied row-by-row, unpacked
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < out_size; i++)
{
const signed char *img0 = bottom_im2row.row<signed char>(i);
signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += 2;
}
for (; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += 1;
}
}
}
// kernel memory packed 4 x 4
// Same interleaving for the weights: 4 output channels per channel of
// kernel_tm, leftover channels stored singly.
Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
const signed char *k0 = kernel + (p + 0) * inch * kernel_size;
const signed char *k1 = kernel + (p + 1) * inch * kernel_size;
const signed char *k2 = kernel + (p + 2) * inch * kernel_size;
const signed char *k3 = kernel + (p + 3) * inch * kernel_size;
signed char *ktmp = kernel_tm.channel(p / 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q += 2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp[2] = k1[0];
ktmp[3] = k1[1];
ktmp[4] = k2[0];
ktmp[5] = k2[1];
ktmp[6] = k3[0];
ktmp[7] = k3[1];
ktmp += 8;
k0 += 2;
k1 += 2;
k2 += 2;
k3 += 2;
}
for (; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp += 4;
k0 += 1;
k1 += 1;
k2 += 1;
k3 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
const signed char *k0 = kernel + (p + 0) * inch * kernel_size;
signed char *ktmp = kernel_tm.channel(p / 4 + p % 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp += 2;
k0 += 2;
}
for (; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp++;
k0++;
}
}
}
// 4x4
// sgemm(int M, int N, int K, float* A, float* B, float* C)
// Blocked GEMM: 4 output channels x 4 output positions per tile,
// accumulating int8 x int8 products into int32.
{
// int M = outch; // outch
// int N = outw * outh; // outsize or out stride
// int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 4;
int *output0 = top_blob.channel(i);
int *output1 = top_blob.channel(i + 1);
int *output2 = top_blob.channel(i + 2);
int *output3 = top_blob.channel(i + 3);
int j = 0;
// 4x4 tiles over output positions
for (; j + 3 < N; j = j + 4)
{
signed char *vb = bottom_tm.channel(j / 4);
signed char *va = kernel_tm.channel(i / 4);
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int k = 0;
// two K-steps per iteration, matching the 4x2 packing above
for (; k + 1 < K; k = k + 2)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)va[0] * vb[2 * n]; // k0
sum0[n] += (int)va[1] * vb[2 * n + 1];
sum1[n] += (int)va[2] * vb[2 * n]; // k1
sum1[n] += (int)va[3] * vb[2 * n + 1];
sum2[n] += (int)va[4] * vb[2 * n]; // k2
sum2[n] += (int)va[5] * vb[2 * n + 1];
sum3[n] += (int)va[6] * vb[2 * n]; // k3
sum3[n] += (int)va[7] * vb[2 * n + 1];
}
va += 8;
vb += 8;
}
for (; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
}
va += 4;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
output0 += 4;
output1 += 4;
output2 += 4;
output3 += 4;
}
// leftover output positions (N % 4) for these 4 channels
for (; j < N; j++)
{
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
signed char *vb = bottom_tm.channel(j / 4 + j % 4);
signed char *va = kernel_tm.channel(i / 4);
int k = 0;
for (; k + 1 < K; k = k + 2)
{
sum0 += (int)va[0] * vb[0];
sum0 += (int)va[1] * vb[1];
sum1 += (int)va[2] * vb[0];
sum1 += (int)va[3] * vb[1];
sum2 += (int)va[4] * vb[0];
sum2 += (int)va[5] * vb[1];
sum3 += (int)va[6] * vb[0];
sum3 += (int)va[7] * vb[1];
va += 8;
vb += 2;
}
for (; k < K; k++)
{
sum0 += (int)va[0] * vb[0];
sum1 += (int)va[1] * vb[0];
sum2 += (int)va[2] * vb[0];
sum3 += (int)va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output0++;
output1++;
output2++;
output3++;
}
}
// leftover output channels (outch % 4): one channel at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_outch_start; i < outch; i++)
{
int *output = top_blob.channel(i);
int j = 0;
for (; j + 3 < N; j = j + 4)
{
signed char *vb = bottom_tm.channel(j / 4);
signed char *va = kernel_tm.channel(i / 4 + i % 4);
int sum[4] = {0};
int k = 0;
for (; k + 1 < K; k = k + 2)
{
for (int n = 0; n < 4; n++)
{
sum[n] += (int)va[0] * vb[2 * n];
sum[n] += (int)va[1] * vb[2 * n + 1];
}
va += 2;
vb += 8;
}
for (; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum[n] += (int)va[0] * vb[n];
}
va += 1;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output[n] = sum[n];
}
output += 4;
}
for (; j < N; j++)
{
int sum = 0;
signed char *vb = bottom_tm.channel(j / 4 + j % 4);
signed char *va = kernel_tm.channel(i / 4 + i % 4);
for (int k = 0; k < K; k++)
{
sum += (int)va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum;
output++;
}
}
}
// Reference (naive) implementation kept for documentation purposes:
// // sgemm(int M, int N, int K, float* A, float* B, float* C)
// {
// for (int i=0; i<M; i++)
// {
// int* output = top_blob.channel(i);
// for (int j=0; j<N; j++)
// {
// int sum = 0;
// signed char* vb = (signed char*)bottom_im2row + K * j;
// const signed char* va = kernel + K * i;
// for (int k=0; k<K; k++)
// {
// sum += (int)va[0] * vb[0];
// va += 1;
// vb += 1;
// }
// output[0] = sum;
// output++;
// }
// }
// }
}
static void conv_im2col_sgemm_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel,
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_dequant, const Option &opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char *kernel = _kernel;
const int32_t *bias = _bias;
// im2row
Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
{
signed char *ret = (signed char *)bottom_im2row;
int retID = 0;
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
for (int p = 0; p < inch; p++)
{
const signed char *input = bottom_blob.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// int M = outch; // outch
int N = outw * outh; // outsize or out stride
int K = kernel_w * kernel_h * inch; // ksize * inch
// bottom_im2row memory packed 4 x 4
Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
{
int nn_size = out_size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 4;
const signed char *img0 = bottom_im2row.row<signed char>(i);
const signed char *img1 = bottom_im2row.row<signed char>(i + 1);
const signed char *img2 = bottom_im2row.row<signed char>(i + 2);
const signed char *img3 = bottom_im2row.row<signed char>(i + 3);
signed char *tmpptr = bottom_tm.channel(i / 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img1[0];
tmpptr[3] = img1[1];
tmpptr[4] = img2[0];
tmpptr[5] = img2[1];
tmpptr[6] = img3[0];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += 2;
img1 += 2;
img2 += 2;
img3 += 2;
}
for (; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += 1;
img1 += 1;
img2 += 1;
img3 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < out_size; i++)
{
const signed char *img0 = bottom_im2row.row<signed char>(i);
signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += 2;
}
for (; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += 1;
}
}
}
// kernel memory packed 4 x 4
Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
const signed char *k0 = kernel + (p + 0) * inch * kernel_size;
const signed char *k1 = kernel + (p + 1) * inch * kernel_size;
const signed char *k2 = kernel + (p + 2) * inch * kernel_size;
const signed char *k3 = kernel + (p + 3) * inch * kernel_size;
signed char *ktmp = kernel_tm.channel(p / 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q += 2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp[2] = k1[0];
ktmp[3] = k1[1];
ktmp[4] = k2[0];
ktmp[5] = k2[1];
ktmp[6] = k3[0];
ktmp[7] = k3[1];
ktmp += 8;
k0 += 2;
k1 += 2;
k2 += 2;
k3 += 2;
}
for (; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp += 4;
k0 += 1;
k1 += 1;
k2 += 1;
k3 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
const signed char *k0 = kernel + (p + 0) * inch * kernel_size;
signed char *ktmp = kernel_tm.channel(p / 4 + p % 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp += 2;
k0 += 2;
}
for (; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp++;
k0++;
}
}
}
// 4x4
// sgemm(int M, int N, int K, float* A, float* B, float* C)
{
// int M = outch; // outch
// int N = outw * outh; // outsize or out stride
// int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 4;
// const float bias0 = bias ? bias[i] : 0.f;
// const float bias1 = bias ? bias[i + 1] : 0.f;
// const float bias2 = bias ? bias[i + 2] : 0.f;
// const float bias3 = bias ? bias[i + 3] : 0.f;
const int32_t bias0 = bias ? bias[i] : 0;
const int32_t bias1 = bias ? bias[i + 1] : 0;
const int32_t bias2 = bias ? bias[i + 2] : 0;
const int32_t bias3 = bias ? bias[i + 3] : 0;
const float scale_dequant0 = scale_dequant[i];
const float scale_dequant1 = scale_dequant[i + 1];
const float scale_dequant2 = scale_dequant[i + 2];
const float scale_dequant3 = scale_dequant[i + 3];
float *output0 = top_blob.channel(i);
float *output1 = top_blob.channel(i + 1);
float *output2 = top_blob.channel(i + 2);
float *output3 = top_blob.channel(i + 3);
int j = 0;
for (; j + 3 < N; j = j + 4)
{
signed char *vb = bottom_tm.channel(j / 4);
signed char *va = kernel_tm.channel(i / 4);
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int k = 0;
for (; k + 1 < K; k = k + 2)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)va[0] * vb[2 * n]; // k0
sum0[n] += (int)va[1] * vb[2 * n + 1];
sum1[n] += (int)va[2] * vb[2 * n]; // k1
sum1[n] += (int)va[3] * vb[2 * n + 1];
sum2[n] += (int)va[4] * vb[2 * n]; // k2
sum2[n] += (int)va[5] * vb[2 * n + 1];
sum3[n] += (int)va[6] * vb[2 * n]; // k3
sum3[n] += (int)va[7] * vb[2 * n + 1];
}
va += 8;
vb += 8;
}
for (; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
}
va += 4;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
// output0[n] = (float)sum0[n] * scale_dequant0 + bias0;
// output1[n] = (float)sum1[n] * scale_dequant1 + bias1;
// output2[n] = (float)sum2[n] * scale_dequant2 + bias2;
// output3[n] = (float)sum3[n] * scale_dequant3 + bias3;
output0[n] = (sum0[n] + bias0) * scale_dequant0;
output1[n] = (sum1[n] + bias1) * scale_dequant1;
output2[n] = (sum2[n] + bias2) * scale_dequant2;
output3[n] = (sum3[n] + bias3) * scale_dequant3;
}
output0 += 4;
output1 += 4;
output2 += 4;
output3 += 4;
}
for (; j < N; j++)
{
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
signed char *vb = bottom_tm.channel(j / 4 + j % 4);
signed char *va = kernel_tm.channel(i / 4);
int k = 0;
for (; k + 1 < K; k = k + 2)
{
sum0 += (int)va[0] * vb[0];
sum0 += (int)va[1] * vb[1];
sum1 += (int)va[2] * vb[0];
sum1 += (int)va[3] * vb[1];
sum2 += (int)va[4] * vb[0];
sum2 += (int)va[5] * vb[1];
sum3 += (int)va[6] * vb[0];
sum3 += (int)va[7] * vb[1];
va += 8;
vb += 2;
}
for (; k < K; k++)
{
sum0 += (int)va[0] * vb[0];
sum1 += (int)va[1] * vb[0];
sum2 += (int)va[2] * vb[0];
sum3 += (int)va[3] * vb[0];
va += 4;
vb += 1;
}
// output0[0] = (float)sum0 * scale_dequant0 + bias0;
// output1[0] = (float)sum1 * scale_dequant1 + bias1;
// output2[0] = (float)sum2 * scale_dequant2 + bias2;
// output3[0] = (float)sum3 * scale_dequant3 + bias3;
output0[0] = (sum0 + bias0) * scale_dequant0;
output1[0] = (sum1 + bias1) * scale_dequant1;
output2[0] = (sum2 + bias2) * scale_dequant2;
output3[0] = (sum3 + bias3) * scale_dequant3;
output0++;
output1++;
output2++;
output3++;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_outch_start; i < outch; i++)
{
float *output = top_blob.channel(i);
const int32_t bias0 = bias ? bias[i] : 0;
const float scale_dequant0 = scale_dequant[i];
int j = 0;
for (; j + 3 < N; j = j + 4)
{
signed char *vb = bottom_tm.channel(j / 4);
signed char *va = kernel_tm.channel(i / 4 + i % 4);
int sum[4] = {0};
int k = 0;
for (; k + 1 < K; k = k + 2)
{
for (int n = 0; n < 4; n++)
{
sum[n] += (int)va[0] * vb[2 * n];
sum[n] += (int)va[1] * vb[2 * n + 1];
}
va += 2;
vb += 8;
}
for (; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum[n] += (int)va[0] * vb[n];
}
va += 1;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output[n] = (sum[n] + bias0) * scale_dequant0;
}
output += 4;
}
for (; j < N; j++)
{
int sum = 0;
signed char *vb = bottom_tm.channel(j / 4 + j % 4);
signed char *va = kernel_tm.channel(i / 4 + i % 4);
for (int k = 0; k < K; k++)
{
sum += (int)va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = (sum + bias0) * scale_dequant0;
output++;
}
}
}
// // sgemm(int M, int N, int K, float* A, float* B, float* C)
// {
// for (int i=0; i<M; i++)
// {
// int* output = top_blob.channel(i);
// for (int j=0; j<N; j++)
// {
// int sum = 0;
// signed char* vb = (signed char*)bottom_im2row + K * j;
// const signed char* va = kernel + K * i;
// for (int k=0; k<K; k++)
// {
// sum += (int)va[0] * vb[0];
// va += 1;
// vb += 1;
// }
// output[0] = sum;
// output++;
// }
// }
// }
}
static void conv_im2col_sgemm_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel,
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_requant, const Option &opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char *kernel = _kernel;
const float *bias = _bias;
// im2row
Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
{
signed char *ret = (signed char *)bottom_im2row;
int retID = 0;
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
for (int p = 0; p < inch; p++)
{
const signed char *input = bottom_blob.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// int M = outch; // outch
int N = outw * outh; // outsize or out stride
int K = kernel_w * kernel_h * inch; // ksize * inch
// bottom_im2row memory packed 4 x 4
Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
{
int nn_size = out_size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 4;
const signed char *img0 = bottom_im2row.row<signed char>(i);
const signed char *img1 = bottom_im2row.row<signed char>(i + 1);
const signed char *img2 = bottom_im2row.row<signed char>(i + 2);
const signed char *img3 = bottom_im2row.row<signed char>(i + 3);
signed char *tmpptr = bottom_tm.channel(i / 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img1[0];
tmpptr[3] = img1[1];
tmpptr[4] = img2[0];
tmpptr[5] = img2[1];
tmpptr[6] = img3[0];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += 2;
img1 += 2;
img2 += 2;
img3 += 2;
}
for (; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += 1;
img1 += 1;
img2 += 1;
img3 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < out_size; i++)
{
const signed char *img0 = bottom_im2row.row<signed char>(i);
signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += 2;
}
for (; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += 1;
}
}
}
// kernel memory packed 4 x 4
Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
const signed char *k0 = kernel + (p + 0) * inch * kernel_size;
const signed char *k1 = kernel + (p + 1) * inch * kernel_size;
const signed char *k2 = kernel + (p + 2) * inch * kernel_size;
const signed char *k3 = kernel + (p + 3) * inch * kernel_size;
signed char *ktmp = kernel_tm.channel(p / 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q += 2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp[2] = k1[0];
ktmp[3] = k1[1];
ktmp[4] = k2[0];
ktmp[5] = k2[1];
ktmp[6] = k3[0];
ktmp[7] = k3[1];
ktmp += 8;
k0 += 2;
k1 += 2;
k2 += 2;
k3 += 2;
}
for (; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp += 4;
k0 += 1;
k1 += 1;
k2 += 1;
k3 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
const signed char *k0 = kernel + (p + 0) * inch * kernel_size;
signed char *ktmp = kernel_tm.channel(p / 4 + p % 4);
int q = 0;
for (; q + 1 < inch * kernel_size; q = q + 2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp += 2;
k0 += 2;
}
for (; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp++;
k0++;
}
}
}
// 4x4
// sgemm(int M, int N, int K, float* A, float* B, float* C)
{
// int M = outch; // outch
// int N = outw * outh; // outsize or out stride
// int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 4;
signed char *output0 = top_blob.channel(i);
signed char *output1 = top_blob.channel(i + 1);
signed char *output2 = top_blob.channel(i + 2);
signed char *output3 = top_blob.channel(i + 3);
const float bias0 = bias ? bias[i] : 0.f;
const float bias1 = bias ? bias[i + 1] : 0.f;
const float bias2 = bias ? bias[i + 2] : 0.f;
const float bias3 = bias ? bias[i + 3] : 0.f;
const float scale_requant_in0 = scale_requant[2 * i];
const float scale_requant_out0 = scale_requant[2 * i + 1];
const float scale_requant_in1 = scale_requant[2 * (i + 1)];
const float scale_requant_out1 = scale_requant[2 * (i + 1) + 1];
const float scale_requant_in2 = scale_requant[2 * (i + 2)];
const float scale_requant_out2 = scale_requant[2 * (i + 2) + 1];
const float scale_requant_in3 = scale_requant[2 * (i + 3)];
const float scale_requant_out3 = scale_requant[2 * (i + 3) + 1];
int j = 0;
for (; j + 3 < N; j = j + 4)
{
signed char *vb = bottom_tm.channel(j / 4);
signed char *va = kernel_tm.channel(i / 4);
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int k = 0;
for (; k + 1 < K; k = k + 2)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)va[0] * vb[2 * n]; // k0
sum0[n] += (int)va[1] * vb[2 * n + 1];
sum1[n] += (int)va[2] * vb[2 * n]; // k1
sum1[n] += (int)va[3] * vb[2 * n + 1];
sum2[n] += (int)va[4] * vb[2 * n]; // k2
sum2[n] += (int)va[5] * vb[2 * n + 1];
sum3[n] += (int)va[6] * vb[2 * n]; // k3
sum3[n] += (int)va[7] * vb[2 * n + 1];
}
va += 8;
vb += 8;
}
for (; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
}
va += 4;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0);
output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1);
output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2);
output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3);
}
output0 += 4;
output1 += 4;
output2 += 4;
output3 += 4;
}
for (; j < N; j++)
{
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
signed char *vb = bottom_tm.channel(j / 4 + j % 4);
signed char *va = kernel_tm.channel(i / 4);
int k = 0;
for (; k + 1 < K; k = k + 2)
{
sum0 += (int)va[0] * vb[0];
sum0 += (int)va[1] * vb[1];
sum1 += (int)va[2] * vb[0];
sum1 += (int)va[3] * vb[1];
sum2 += (int)va[4] * vb[0];
sum2 += (int)va[5] * vb[1];
sum3 += (int)va[6] * vb[0];
sum3 += (int)va[7] * vb[1];
va += 8;
vb += 2;
}
for (; k < K; k++)
{
sum0 += (int)va[0] * vb[0];
sum1 += (int)va[1] * vb[0];
sum2 += (int)va[2] * vb[0];
sum3 += (int)va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);
output0++;
output1++;
output2++;
output3++;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_outch_start; i < outch; i++)
{
signed char *output = top_blob.channel(i);
const float bias0 = bias ? bias[i] : 0.f;
const float scale_requant_in0 = scale_requant[2 * i];
const float scale_requant_out0 = scale_requant[2 * i + 1];
int j = 0;
for (; j + 3 < N; j = j + 4)
{
signed char *vb = bottom_tm.channel(j / 4);
signed char *va = kernel_tm.channel(i / 4 + i % 4);
int sum[4] = {0};
int k = 0;
for (; k + 1 < K; k = k + 2)
{
for (int n = 0; n < 4; n++)
{
sum[n] += (int)va[0] * vb[2 * n];
sum[n] += (int)va[1] * vb[2 * n + 1];
}
va += 2;
vb += 8;
}
for (; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum[n] += (int)va[0] * vb[n];
}
va += 1;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0);
}
output += 4;
}
for (; j < N; j++)
{
int sum = 0;
signed char *vb = bottom_tm.channel(j / 4 + j % 4);
signed char *va = kernel_tm.channel(i / 4 + i % 4);
for (int k = 0; k < K; k++)
{
sum += (int)va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0);
output++;
}
}
}
// // sgemm(int M, int N, int K, float* A, float* B, float* C)
// {
// for (int i=0; i<M; i++)
// {
// int* output = top_blob.channel(i);
// for (int j=0; j<N; j++)
// {
// int sum = 0;
// signed char* vb = (signed char*)bottom_im2row + K * j;
// const signed char* va = kernel + K * i;
// for (int k=0; k<K; k++)
// {
// sum += (int)va[0] * vb[0];
// va += 1;
// vb += 1;
// }
// output[0] = sum;
// output++;
// }
// }
// }
}
|
GB_unop__identity_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_bool
// op(A') function: GB_unop_tran__identity_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = aij
// The macros below configure the shared apply/transpose templates
// (GB_unop_transpose.c and the loops in this file) for this operator.
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
// (enables the memcpy fast path in the apply function)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator elementwise: Cx [p] = Ax [p].
// Because the op is the identity with no typecast, the dense case
// reduces to a parallel memcpy; the bitmap case copies only entries
// whose bitmap flag Ab [p] is set.
GrB_Info GB_unop_apply__identity_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of entries to process
int nthreads // number of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense case: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a straight (parallel) memcpy
GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the identity operator.
// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB_unop_tran__identity_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // per-thread workspaces
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across threads
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr66199-3.c | /* PR middle-end/66199 */
/* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */
// Shared arrays used by all three test functions below.
int u[1024], v[1024], w[1024];
// GCC testcase for PR middle-end/66199: lastprivate(d) must give the
// value of d after the final iteration (i.e. b when a < b) back to the
// caller after the parallel region.
__attribute__((noinline, noclone)) long
f1 (long a, long b)
{
long d;
#pragma omp parallel for lastprivate (d) default(none) firstprivate (a, b) shared(u, v, w)
for (d = a; d < b; d++)
u[d] = v[d] + w[d];
return d;
}
// Like f1, but additionally exercises linear(c:5) -- c advances by 5 per
// iteration -- combined with lastprivate(e); after the loop d == b,
// c == initial + 5*(b-a), and e holds c's value from the last iteration.
__attribute__((noinline, noclone)) long
f2 (long a, long b, long c)
{
long d, e;
#pragma omp parallel for lastprivate (d) default(none) firstprivate (a, b) shared(u, v, w) linear(c:5) lastprivate(e)
for (d = a; d < b; d++)
{
u[d] = v[d] + w[d];
c += 5;
e = c;
}
return d + c + e;
}
// Exercises lastprivate on both iteration variables of a collapse(2)
// nest; after the loop d1 == b1 and d2 == b2.
__attribute__((noinline, noclone)) long
f3 (long a1, long b1, long a2, long b2)
{
long d1, d2;
#pragma omp parallel for default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)
for (d1 = a1; d1 < b1; d1++)
for (d2 = a2; d2 < b2; d2++)
u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];
return d1 + d2;
}
// Verify the lastprivate/linear results:
//   f1: d ends at 1024
//   f2: d (1024) + c (17 + 5*1024) + e (same as c)
//   f3: d1 (32) + d2 (32)
int
main ()
{
if (f1 (0, 1024) != 1024
|| f2 (0, 1024, 17) != 1024 + 2 * (17 + 5 * 1024)
|| f3 (0, 32, 0, 32) != 64)
__builtin_abort ();
return 0;
}
|
TriOP.h | #ifndef TRIOP_H_
#define TRIOP_H_
/*
* TriOP.h:
* a simple feed forward neural operation, triple input.
*
* Created on: June 11, 2017
* Author: mszhang
*/
#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
// Trainable parameters of a three-input affine transform:
//   y = W1 * x1 + W2 * x2 + W3 * x3 [+ b]
// The bias b participates only when bUseB is true.
class TriParams {
  public:
    Param W1;
    Param W2;
    Param W3;
    Param b;
    bool bUseB; // whether the bias term is active
  public:
    TriParams() : bUseB(true) {
    }
    // Register every active parameter with the optimizer.
    inline void exportAdaParams(ModelUpdate& ada) {
        ada.addParam(&W1);
        ada.addParam(&W2);
        ada.addParam(&W3);
        if (!bUseB) {
            return;
        }
        ada.addParam(&b);
    }
    // Allocate W1 (nOSize x nISize1), W2 (nOSize x nISize2),
    // W3 (nOSize x nISize3) and, when useB, the bias column b (nOSize x 1).
    inline void initial(int nOSize, int nISize1, int nISize2, int nISize3, bool useB = true) {
        bUseB = useB;
        W1.initial(nOSize, nISize1);
        W2.initial(nOSize, nISize2);
        W3.initial(nOSize, nISize3);
        if (useB) {
            b.initial(nOSize, 1);
        }
    }
    // Serialize in the fixed order: flag, W1, W2, W3, [b].
    inline void save(std::ofstream &os) const {
        os << bUseB << std::endl;
        W1.save(os);
        W2.save(os);
        W3.save(os);
        if (bUseB) {
            b.save(os);
        }
    }
    // Deserialize; must mirror save() exactly.
    inline void load(std::ifstream &is) {
        is >> bUseB;
        W1.load(is);
        W2.load(is);
        W3.load(is);
        if (bUseB) {
            b.load(is);
        }
    }
};
// non-linear feed-forward node
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
// non-linear feed-forward node
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
// Computes val = activate(W1*in1 + W2*in2 + W3*in3 [+ b]); the
// pre-activation is cached in ty for use by backward().
class TriNode : public Node {
public:
PNode in1, in2, in3;
TriParams* param;
dtype(*activate)(const dtype&); // activation function
dtype(*derivate)(const dtype&, const dtype&); // derivation function of activation function
Tensor1D ty, lty; // ty: pre-activation; lty: loss gradient w.r.t. ty
public:
// defaults to tanh activation with no parameters attached
TriNode() : Node() {
in1 = in2 = in3 = NULL;
activate = ftanh;
derivate = dtanh;
param = NULL;
node_type = "tri";
}
~TriNode() {
in1 = in2 = in3 = NULL;
}
// allocate the cached tensors alongside the base node state
inline void init(int ndim, dtype dropout) {
Node::init(ndim, dropout);
ty.init(ndim);
lty.init(ndim);
}
inline void setParam(TriParams* paramInit) {
param = paramInit;
}
// reset per-example state so the node can be reused across graphs
inline void clearValue() {
Node::clearValue();
in1 = in2 = in3 = NULL;
ty = 0;
lty = 0;
}
// define the activate function and its derivation form
inline void setFunctions(dtype(*f)(const dtype&), dtype(*f_deri)(const dtype&, const dtype&)) {
activate = f;
derivate = f_deri;
}
public:
// wire this node to its three inputs and register it with the graph
void forward(Graph *cg, PNode x1, PNode x2, PNode x3) {
in1 = x1;
in2 = x2;
in3 = x3;
degree = 0;
in1->addParent(this);
in2->addParent(this);
in3->addParent(this);
cg->addNode(this);
}
public:
// forward pass: ty = W1*x1 + W2*x2 + W3*x3 (+ b), val = activate(ty)
inline void compute() {
ty.mat() = param->W1.val.mat() * in1->val.mat() + param->W2.val.mat() * in2->val.mat() + param->W3.val.mat() * in3->val.mat();
if (param->bUseB) {
ty.vec() += param->b.val.vec();
}
val.vec() = ty.vec().unaryExpr(ptr_fun(activate));
}
// backward pass: chain loss through the activation into lty, then
// accumulate weight/bias gradients and propagate loss to the inputs
inline void backward() {
lty.vec() = loss.vec() * ty.vec().binaryExpr(val.vec(), ptr_fun(derivate));
param->W1.grad.mat() += lty.mat() * in1->val.tmat();
param->W2.grad.mat() += lty.mat() * in2->val.tmat();
param->W3.grad.mat() += lty.mat() * in3->val.tmat();
if (param->bUseB) {
param->b.grad.vec() += lty.vec();
}
in1->loss.mat() += param->W1.val.mat().transpose() * lty.mat();
in2->loss.mat() += param->W2.val.mat().transpose() * lty.mat();
in3->loss.mat() += param->W3.val.mat().transpose() * lty.mat();
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to rewrite for deep understanding
// nodes are batchable only if they share params and activation fns
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
TriNode* conv_other = (TriNode*)other;
if (param != conv_other->param) {
return false;
}
if (activate != conv_other->activate || derivate != conv_other->derivate) {
return false;
}
return true;
}
};
// Linear feed-forward node: val = W1*in1 + W2*in2 + W3*in3 (+ b), no activation.
// Input nodes must be specified via the forward function.
// For input variables, we exploit column vectors: a concrete input vector
// x_i is represented by x(0, i), x(1, i), ..., x(n, i).
class LinearTriNode : public Node {
public:
PNode in1, in2, in3;  // the three input nodes; set by forward(), cleared by clearValue()
TriParams* param;  // shared parameters, not owned by this node
public:
LinearTriNode() : Node() {
in1 = in2 = in3 = NULL;
param = NULL;
node_type = "linear_tri";
}
inline void setParam(TriParams* paramInit) {
param = paramInit;
}
// Reset node state between graph executions.
inline void clearValue() {
Node::clearValue();
in1 = in2 = in3 = NULL;
}
public:
// Wire the three inputs into the computation graph and register this node.
void forward(Graph *cg, PNode x1, PNode x2, PNode x3) {
in1 = x1;
in2 = x2;
in3 = x3;
degree = 0;
in1->addParent(this);
in2->addParent(this);
in3->addParent(this);
cg->addNode(this);
}
public:
// Forward: val = W1*x1 + W2*x2 + W3*x3 (+ b); identity output.
inline void compute() {
val.mat() = param->W1.val.mat() * in1->val.mat() + param->W2.val.mat() * in2->val.mat() + param->W3.val.mat() * in3->val.mat();
if (param->bUseB) {
val.vec() += param->b.val.vec();
}
}
// Backward: loss is the gradient w.r.t. val directly (no activation);
// accumulate parameter gradients and propagate loss to the inputs.
inline void backward() {
param->W1.grad.mat() += loss.mat() * in1->val.tmat();
param->W2.grad.mat() += loss.mat() * in2->val.tmat();
param->W3.grad.mat() += loss.mat() * in3->val.tmat();
if (param->bUseB) {
param->b.grad.vec() += loss.vec();
}
in1->loss.mat() += param->W1.val.mat().transpose() * loss.mat();
in2->loss.mat() += param->W2.val.mat().transpose() * loss.mat();
in3->loss.mat() += param->W3.val.mat().transpose() * loss.mat();
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// Nodes may be batched into one Execute only when they share parameters.
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
LinearTriNode* conv_other = (LinearTriNode*)other;
if (param != conv_other->param) {
return false;
}
return true;
}
};
// Batched executor for TriNode: runs compute/backward over every node
// collected in `batch`.
class TriExecute :public Execute {
  public:
    bool bTrain;  // training-mode flag forwarded to dropout

  public:
    // Forward each node, then apply (inverted) dropout.
    inline void forward() {
        int n = batch.size();
        //#pragma omp parallel for
        for (int i = 0; i < n; ++i) {
            batch[i]->compute();
            batch[i]->forward_drop(bTrain, drop_factor);
        }
    }

    // Undo dropout masking first, then back-propagate each node.
    inline void backward() {
        int n = batch.size();
        //#pragma omp parallel for
        for (int i = 0; i < n; ++i) {
            batch[i]->backward_drop();
            batch[i]->backward();
        }
    }
};
// Build a fresh executor seeded with this node; the caller/graph owns it.
inline PExecute TriNode::generate(bool bTrain, dtype cur_drop_factor) {
    TriExecute* executor = new TriExecute();
    executor->bTrain = bTrain;
    executor->drop_factor = cur_drop_factor;
    executor->batch.push_back(this);
    return executor;
}
// Batched executor for LinearTriNode: runs compute/backward over every node
// collected in `batch`.
class LinearTriExecute :public Execute {
  public:
    bool bTrain;  // training-mode flag forwarded to dropout

  public:
    // Forward each node, then apply (inverted) dropout.
    inline void forward() {
        int n = batch.size();
        //#pragma omp parallel for
        for (int i = 0; i < n; ++i) {
            batch[i]->compute();
            batch[i]->forward_drop(bTrain, drop_factor);
        }
    }

    // Undo dropout masking first, then back-propagate each node.
    inline void backward() {
        int n = batch.size();
        //#pragma omp parallel for
        for (int i = 0; i < n; ++i) {
            batch[i]->backward_drop();
            batch[i]->backward();
        }
    }
};
// Build a fresh executor seeded with this node; the caller/graph owns it.
inline PExecute LinearTriNode::generate(bool bTrain, dtype cur_drop_factor) {
    LinearTriExecute* executor = new LinearTriExecute();
    executor->bTrain = bTrain;
    executor->drop_factor = cur_drop_factor;
    executor->batch.push_back(this);
    return executor;
}
#endif /* TRIOP_H_ */
|
irbuilder_for_unsigned_dynamic.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@workshareloop_unsigned_dynamic(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 33, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: store i32 1, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: store i32 %[[DOTCOUNT]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 35, i32 1, i32 %[[DOTCOUNT]], i32 1, i32 1)
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER_OUTER_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER:.*]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ %[[LB:.+]], %[[OMP_LOOP_PREHEADER_OUTER_COND]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[UB:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[UB]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_PREHEADER_OUTER_COND]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP4]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = zext i32 %[[TMP7]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]]
// CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = zext i32 %[[TMP10]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]]
// CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = zext i32 %[[TMP13]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT:.*]]:
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER_OUTER_COND]]:
// CHECK-NEXT: %[[TMP14:.+]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]])
// CHECK-NEXT: %[[TMP15:.+]] = icmp ne i32 %[[TMP14]], 0
// CHECK-NEXT: %[[TMP16:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[LB]] = sub i32 %[[TMP16]], 1
// CHECK-NEXT: br i1 %[[TMP15]], label %[[OMP_LOOP_HEADER]], label %[[OMP_LOOP_EXIT]]
// CHECK-NEXT: }
// Test fixture: an OpenMP worksharing loop with schedule(dynamic), an
// unsigned induction variable, a non-zero start (33) and stride 7.
// extern "C" keeps the symbol name unmangled for the CHECK lines above.
extern "C" void workshareloop_unsigned_dynamic(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(dynamic)
for (unsigned i = 33; i < 32000000; i += 7) {
a[i] = b[i] * c[i] * d[i];
}
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 32000000, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 7, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp ult i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 7, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45}
// CHECK: ![[META2:[0-9]+]] =
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0: out[i] = OP(lhs[i], 0) */
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
// Kernel body: apply OP with an implicit zero right-hand operand,
// honoring the write request type Req.
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0: out[i] = OP(0, rhs[i]) */
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
// Kernel body: apply OP with an implicit zero left-hand operand,
// honoring the write request type Req.
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
* Rows in [iter_out, min(idx_l, idx_r)) have no stored entry in either
* operand, so each is filled with the constant OP(0, 0).
* \return the first row index not yet filled, i.e. min(idx_l, idx_r)
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
// OP(0, 0) is row-invariant, so compute it once outside the loop
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
// Two NDArrays are considered the same array iff they share the same
// engine variable handle.
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
public:
/*! \brief Minimum of three values */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
  const size_t ab = a < b ? a : b;  // pairwise minimum of the first two
  return ab < c ? ab : c;
}
private:
// Dense backward that needs no forward inputs:
// lgrad = LOP(ograd), rgrad = ROP(ograd) (inputs[0] = ograd).
// When the gradient op is identity and the write is in-place, the output
// buffer must already alias ograd, so only a pointer check is needed.
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
// round the element count up to a whole number of vector lanes
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
// lhs gradient
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
// rhs gradient, same structure with ROP
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
// Dense backward that uses the forward inputs:
// lgrad = LOP(ograd, lhs, rhs), rgrad = ROP(ograd, lhs, rhs)
// with inputs = [ograd, lhs, rhs] and outputs = [lgrad, rgrad].
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
// lhs gradient; size is rounded up to whole vector-lane packets
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
// rhs gradient
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
// Row-sparse backward: for each requested side, compute OP(lhs, rhs) and
// then multiply in-place by the incoming output gradient (inputs[0]),
// i.e. grad_side = OP(lhs, rhs) * ograd.
// NOTE(review): backup_compute and the in*_ok_dense template flags are not
// used anywhere in this body -- confirm they are intentionally unused.
template<
typename xpu,
typename LOP,
typename ROP,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
RspRspOp<LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
// lhs in-place: multiply the partial gradient by ograd
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
}
// rhs grad
if (req[1] != kNullOp) {
RspRspOp<ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
// rhs in-place: multiply the partial gradient by ograd
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
}
}
// Backward for the dense*csr multiply: each input gradient is the output
// gradient times the other forward input. `reverse` selects which side
// held the CSR operand, based on the requested lhs-grad storage type.
template<typename xpu, typename LOP, typename ROP>
static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
// Only the mul gradient pair (LOP = right, ROP = left) is implemented.
const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
std::is_same<mshadow_op::left, ROP>::value;
CHECK(supported_ops)
<< "Only backward for mul is supported (LOP should be right, ROP should be left)";
const NDArray& out_grad = inputs[0];
const NDArray& lhs_in = inputs[1];
const NDArray& rhs_in = inputs[2];
const NDArray& lhs_grad = outputs[0];
const NDArray& rhs_grad = outputs[1];
const bool reverse = (outputs[0].storage_type() == kCSRStorage);
if (reverse) {
// lhs was the CSR operand: lhs_grad = ograd .* rhs (csr result),
// rhs_grad = ograd .* lhs (dense result)
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()}, {req[1]},
{rhs_grad.data()});
} else {
// rhs was the CSR operand: mirror of the branch above
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()}, {req[0]},
{lhs_grad.data()});
}
}
public:
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the binary inputs to be dense and still produce a sparse output.
* Typically used for sparse * dense = sparse.
* Note: for csr, it dispatches to fallback other than csr, csr -> csr
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if a storage type was assigned (handled)
*/
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
// FComputeEx is CPU-only here; other devices fall back
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
// no recognized combination: fall back to dense compute
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
 * \brief Allow one of the inputs to be dense and produce a dense output,
 *        for rsp inputs only support when both inputs are rsp type.
 * \tparam cpu_only whether FComputeEx is CPU-only (other devices fall back)
 * \tparam rsp whether rsp, rsp -> rsp is supported
 * \tparam csr whether csr, csr -> csr is supported
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if a storage type was assigned (handled)
 */
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                   const int dev_mask,
                                   DispatchMode* dispatch_mode,
                                   std::vector<int> *in_attrs,
                                   std::vector<int> *out_attrs) {
  using namespace common;
  CHECK_EQ(in_attrs->size(), 2);
  CHECK_EQ(out_attrs->size(), 1);
  const auto lhs_stype = (*in_attrs)[0];
  const auto rhs_stype = (*in_attrs)[1];
  bool dispatched = false;
  const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
  const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                           DispatchMode::kFComputeEx;
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
    // dns, dns ... -> dns
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFCompute);
  }
  if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
    // rsp, rsp, ... -> rsp
    dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
    // csr, csr, ... -> csr
    dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
                      (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
    // dense, csr -> dense / csr, dense -> dense
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
                      (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
    // dense, rsp -> dense / rsp, dense -> dense
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched) {
    // Fix: record the fallback decision instead of discarding it, and
    // report the actual dispatch result rather than unconditionally
    // returning true -- consistent with PreferSparseStorageType above.
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
// Dense elementwise forward restricted to integer dtypes:
// outputs[0] = OP(inputs[0], inputs[1]), launched over kLanes-wide packets.
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;  // nothing requested, nothing to do
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
// smallest of the three sizes, rounded up to whole vector-lane packets
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
// Dense elementwise forward for all non-boolean dtypes:
// outputs[0] = OP(inputs[0], inputs[1]), launched over kLanes-wide packets.
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;  // nothing requested, nothing to do
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
// boolean outputs must use ComputeWithBool / ComputeLogic instead
if (outputs[0].type_flag_ == mshadow::kBool) {
LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
// smallest of the three sizes, rounded up to whole vector-lane packets
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
// Dense elementwise forward that additionally accepts boolean dtypes:
// outputs[0] = OP(inputs[0], inputs[1]), launched over kLanes-wide packets.
template<typename xpu, typename OP>
static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;  // nothing requested, nothing to do
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
// smallest of the three sizes, rounded up to whole vector-lane packets
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
/*! \brief Dense elementwise comparison/logic forward: the dtype switch runs
 *         on the *input* type while the result is always written as bool.
 */
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp) return;  // nothing to write back
  Stream<xpu> *stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
      // Work-item count: smallest of the three sizes, rounded up to whole lanes.
      const size_t smallest = minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size());
      const size_t n_work = (smallest + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
      if (n_work != 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
            stream, n_work,
            outputs[0].dptr<bool>(),
            inputs[0].dptr<DType>(),
            inputs[1].dptr<DType>());
      }
    });
  });
}
/*! \brief Dense elementwise binary forward whose dtype switch includes the
 *         vectorized half2 type. Applies OP(inputs[0], inputs[1]) ->
 *         outputs[0] under write-request Req.
 */
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp) return;  // nothing to write back
  Stream<xpu> *stream = ctx.get_stream<xpu>();
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
    MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
      // Work-item count: smallest of the three sizes, rounded up to whole lanes.
      const size_t smallest = minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size());
      const size_t n_work = (smallest + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
      if (n_work != 0) {
        Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
            stream, n_work,
            outputs[0].dptr<DType>(),
            inputs[0].dptr<DType>(),
            inputs[1].dptr<DType>());
      }
    });
  });
}
/*! \brief Sparse-aware elementwise binary forward. Dispatches on the storage
 *         types of the two inputs and the output:
 *         rsp,rsp -> rsp/dns; csr,csr -> csr; one csr or one rsp mixed with a
 *         dense input -> dns. Any other combination is logged as unimplemented.
 */
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<NDArray> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<NDArray> &outputs) {
  using namespace common;
  CHECK_EQ(inputs.size(), 2);
  CHECK_EQ(outputs.size(), 1);
  if (req[0] == kNullOp) return;  // nothing to write back
  const auto stype_l = inputs[0].storage_type();
  const auto stype_r = inputs[1].storage_type();
  const auto stype_o = outputs[0].storage_type();
  mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
  if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
      (stype_o == kRowSparseStorage || stype_o == kDefaultStorage)) {
    // rsp, rsp -> rsp  /  rsp, rsp -> dns
    RspRspOp<OP>(
        stream, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0],
        false, false, false, false);
  } else if (ContainsOnlyStorage(inputs, kCSRStorage) && stype_o == kCSRStorage) {
    // csr, csr -> csr
    CsrCsrOp<OP>(stream, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
  } else if (stype_o == kDefaultStorage &&
             ((stype_l == kCSRStorage && stype_r == kDefaultStorage) ||
              (stype_l == kDefaultStorage && stype_r == kCSRStorage))) {
    // exactly one csr input, dense output
    const bool csr_is_lhs = (stype_l == kCSRStorage);
    const NDArray &dns = csr_is_lhs ? inputs[1] : inputs[0];
    const NDArray &csr = csr_is_lhs ? inputs[0] : inputs[1];
    DnsCsrDnsOp<OP>(stream, attrs, ctx, dns, csr, req[0], outputs[0], csr_is_lhs);
  } else if (stype_o == kDefaultStorage &&
             ((stype_l == kRowSparseStorage && stype_r == kDefaultStorage) ||
              (stype_l == kDefaultStorage && stype_r == kRowSparseStorage))) {
    // exactly one rsp input, dense output
    const bool rsp_is_lhs = (stype_l == kRowSparseStorage);
    const NDArray &dns = rsp_is_lhs ? inputs[1] : inputs[0];
    const NDArray &rsp = rsp_is_lhs ? inputs[0] : inputs[1];
    DnsRspDnsOp<xpu, OP>(stream, attrs, ctx, dns, rsp, req[0], outputs[0], rsp_is_lhs);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
/*! \brief ComputeEx variant that additionally permits a dense lvalue and/or
 *         rvalue when the template flags allow it. rsp/dns mixtures go to
 *         RspRspOp (which itself forbids more than one dense input);
 *         csr,csr falls through to ComputeEx; one csr with one dense input
 *         producing csr goes to DnsCsrCsrOp.
 */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                const OpContext &ctx,
                                const std::vector<NDArray> &inputs,
                                const std::vector<OpReqType> &req,
                                const std::vector<NDArray> &outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(inputs.size(), 2);
  CHECK_EQ(outputs.size(), 1);
  if (req[0] == kNullOp) return;  // nothing to write back
  const auto stype_l = inputs[0].storage_type();
  const auto stype_r = inputs[1].storage_type();
  const auto stype_o = outputs[0].storage_type();
  const bool rsp_pairing =
      (stype_l == kRowSparseStorage && stype_r == kRowSparseStorage) ||
      (stype_l == kRowSparseStorage && stype_r == kDefaultStorage) ||
      (stype_l == kDefaultStorage && stype_r == kRowSparseStorage);
  if ((stype_o == kRowSparseStorage || stype_o == kDefaultStorage) &&
      rsp_pairing && lhs_may_be_dense && rhs_may_be_dense) {
    // rsp, rsp -> rsp/dns;  rsp, dns -> rsp;  dns, rsp -> rsp.
    // More than one dense input is rejected inside RspRspOp:
    //   rsp, dns -> dns and dns, rsp -> dns are NOT allowed.
    mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
    RspRspOp<OP>(
        stream, attrs, ctx, inputs[0], inputs[1],
        req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
  } else if (stype_l == kCSRStorage && stype_r == kCSRStorage) {
    ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
  } else if (stype_o == kCSRStorage &&
             ((stype_l == kCSRStorage && stype_r == kDefaultStorage) ||
              (stype_l == kDefaultStorage && stype_r == kCSRStorage))) {
    // exactly one csr input, csr output
    const bool csr_is_lhs = (stype_l == kCSRStorage);
    const NDArray &dns = csr_is_lhs ? inputs[1] : inputs[0];
    const NDArray &csr = csr_is_lhs ? inputs[0] : inputs[1];
    DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], csr_is_lhs);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
/*! \brief Dense backward for ops whose gradients do not need the forward
 *         inputs: switches on the (non-bool) output dtype and forwards to
 *         BackwardUseNone_.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Same as BackwardUseNone, but the dtype switch also covers the
 *         vectorized half2 type.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Sparse backward for ops whose gradients do not need the forward
 *         inputs. inputs[0] is the output gradient; outputs[0]/outputs[1] are
 *         the lhs/rhs input gradients. Each gradient is computed only when its
 *         storage type matches the incoming gradient's sparse storage type
 *         (rsp or csr); otherwise the combination is logged as unimplemented.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
  CHECK_EQ(inputs.size(), 1U);   // output grad
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto in_stype = inputs[0].storage_type();
  const auto lhs_stype = outputs[0].storage_type();
  const auto rhs_stype = outputs[1].storage_type();
  // lhs grad
  if (req[0] != kNullOp) {
    if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      CHECK_EQ(outputs[0].storage_type(), in_stype);
      // rsp -> rsp, _. op requires 0-input returns 0-output
      DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  // rhs grad
  if (req[1] != kNullOp) {
    if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // BUGFIX: validate the rhs gradient's storage type (outputs[1]); the
      // previous code re-checked outputs[0], i.e. the lhs gradient.
      CHECK_EQ(outputs[1].storage_type(), in_stype);
      // rsp -> _, rsp. op requires 0-input returns 0-output
      DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
}
/*! \brief Dense backward for ops whose gradients read the forward inputs:
 *         switches on the (non-bool) output dtype and forwards to
 *         BackwardUseIn_.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Same as BackwardUseIn, but the dtype switch also covers the
 *         vectorized half2 type.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Sparse backward for ops whose gradients read the forward inputs.
 *         inputs = {output grad, lhs input, rhs input}; outputs = {lhs grad,
 *         rhs grad}. The inN_ok_dense flags let individual inputs be dense
 *         when all-rsp handling applies.
 *
 *         NOTE(review): the two dispatch conditions are written as separate
 *         ifs with no final else, so a storage combination matching neither
 *         silently does nothing (no LogUnimplementedOp fallback) — confirm
 *         this is intended.
 */
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
const auto out_grad_stype = inputs[0].storage_type();
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
}
if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
(lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
out_grad_stype == kDefaultStorage) {
// dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
}
}
}; // class ElemwiseBinaryOp
/*! \brief Register a two-input, one-output elementwise binary operator:
 *         sets input names ("lhs", "rhs"), elementwise shape/type inference,
 *         and allows in-place with either input.
 */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch with FComputeEx for csr and rsp available; uses the
 *         standard elementwise storage inference and requests temp space for
 *         sparse CSR handling.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch with FComputeEx for csr and rsp available.
 *         When inputs mix sparse and dense, sparse output is preferred
 *         (PreferSparseStorageType). Requests temp space for sparse CSR.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch producing a dense result even from sparse inputs
 *         (SparseSparseWithDenseResult storage inference).
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch with FComputeEx, preferring dense storage for the
 *         output (PreferDenseStorageType). Requests temp space for sparse CSR.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.